/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */
#include "etnaviv_compiler.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"

#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"

#include "tgsi/tgsi_strings.h"
#include "util/u_half.h"
struct etna_compile {
   nir_shader *nir;
#define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
   const struct etna_specs *specs;
   struct etna_shader_variant *variant;

   /* block # to instr index */
   unsigned *block_ptr;

   int inst_ptr; /* current instruction pointer */
   struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];

   uint64_t consts[ETNA_MAX_IMM];

   /* There was an error during compilation */
   bool error;
};

#define compile_error(ctx, args...) ({ \
   printf(args); \
   (ctx)->error = true; \
   assert(0); \
})
/* io related lowering
 * run after lower_int_to_float because it adds i2f/f2i ops
 */
static void
etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
{
   nir_foreach_function(function, shader) {
      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               switch (intr->intrinsic) {
               case nir_intrinsic_load_front_face: {
                  /* HW front_face is 0.0/1.0, not 0/~0u for bool
                   * lower with a comparison with 0
                   */
                  intr->dest.ssa.bit_size = 32;

                  b.cursor = nir_after_instr(instr);

                  nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
                  nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;

                  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                 nir_src_for_ssa(ssa),
                                                 ssa->parent_instr);
               } break;
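               /* Note on the case below (illustrative, inferred from the code
                * rather than stated in the original comments): when the
                * variant key requests frag_rb_swap, the mov created for the
                * stored color exchanges components 0 and 2 (red and blue),
                * presumably to match a BGRA-ordered render target.
                */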
               case nir_intrinsic_store_deref: {
                  if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
                     break;

                  nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
                  assert(deref->deref_type == nir_deref_type_var);

                  if (deref->var->data.location != FRAG_RESULT_COLOR &&
                      deref->var->data.location != FRAG_RESULT_DATA0)
                     break;

                  b.cursor = nir_before_instr(instr);

                  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
                  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
                  alu->src[0].swizzle[0] = 2;
                  alu->src[0].swizzle[2] = 0;
                  nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
               } break;
               case nir_intrinsic_load_uniform: {
                  /* multiply by 16 and convert to int */
                  b.cursor = nir_before_instr(instr);
                  nir_ssa_def *ssa = nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16));
                  nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(ssa));
               } break;
               default:
                  break;
               }
            }

            if (instr->type != nir_instr_type_tex)
               continue;

            nir_tex_instr *tex = nir_instr_as_tex(instr);
            nir_src *coord = NULL;
            nir_src *lod_bias = NULL;
            unsigned lod_bias_idx;

            assert(tex->sampler_index == tex->texture_index);

            for (unsigned i = 0; i < tex->num_srcs; i++) {
               switch (tex->src[i].src_type) {
               case nir_tex_src_coord:
                  coord = &tex->src[i].src;
                  break;
               case nir_tex_src_bias:
               case nir_tex_src_lod:
                  lod_bias = &tex->src[i].src;
                  lod_bias_idx = i;
                  break;
               case nir_tex_src_comparator:
                  break;
               default:
                  break;
               }
            }
            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* use a dummy load_uniform here to represent texcoord scale */
               b.cursor = nir_before_instr(instr);
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
               nir_intrinsic_set_base(load, ~tex->sampler_index);
               load->num_components = 2;
               load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
               nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
               nir_intrinsic_set_type(load, nir_type_float);

               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
               nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
            }
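            /* Example of the packing performed below: for a biased 2D lookup
             * (coord_components == 2) the vec4 built here is
             * (coord.s, coord.t, bias, bias), so coordinate and LOD/bias travel
             * together in the single source that pre-HALTI5 texture
             * instructions expect.
             */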
            /* pre HALTI5 needs texture sources in a single source */

            if (!lod_bias || v->shader->specs->halti >= 5)
               continue;

            assert(coord && lod_bias && tex->coord_components < 4);

            nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
            for (unsigned i = 0; i < tex->coord_components; i++) {
               vec->src[i].src = nir_src_for_ssa(coord->ssa);
               vec->src[i].swizzle[0] = i;
            }
            for (unsigned i = tex->coord_components; i < 4; i++)
               vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);

            vec->dest.write_mask = 0xf;
            nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);

            nir_tex_instr_remove_src(tex, lod_bias_idx);
            nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
            tex->coord_components = 4;

            nir_instr_insert_before(&tex->instr, &vec->instr);
         }
      }
   }
}
static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}
static void
etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
{
   nir_shader *shader = impl->function->shader;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         /* multiply sin/cos src by constant
          * TODO: do this earlier (but it breaks const_prop opt)
          */
         if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
            b.cursor = nir_before_instr(instr);

            nir_ssa_def *imm = c->specs->has_new_transcendentals ?
               nir_imm_float(&b, 1.0 / M_PI) :
               nir_imm_float(&b, 2.0 / M_PI);

            nir_instr_rewrite_src(instr, &alu->src[0].src,
                                  nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
         }
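         /* Note (illustrative, based on the code below rather than hardware
          * documentation): with has_new_transcendentals the result of
          * fdiv/flog2/fsin/fcos is widened to two components and a scalar
          * x * y multiply is appended, so the final value is presumably the
          * product of the two components the instruction writes back; the
          * 1/M_PI (instead of 2/M_PI) prescale above follows the same split.
          */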
         /* change transcendental ops to vec2 and insert vec1 mul for the result
          * TODO: do this earlier (but it breaks with optimizations)
          */
         if (c->specs->has_new_transcendentals && (
             alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
             alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
            nir_ssa_def *ssa = &alu->dest.dest.ssa;

            assert(ssa->num_components == 1);

            nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
            mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
            mul->src[1].swizzle[0] = 1;

            mul->dest.write_mask = 1;
            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);

            ssa->num_components = 2;

            mul->dest.saturate = alu->dest.saturate;
            alu->dest.saturate = 0;

            nir_instr_insert_after(instr, &mul->instr);

            nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
         }
      }
   }
}
static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         etna_lower_alu_impl(function->impl, c);
   }
}
static void
emit_inst(struct etna_compile *c, struct etna_inst *inst)
{
   c->code[c->inst_ptr++] = *inst;
}
/* how to map nir srcs to etna_inst srcs */
enum {
   SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
   SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
   SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
   SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
   SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
   SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
   SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
};
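/* Worked example of the encoding above: each hardware source slot gets two
 * bits, so SRC_0_X_1 stores 0 for src0, 3 ("X", unused) for src1 and 1 for
 * src2.  etna_emit_alu() decodes this with ((ei.src >> j*2) & 3) and copies
 * nir src i into hardware src j, skipping the "unused" value 3.  fadd,
 * mapped as OP(fadd, ADD, 0_X_1) below, therefore feeds nir srcs 0 and 1
 * into hardware srcs 0 and 2 of the ADD instruction.
 */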
/* info to translate a nir op to etna_inst */
struct etna_op_info {
   uint8_t opcode; /* INST_OPCODE_ */
   uint8_t src; /* SRC_ enum */
   uint8_t cond; /* INST_CONDITION_ */
   uint8_t type; /* INST_TYPE_ */
};
static const struct etna_op_info etna_ops[] = {
   [0 ... nir_num_opcodes - 1] = {0xff},
#define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
   INST_OPCODE_##op, \
   SRC_##src, \
   INST_CONDITION_##cond, \
   INST_TYPE_##type \
}
#define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
#define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
#define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
#define OP(nir, op, src) OPC(nir, op, src, TRUE)
#define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
#define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
   OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
   OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
   OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
   OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
   OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
   OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
   OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
   OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
   OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
   OPC(fcsel, SELECT, 0_1_2, NZ),
   OP(fdiv, DIV, 0_1_X),
   OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),

   IOP(i2f32, I2F, 0_X_X),
   UOP(u2f32, I2F, 0_X_X),
   IOP(f2i32, F2I, 0_X_X),
   UOP(f2u32, F2I, 0_X_X),
   UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
   UOP(b2i32, AND, 0_X_X), /* AND with 1 */
   OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
   UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */

   IOP(iadd, ADD, 0_X_1),
   IOP(imul, IMULLO0, 0_1_X),
   /* IOP(imad, IMADLO0, 0_1_2), */
   IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
   IOP(iabs, IABS, X_X_0),
   IOP(isign, SIGN, X_X_0),
   IOPC(imin, SELECT, 0_1_0, GT),
   IOPC(imax, SELECT, 0_1_0, LT),
   UOPC(umin, SELECT, 0_1_0, GT),
   UOPC(umax, SELECT, 0_1_0, LT),

   UOPC(b32csel, SELECT, 0_1_2, NZ),

   /* compare with int result */
   OPC(feq32, CMP, 0_1_X, EQ),
   OPC(fne32, CMP, 0_1_X, NE),
   OPC(fge32, CMP, 0_1_X, GE),
   OPC(flt32, CMP, 0_1_X, LT),
   IOPC(ieq32, CMP, 0_1_X, EQ),
   IOPC(ine32, CMP, 0_1_X, NE),
   IOPC(ige32, CMP, 0_1_X, GE),
   IOPC(ilt32, CMP, 0_1_X, LT),
   UOPC(uge32, CMP, 0_1_X, GE),
   UOPC(ult32, CMP, 0_1_X, LT),

   IOP(iand, AND, 0_X_1),
   IOP(ixor, XOR, 0_X_1),
   IOP(inot, NOT, X_X_0),
   IOP(ishl, LSHIFT, 0_X_1),
   IOP(ishr, RSHIFT, 0_X_1),
   UOP(ushr, RSHIFT, 0_X_1),
};
static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}
static void
etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
              struct etna_inst_src src[3], bool saturate)
{
   struct etna_op_info ei = etna_ops[op];
   unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);

   assert(ei.opcode != 0xff);
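   /* Illustrative note: swiz_scalar broadcasts the first written component,
    * e.g. a write mask of only .z gives ffs(0x4) - 1 = 2 and thus a zzzz
    * swizzle.  For the scalar ops handled below it is composed onto the
    * source swizzles, so each source's selection for the written lane is
    * replicated across all lanes.
    */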
   struct etna_inst inst = {
      .opcode = ei.opcode,
      .cond = ei.cond,
      .type = ei.type,
      .dst = dst,
      .sat = saturate,
   };

   switch (op) {
   case nir_op_fdiv:
   case nir_op_flog2:
   case nir_op_fsin:
   case nir_op_fcos:
      if (c->specs->has_new_transcendentals)
         inst.tex.amode = 1;
      /* fall through */
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_imul:
      /* scalar instructions we want src to be in x component */
      src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
      src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
      break;
   /* deal with instructions which don't have 1:1 mapping */
   case nir_op_b2f32:
      inst.src[2] = etna_immediate_float(1.0f);
      break;
   case nir_op_b2i32:
      inst.src[2] = etna_immediate_int(1);
      break;
   case nir_op_f2b32:
      inst.src[1] = etna_immediate_float(0.0f);
      break;
   case nir_op_i2b32:
      inst.src[1] = etna_immediate_int(0);
      break;
   case nir_op_ineg:
      inst.src[0] = etna_immediate_int(0);
      src[0].neg = 1;
      break;
   default:
      break;
   }

   /* set the "true" value for CMP instructions */
   if (inst.opcode == INST_OPCODE_CMP)
      inst.src[2] = etna_immediate_int(-1);

   for (unsigned j = 0; j < 3; j++) {
      unsigned i = ((ei.src >> j*2) & 3);
      if (i < 3)
         inst.src[j] = src[i];
   }

   emit_inst(c, &inst);
}
static void
etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
              struct etna_inst_dst dst, struct etna_inst_src coord,
              struct etna_inst_src lod_bias, struct etna_inst_src compare)
{
   struct etna_inst inst = {
      .dst = dst,
      .tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
      .tex.swiz = dst_swiz,
      .src[0] = coord,
   };

   if (lod_bias.use)
      inst.src[1] = lod_bias;

   if (compare.use)
      inst.src[2] = compare;

   switch (op) {
   case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
   case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
   case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
   default:
      assert(0);
   }

   emit_inst(c, &inst);
}
static void
etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block});
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_BRANCH,
      .cond = INST_CONDITION_NOT,
      .type = INST_TYPE_U32,
      .src[0] = condition,
      .imm = block,
   };

   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}
static void
etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_TEXKILL,
      .cond = INST_CONDITION_NZ,
      .type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
      .src[0] = condition,
   };

   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}
static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}
static void
etna_emit_load_ubo(struct etna_compile *c, struct etna_inst_dst dst,
                   struct etna_inst_src src, struct etna_inst_src base)
{
   /* convert float offset back to integer */
   if (c->specs->halti < 2) {
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_F2I,
         .type = INST_TYPE_U32,
         .dst = dst,
         .src[0] = src,
      });

      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_LOAD,
         .type = INST_TYPE_U32,
         .dst = dst,
         .src[0] = {
            .use = 1,
            .rgroup = INST_RGROUP_TEMP,
            .reg = dst.reg,
            .swiz = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1)
         },
         .src[1] = base,
      });

      return;
   }

   emit_inst(c, &(struct etna_inst) {
      .opcode = INST_OPCODE_LOAD,
      .type = INST_TYPE_U32,
      .dst = dst,
      .src[0] = src,
      .src[1] = base,
   });
}
#define OPT(nir, pass, ...) ({ \
   bool this_progress = false; \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
   this_progress; \
})

#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress = true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}
static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}
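/* Note on the constant packing consumed below (an inference from the code,
 * not original documentation): each 64-bit entry in c->consts carries the
 * 32-bit immediate/uniform value in its low half and a "contents"
 * classification in its high half, which is why imm_data takes consts[i]
 * and imm_contents takes consts[i] >> 32.
 */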
static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->imm_count = count * 4;
   uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
   uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));

   for (unsigned i = 0; i < uinfo->imm_count; i++) {
      uinfo->imm_data[i] = consts[i];
      uinfo->imm_contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}
#include "etnaviv_compiler_nir_emit.h"
bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;
   v->stage = s->info.stage;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }
   NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
              (nir_lower_io_options)0);

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_vars_to_ssa);
   OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
   OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   etna_optimize_loop(s);

   OPT_V(s, etna_lower_io, v);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      OPT_V(s, nir_lower_int_to_float);
      OPT_V(s, nir_opt_algebraic);
      OPT_V(s, nir_lower_bool_to_float);
   } else {
      OPT_V(s, nir_lower_idiv);
      OPT_V(s, nir_lower_bool_to_int32);
   }

   etna_optimize_loop(s);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   while (OPT(s, nir_opt_vectorize));
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
   NIR_PASS_V(s, nir_copy_prop);
   /* only HW supported integer source mod is ineg for iadd instruction (?) */
   NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);

   NIR_PASS_V(s, etna_lower_alu, c);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);
   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });
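   /* Two-pass label fixup (explanatory note): while emitting,
    * etna_emit_block_start() records the instruction index of every NIR block
    * in block_ptr[], and BRANCH instructions temporarily carry the target
    * block number in .imm.  The loop below rewrites that block number into
    * the final instruction index before assembling.
    */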
   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->halti5 = specs->halti >= 5;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);
   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

      /* fill in "mystery meat" load balancing value. This value determines how
       * work is scheduled between VS and PS
       * in the unified shader architecture. More precisely, it is determined from
       * the number of VS outputs, as well as chip-specific
       * vertex output buffer size, vertex cache size, and the number of shader
       * cores.
       *
       * XXX this is a conservative estimate, the "optimal" value is only known for
       * sure at link time because some
       * outputs may be unused and thus unmapped. Then again, in the general use
       * case with GLSL the vertex and fragment
       * shaders are linked already before submitting to Gallium, thus all outputs
       * are used.
       *
       * note: TGSI compiler counts all outputs (including position and pointsize), here
       * v->outfile.num_reg only counts varyings, +1 to compensate for the position output
       * TODO: might have a problem that we don't count pointsize when it is used
       */
      int half_out = v->outfile.num_reg / 2 + 1;

      uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                              2 * half_out * specs->vertex_cache_size)) +
                    9) /
                   10;
      uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
      v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                             VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                             VIVS_VS_LOAD_BALANCING_C(0x3f) |
                             VIVS_VS_LOAD_BALANCING_D(0x0f);
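      /* Worked example with made-up spec values (purely illustrative):
       * vertex_output_buffer_size = 1024, vertex_cache_size = 16,
       * shader_core_count = 4 and two varyings (half_out = 2) give
       * b = (20480 / (1024 - 64) + 9) / 10 = 3 and
       * a = (3 + 256 / (4 * 2)) / 2 = 17, so A=17, B=3, C=0x3f, D=0x0f.
       */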
   }

   ralloc_free(c->nir);
   FREE(c);

   return true;
}

void
etna_destroy_shader_nir(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.imm_data);
   FREE(shader->uniforms.imm_contents);
   FREE(shader);
}
extern const char *tgsi_swizzle_names[];

void
etna_dump_shader_nir(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
   for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
      printf(" [%i].%s = %f (0x%08x) (%d)\n",
             idx / 4,
             tgsi_swizzle_names[idx % 4],
             *((float *)&shader->uniforms.imm_data[idx]),
             shader->uniforms.imm_data[idx],
             shader->uniforms.imm_contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_vert_attrib_name(shader->infile.reg[idx].slot) :
              gl_varying_slot_name(shader->infile.reg[idx].slot),
             shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_varying_slot_name(shader->outfile.reg[idx].slot) :
              gl_frag_result_name(shader->outfile.reg[idx].slot),
             shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf(" vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf(" vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf(" vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      printf(" ps_color_out_reg=%i\n", shader->ps_color_out_reg);
      printf(" ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf(" input_count_unk8=0x%08x\n", shader->input_count_unk8);
}
static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   return NULL;
}
bool
etna_link_shader_nir(struct etna_shader_link_info *info,
                     const struct etna_shader_variant *vs,
                     const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point coord is an input to the PS without matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;
         info->pcoord_varying_comp_ofs = comp_ofs;
      } else {
         if (vsio == NULL) { /* not found -- link error */
            BUG("Semantic value not found in vertex shader outputs\n");
            return true;
         }

         varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);

   return false;
}