etnaviv: fix compile warnings in release build
[mesa.git] / src/gallium/drivers/etnaviv/etnaviv_compiler_nir_emit.h
1 /*
2 * Copyright (c) 2019 Zodiac Inflight Innovations
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jonathan Marek <jonathan@marek.ca>
25 */
26
27 #include "etnaviv_asm.h"
28 #include "etnaviv_context.h"
29
30 #include "compiler/nir/nir.h"
31 #include "compiler/nir/nir_builder.h"
32 #include "compiler/nir/nir_worklist.h"
33 #include "util/register_allocate.h"
34
35 struct emit_options {
36 unsigned max_temps; /* max # of vec4 registers */
37 unsigned max_consts; /* max # of vec4 consts */
38 unsigned id_reg; /* register with vertex/instance id */
39 bool single_const_src : 1; /* limited to 1 vec4 const src */
40 bool etna_new_transcendentals : 1;
41 void *user;
42 uint64_t *consts;
43 };
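/* "consts" points at caller-provided storage for the constant pool: judging by
 * its use in const_src() below, it holds max_consts vec4 slots of four
 * uint64_t entries each, all initially zero.
 */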
44
45 #define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
46 #define SRC_DISABLE ((hw_src){})
47 #define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_UNIFORM_0, .reg=idx, .swiz=s})
48 #define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_TEMP, .reg=idx, .swiz=s})
49
50 #define option(name) (state->options->name)
51 #define emit(type, args...) etna_emit_##type(state->options->user, args)
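/* option() reads a field of the caller-supplied emit_options; emit(foo, ...)
 * expands to etna_emit_foo(state->options->user, ...), so all code generation
 * is routed through per-backend callbacks that receive "user" as context.
 */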
52
53 typedef struct etna_inst_dst hw_dst;
54 typedef struct etna_inst_src hw_src;
55
56 enum {
57 BYPASS_DST = 1,
58 BYPASS_SRC = 2,
59 };
60
61 struct state {
62 const struct emit_options *options;
63 unsigned const_count;
64
65 nir_shader *shader;
66 nir_function_impl *impl;
67
68 /* ra state */
69 struct ra_graph *g;
70 struct ra_regs *regs;
71 unsigned *live_map;
72 unsigned num_nodes;
73 };
74
75 static inline hw_src
76 src_swizzle(hw_src src, unsigned swizzle)
77 {
78 src.swiz = inst_swiz_compose(src.swiz, swizzle);
79 return src;
80 }
81
82 static inline bool is_sysval(nir_instr *instr)
83 {
84 if (instr->type != nir_instr_type_intrinsic)
85 return false;
86
87 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
88 return intr->intrinsic == nir_intrinsic_load_front_face ||
89 intr->intrinsic == nir_intrinsic_load_frag_coord;
90 }
91
92 /* constants are represented as 64-bit ints:
93  * the low 32 bits hold the value and the high 32 bits the type (imm, uniform, etc)
94  */
95
96 #define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
97 #define CONST(x) CONST_VAL(ETNA_IMMEDIATE_CONSTANT, x)
98 #define UNIFORM(x) CONST_VAL(ETNA_IMMEDIATE_UNIFORM, x)
99 #define UNIFORM_BASE(x) CONST_VAL(ETNA_IMMEDIATE_UBO0_ADDR, x)
100 #define TEXSCALE(x, i) CONST_VAL(ETNA_IMMEDIATE_TEXRECT_SCALE_X + (i), x)
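/* Example: CONST(0x3f800000) (the bit pattern of 1.0f) packs to
 * ((uint64_t)ETNA_IMMEDIATE_CONSTANT << 32) | 0x3f800000 -- the tag sits in
 * the upper half, the raw value in the lower half, and emit_shader() later
 * recovers the tag with "off[0].u64 >> 32".
 */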
101
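/* Try to place "value" in the vec4 constant slot "c": reuse a matching or
 * free component (a zero entry counts as free) and return its index 0-3,
 * or -1 when all four components already hold different values.
 */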
102 static int
103 const_add(uint64_t *c, uint64_t value)
104 {
105 for (unsigned i = 0; i < 4; i++) {
106 if (c[i] == value || !c[i]) {
107 c[i] = value;
108 return i;
109 }
110 }
111 return -1;
112 }
113
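/* Find or grow a vec4 constant that can hold all num_components values and
 * return it as a uniform source whose swizzle selects the components the
 * values ended up in.
 */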
114 static hw_src
115 const_src(struct state *state, nir_const_value *value, unsigned num_components)
116 {
117 unsigned i;
118 int swiz = -1;
119 for (i = 0; swiz < 0; i++) {
120 uint64_t *a = &option(consts)[i*4];
121 uint64_t save[4];
122 memcpy(save, a, sizeof(save));
123 swiz = 0;
124 for (unsigned j = 0; j < num_components; j++) {
125 int c = const_add(a, value[j].u64);
126 if (c < 0) {
127 memcpy(a, save, sizeof(save));
128 swiz = -1;
129 break;
130 }
131 swiz |= c << j * 2;
132 }
133 }
134
135 assert(i <= option(max_consts));
136 state->const_count = MAX2(state->const_count, i);
137
138 return SRC_CONST(i - 1, swiz);
139 }
140
141 struct ssa_reg {
142 uint8_t idx;
143 uint8_t src_swizzle;
144 uint8_t dst_swizzle;
145 uint8_t write_mask;
146 };
147
148 /* Swizzles and write masks can be used to layer virtual non-interfering
149 * registers on top of the real VEC4 registers. For example, the virtual
150 * VEC3_XYZ register and the virtual SCALAR_W register that use the same
151 * physical VEC4 base register do not interfere.
152 */
153 enum {
154 REG_CLASS_VIRT_SCALAR,
155 REG_CLASS_VIRT_VEC2,
156 REG_CLASS_VIRT_VEC3,
157 REG_CLASS_VEC4,
158 /* special vec2 class for fast transcendentals, limited to XY or ZW */
159 REG_CLASS_VIRT_VEC2T,
160 NUM_REG_CLASSES,
161 } reg_class;
162
163 enum {
164 REG_TYPE_VEC4,
165 REG_TYPE_VIRT_VEC3_XYZ,
166 REG_TYPE_VIRT_VEC3_XYW,
167 REG_TYPE_VIRT_VEC3_XZW,
168 REG_TYPE_VIRT_VEC3_YZW,
169 REG_TYPE_VIRT_VEC2_XY,
170 REG_TYPE_VIRT_VEC2_XZ,
171 REG_TYPE_VIRT_VEC2_XW,
172 REG_TYPE_VIRT_VEC2_YZ,
173 REG_TYPE_VIRT_VEC2_YW,
174 REG_TYPE_VIRT_VEC2_ZW,
175 REG_TYPE_VIRT_SCALAR_X,
176 REG_TYPE_VIRT_SCALAR_Y,
177 REG_TYPE_VIRT_SCALAR_Z,
178 REG_TYPE_VIRT_SCALAR_W,
179 REG_TYPE_VIRT_VEC2T_XY,
180 REG_TYPE_VIRT_VEC2T_ZW,
181 NUM_REG_TYPES,
182 } reg_type;
183
184 /* writemask when used as dest */
185 static const uint8_t
186 reg_writemask[NUM_REG_TYPES] = {
187 [REG_TYPE_VEC4] = 0xf,
188 [REG_TYPE_VIRT_SCALAR_X] = 0x1,
189 [REG_TYPE_VIRT_SCALAR_Y] = 0x2,
190 [REG_TYPE_VIRT_VEC2_XY] = 0x3,
191 [REG_TYPE_VIRT_VEC2T_XY] = 0x3,
192 [REG_TYPE_VIRT_SCALAR_Z] = 0x4,
193 [REG_TYPE_VIRT_VEC2_XZ] = 0x5,
194 [REG_TYPE_VIRT_VEC2_YZ] = 0x6,
195 [REG_TYPE_VIRT_VEC3_XYZ] = 0x7,
196 [REG_TYPE_VIRT_SCALAR_W] = 0x8,
197 [REG_TYPE_VIRT_VEC2_XW] = 0x9,
198 [REG_TYPE_VIRT_VEC2_YW] = 0xa,
199 [REG_TYPE_VIRT_VEC3_XYW] = 0xb,
200 [REG_TYPE_VIRT_VEC2_ZW] = 0xc,
201 [REG_TYPE_VIRT_VEC2T_ZW] = 0xc,
202 [REG_TYPE_VIRT_VEC3_XZW] = 0xd,
203 [REG_TYPE_VIRT_VEC3_YZW] = 0xe,
204 };
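/* Register types whose write masks overlap cannot live in the same physical
 * temp; ra_assign() below turns each such overlap into an RA conflict. E.g.
 * VIRT_VEC3_XYZ (0x7) and VIRT_SCALAR_W (0x8) may share a temp, while
 * VIRT_VEC2_XY (0x3) and VIRT_SCALAR_X (0x1) may not.
 */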
205
206 /* how to swizzle when used as a src */
207 static const uint8_t
208 reg_swiz[NUM_REG_TYPES] = {
209 [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
210 [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
211 [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
212 [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
213 [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
214 [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
215 [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
216 [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
217 [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
218 [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
219 [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
220 [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
221 [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
222 [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
223 [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
224 [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
225 [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
226 };
227
228 /* how to swizzle when used as a dest */
229 static const uint8_t
230 reg_dst_swiz[NUM_REG_TYPES] = {
231 [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
232 [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
233 [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
234 [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
235 [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
236 [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
237 [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
238 [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
239 [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
240 [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
241 [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
242 [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
243 [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
244 [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
245 [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
246 [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
247 [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
248 };
249
250 static inline int reg_get_type(int virt_reg)
251 {
252 return virt_reg % NUM_REG_TYPES;
253 }
254
255 static inline int reg_get_base(struct state *state, int virt_reg)
256 {
257 /* offset by 1 to avoid reserved position register */
258 if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
259 return virt_reg / NUM_REG_TYPES + 1;
260 return virt_reg / NUM_REG_TYPES;
261 }
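/* An RA register number encodes both pieces:
 *   virt_reg = base * NUM_REG_TYPES + type
 * e.g. with NUM_REG_TYPES == 17, virt_reg 21 is base 1, type 4
 * (REG_TYPE_VIRT_VEC3_YZW): the yzw part of temp t1, or t2 in fragment
 * shaders because of the reserved position register.
 */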
262
263 static inline int reg_get_class(int virt_reg)
264 {
265 switch (reg_get_type(virt_reg)) {
266 case REG_TYPE_VEC4:
267 return REG_CLASS_VEC4;
268 case REG_TYPE_VIRT_VEC3_XYZ:
269 case REG_TYPE_VIRT_VEC3_XYW:
270 case REG_TYPE_VIRT_VEC3_XZW:
271 case REG_TYPE_VIRT_VEC3_YZW:
272 return REG_CLASS_VIRT_VEC3;
273 case REG_TYPE_VIRT_VEC2_XY:
274 case REG_TYPE_VIRT_VEC2_XZ:
275 case REG_TYPE_VIRT_VEC2_XW:
276 case REG_TYPE_VIRT_VEC2_YZ:
277 case REG_TYPE_VIRT_VEC2_YW:
278 case REG_TYPE_VIRT_VEC2_ZW:
279 return REG_CLASS_VIRT_VEC2;
280 case REG_TYPE_VIRT_SCALAR_X:
281 case REG_TYPE_VIRT_SCALAR_Y:
282 case REG_TYPE_VIRT_SCALAR_Z:
283 case REG_TYPE_VIRT_SCALAR_W:
284 return REG_CLASS_VIRT_SCALAR;
285 case REG_TYPE_VIRT_VEC2T_XY:
286 case REG_TYPE_VIRT_VEC2T_ZW:
287 return REG_CLASS_VIRT_VEC2T;
288 }
289
290 assert(false);
291 return 0;
292 }
293
294 /* get unique ssa/reg index for nir_src */
295 static unsigned
296 src_index(nir_function_impl *impl, nir_src *src)
297 {
298 return src->is_ssa ? src->ssa->index : (src->reg.reg->index + impl->ssa_alloc);
299 }
300
301 /* get unique ssa/reg index for nir_dest */
302 static unsigned
303 dest_index(nir_function_impl *impl, nir_dest *dest)
304 {
305 return dest->is_ssa ? dest->ssa.index : (dest->reg.reg->index + impl->ssa_alloc);
306 }
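/* Both helpers map into one flat index space: SSA defs take [0, ssa_alloc)
 * and NIR registers follow at [ssa_alloc, ssa_alloc + reg_alloc), which is
 * exactly how ra_assign() sizes its live_map.
 */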
307
308 /* nir_src to allocated register */
309 static hw_src
310 ra_src(struct state *state, nir_src *src)
311 {
312 unsigned reg = ra_get_node_reg(state->g, state->live_map[src_index(state->impl, src)]);
313 return SRC_REG(reg_get_base(state, reg), reg_swiz[reg_get_type(reg)]);
314 }
315
316 static hw_src
317 get_src(struct state *state, nir_src *src)
318 {
319 if (!src->is_ssa)
320 return ra_src(state, src);
321
322 nir_instr *instr = src->ssa->parent_instr;
323
324 if (instr->pass_flags & BYPASS_SRC) {
325 assert(instr->type == nir_instr_type_alu);
326 nir_alu_instr *alu = nir_instr_as_alu(instr);
327 assert(alu->op == nir_op_mov);
328 return src_swizzle(get_src(state, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
329 }
330
331 switch (instr->type) {
332 case nir_instr_type_load_const:
333 return const_src(state, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
334 case nir_instr_type_intrinsic: {
335 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
336 switch (intr->intrinsic) {
337 case nir_intrinsic_load_input:
338 case nir_intrinsic_load_instance_id:
339 case nir_intrinsic_load_uniform:
340 return ra_src(state, src);
341 case nir_intrinsic_load_front_face:
342 return (hw_src) { .use = 1, .rgroup = INST_RGROUP_INTERNAL };
343 case nir_intrinsic_load_frag_coord:
344 return SRC_REG(0, INST_SWIZ_IDENTITY);
345 default:
346 assert(0);
347 break;
348 }
349 } break;
350 case nir_instr_type_alu:
351 case nir_instr_type_tex:
352 return ra_src(state, src);
353 case nir_instr_type_ssa_undef: {
354 /* return zero to deal with broken Blur demo */
355 nir_const_value value = CONST(0);
356 return src_swizzle(const_src(state, &value, 1), SWIZZLE(X,X,X,X));
357 }
358 default:
359 assert(0);
360 break;
361 }
362
363 return SRC_DISABLE;
364 }
365
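/* Fold one vecN use (dest != NULL) or mov use (dest == NULL) into the swizzle
 * and write mask accumulated by real_dest(): *swiz is re-routed through the
 * user's source swizzles and *mask keeps only the channels still written
 * along this chain.
 */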
366 static void
367 update_swiz_mask(nir_alu_instr *alu, nir_dest *dest, unsigned *swiz, unsigned *mask)
368 {
369 if (!swiz)
370 return;
371
372 bool is_vec = dest != NULL;
373 unsigned swizzle = 0, write_mask = 0;
374 for (unsigned i = 0; i < 4; i++) {
375 /* channel not written */
376 if (!(alu->dest.write_mask & (1 << i)))
377 continue;
378 /* src is different (only check for vecN) */
379 if (is_vec && alu->src[i].src.ssa != &dest->ssa)
380 continue;
381
382 unsigned src_swiz = is_vec ? alu->src[i].swizzle[0] : alu->src[0].swizzle[i];
383 swizzle |= (*swiz >> src_swiz * 2 & 3) << i * 2;
384 /* this channel isn't written through this chain */
385 if (*mask & (1 << src_swiz))
386 write_mask |= 1 << i;
387 }
388 *swiz = swizzle;
389 *mask = write_mask;
390 }
391
392 static bool
393 vec_dest_has_swizzle(nir_alu_instr *vec, nir_ssa_def *ssa)
394 {
395 for (unsigned i = 0; i < 4; i++) {
396 if (!(vec->dest.write_mask & (1 << i)) || vec->src[i].src.ssa != ssa)
397 continue;
398
399 if (vec->src[i].swizzle[0] != i)
400 return true;
401 }
402
403 /* don't deal with possible bypassed vec/mov chain */
404 nir_foreach_use(use_src, ssa) {
405 nir_instr *instr = use_src->parent_instr;
406 if (instr->type != nir_instr_type_alu)
407 continue;
408
409 nir_alu_instr *alu = nir_instr_as_alu(instr);
410
411 switch (alu->op) {
412 case nir_op_mov:
413 case nir_op_vec2:
414 case nir_op_vec3:
415 case nir_op_vec4:
416 return true;
417 default:
418 break;
419 }
420 }
421 return false;
422 }
423
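/* Example: for "b = fmul ...; c = vec4 b, x, y, z" the fmul's "real"
 * destination becomes the vec4's destination, with *swiz / *mask describing
 * where the fmul result lands in it, and the vec4 itself is flagged
 * BYPASS_DST so it is never emitted. A NULL return means the value needs no
 * register at all (producer flagged BYPASS_SRC), e.g. a mov feeding only a
 * tex source.
 */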
424 static nir_dest *
425 real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
426 {
427 if (!dest || !dest->is_ssa)
428 return dest;
429
430 bool can_bypass_src = !list_length(&dest->ssa.if_uses);
431 nir_instr *p_instr = dest->ssa.parent_instr;
432
433 /* if used by a vecN, the "real" destination becomes the vecN destination
434 * lower_alu guarantees that values used by a vecN are only used by that vecN
435 * we can apply the same logic to movs in some cases too
436 */
437 nir_foreach_use(use_src, &dest->ssa) {
438 nir_instr *instr = use_src->parent_instr;
439
440 /* src bypass check: for now only deal with tex src mov case
441 * note: for alu don't bypass mov for multiple uniform sources
442 */
443 switch (instr->type) {
444 case nir_instr_type_tex:
445 if (p_instr->type == nir_instr_type_alu &&
446 nir_instr_as_alu(p_instr)->op == nir_op_mov) {
447 break;
448 }
449 default:
450 can_bypass_src = false;
451 break;
452 }
453
454 if (instr->type != nir_instr_type_alu)
455 continue;
456
457 nir_alu_instr *alu = nir_instr_as_alu(instr);
458
459 switch (alu->op) {
460 case nir_op_vec2:
461 case nir_op_vec3:
462 case nir_op_vec4:
463 assert(list_length(&dest->ssa.if_uses) == 0);
464 nir_foreach_use(use_src, &dest->ssa)
465 assert(use_src->parent_instr == instr);
466
467 update_swiz_mask(alu, dest, swiz, mask);
468 break;
469 case nir_op_mov: {
470 switch (dest->ssa.parent_instr->type) {
471 case nir_instr_type_alu:
472 case nir_instr_type_tex:
473 break;
474 default:
475 continue;
476 }
477 if (list_length(&dest->ssa.if_uses) || list_length(&dest->ssa.uses) > 1)
478 continue;
479
480 update_swiz_mask(alu, NULL, swiz, mask);
481 break;
482 }
483 default:
484 continue;
485 }
486
487 assert(!(instr->pass_flags & BYPASS_SRC));
488 instr->pass_flags |= BYPASS_DST;
489 return real_dest(&alu->dest.dest, swiz, mask);
490 }
491
492 if (can_bypass_src && !(p_instr->pass_flags & BYPASS_DST)) {
493 p_instr->pass_flags |= BYPASS_SRC;
494 return NULL;
495 }
496
497 return dest;
498 }
499
500 /* get allocated dest register for nir_dest
501 * *p_swiz tells how the components need to be placed into register
502 */
503 static hw_dst
504 ra_dest(struct state *state, nir_dest *dest, unsigned *p_swiz)
505 {
506 unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
507 dest = real_dest(dest, &swiz, &mask);
508
509 unsigned r = ra_get_node_reg(state->g, state->live_map[dest_index(state->impl, dest)]);
510 unsigned t = reg_get_type(r);
511
512 *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);
513
514 return (hw_dst) {
515 .use = 1,
516 .reg = reg_get_base(state, r),
517 .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
518 };
519 }
520
521 /* if instruction dest needs a register, return nir_dest for it */
522 static nir_dest *
523 dest_for_instr(nir_instr *instr)
524 {
525 nir_dest *dest = NULL;
526
527 switch (instr->type) {
528 case nir_instr_type_alu:
529 dest = &nir_instr_as_alu(instr)->dest.dest;
530 break;
531 case nir_instr_type_tex:
532 dest = &nir_instr_as_tex(instr)->dest;
533 break;
534 case nir_instr_type_intrinsic: {
535 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
536 if (intr->intrinsic == nir_intrinsic_load_uniform ||
537 intr->intrinsic == nir_intrinsic_load_input ||
538 intr->intrinsic == nir_intrinsic_load_instance_id)
539 dest = &intr->dest;
540 }
541 default:
542 break;
543 }
544 return real_dest(dest, NULL, NULL);
545 }
546
547 struct live_def {
548 nir_instr *instr;
549 nir_dest *dest; /* cached dest_for_instr */
550 unsigned live_start, live_end; /* live range */
551 };
552
553 static void
554 range_include(struct live_def *def, unsigned index)
555 {
556 if (def->live_start > index)
557 def->live_start = index;
558 if (def->live_end < index)
559 def->live_end = index;
560 }
561
562 struct live_defs_state {
563 unsigned num_defs;
564 unsigned bitset_words;
565
566 nir_function_impl *impl;
567 nir_block *block; /* current block pointer */
568 unsigned index; /* current live index */
569
570 struct live_def *defs;
571 unsigned *live_map; /* to map ssa/reg index into defs array */
572
573 nir_block_worklist worklist;
574 };
575
576 static bool
577 init_liveness_block(nir_block *block,
578 struct live_defs_state *state)
579 {
580 block->live_in = reralloc(block, block->live_in, BITSET_WORD,
581 state->bitset_words);
582 memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));
583
584 block->live_out = reralloc(block, block->live_out, BITSET_WORD,
585 state->bitset_words);
586 memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));
587
588 nir_block_worklist_push_head(&state->worklist, block);
589
590 return true;
591 }
592
593 static bool
594 set_src_live(nir_src *src, void *void_state)
595 {
596 struct live_defs_state *state = void_state;
597
598 if (src->is_ssa) {
599 nir_instr *instr = src->ssa->parent_instr;
600
601 if (is_sysval(instr))
602 return true;
603
604 switch (instr->type) {
605 case nir_instr_type_load_const:
606 case nir_instr_type_ssa_undef:
607 return true;
608 case nir_instr_type_alu: {
609 /* alu op bypass */
610 nir_alu_instr *alu = nir_instr_as_alu(instr);
611 if (instr->pass_flags & BYPASS_SRC) {
612 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
613 set_src_live(&alu->src[i].src, state);
614 return true;
615 }
616 } break;
617 default:
618 break;
619 }
620 }
621
622 unsigned i = state->live_map[src_index(state->impl, src)];
623 assert(i != ~0u);
624
625 BITSET_SET(state->block->live_in, i);
626 range_include(&state->defs[i], state->index);
627
628 return true;
629 }
630
631 static bool
632 propagate_across_edge(nir_block *pred, nir_block *succ,
633 struct live_defs_state *state)
634 {
635 BITSET_WORD progress = 0;
636 for (unsigned i = 0; i < state->bitset_words; ++i) {
637 progress |= succ->live_in[i] & ~pred->live_out[i];
638 pred->live_out[i] |= succ->live_in[i];
639 }
640 return progress != 0;
641 }
642
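/* Compute a linear live range (live_start/live_end in "def slots") for every
 * value that needs a register, using a backwards block-worklist dataflow pass
 * modelled on nir_liveness. live_map[] translates the combined ssa/reg index
 * into an index into defs[]; the return value is the number of defs found.
 */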
643 static unsigned
644 live_defs(nir_function_impl *impl, struct live_def *defs, unsigned *live_map)
645 {
646 struct live_defs_state state;
647 unsigned block_live_index[impl->num_blocks + 1];
648
649 state.impl = impl;
650 state.defs = defs;
651 state.live_map = live_map;
652
653 state.num_defs = 0;
654 nir_foreach_block(block, impl) {
655 block_live_index[block->index] = state.num_defs;
656 nir_foreach_instr(instr, block) {
657 nir_dest *dest = dest_for_instr(instr);
658 if (!dest)
659 continue;
660
661 unsigned idx = dest_index(impl, dest);
662 /* register is already in defs */
663 if (live_map[idx] != ~0u)
664 continue;
665
666 defs[state.num_defs] = (struct live_def) {instr, dest, state.num_defs, 0};
667
668 /* input live from the start */
669 if (instr->type == nir_instr_type_intrinsic) {
670 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
671 if (intr->intrinsic == nir_intrinsic_load_input ||
672 intr->intrinsic == nir_intrinsic_load_instance_id)
673 defs[state.num_defs].live_start = 0;
674 }
675
676 live_map[idx] = state.num_defs;
677 state.num_defs++;
678 }
679 }
680 block_live_index[impl->num_blocks] = state.num_defs;
681
682 nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);
683
684 /* We now know how many unique ssa definitions we have and we can go
685 * ahead and allocate live_in and live_out sets and add all of the
686 * blocks to the worklist.
687 */
688 state.bitset_words = BITSET_WORDS(state.num_defs);
689 nir_foreach_block(block, impl) {
690 init_liveness_block(block, &state);
691 }
692
693 /* We're now ready to work through the worklist and update the liveness
694 * sets of each of the blocks. By the time we get to this point, every
695 * block in the function implementation has been pushed onto the
696 * worklist in reverse order. As long as we keep the worklist
697 * up-to-date as we go, everything will get covered.
698 */
699 while (!nir_block_worklist_is_empty(&state.worklist)) {
700 /* We pop them off in the reverse order we pushed them on. This way
701 * the first walk of the instructions is backwards so we only walk
702 * once in the case of no control flow.
703 */
704 nir_block *block = nir_block_worklist_pop_head(&state.worklist);
705 state.block = block;
706
707 memcpy(block->live_in, block->live_out,
708 state.bitset_words * sizeof(BITSET_WORD));
709
710 state.index = block_live_index[block->index + 1];
711
712 nir_if *following_if = nir_block_get_following_if(block);
713 if (following_if)
714 set_src_live(&following_if->condition, &state);
715
716 nir_foreach_instr_reverse(instr, block) {
717 /* when we come across the next "live" instruction, decrement index */
718 if (state.index && instr == defs[state.index - 1].instr) {
719 state.index--;
720 /* the only source of writes to registers is phis:
721 * we don't expect any partial write_mask alus
722 * so clearing live_in here is OK
723 */
724 BITSET_CLEAR(block->live_in, state.index);
725 }
726
727 /* don't set_src_live for instructions that aren't emitted (pass_flags set) */
728 if (instr->pass_flags)
729 continue;
730
731 unsigned index = state.index;
732
733 /* output live till the end */
734 if (instr->type == nir_instr_type_intrinsic) {
735 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
736 if (intr->intrinsic == nir_intrinsic_store_output)
737 state.index = ~0u;
738 }
739
740 nir_foreach_src(instr, set_src_live, &state);
741
742 state.index = index;
743 }
744 assert(state.index == block_live_index[block->index]);
745
746 /* Walk over all of the predecessors of the current block updating
747 * their live in with the live out of this one. If anything has
748 * changed, add the predecessor to the work list so that we ensure
749 * that the new information is used.
750 */
751 set_foreach(block->predecessors, entry) {
752 nir_block *pred = (nir_block *)entry->key;
753 if (propagate_across_edge(pred, block, &state))
754 nir_block_worklist_push_tail(&state.worklist, pred);
755 }
756 }
757
758 nir_block_worklist_fini(&state.worklist);
759
760 /* apply live_in/live_out to ranges */
761
762 nir_foreach_block(block, impl) {
763 BITSET_WORD tmp;
764 int i;
765
766 BITSET_FOREACH_SET(i, tmp, block->live_in, state.num_defs)
767 range_include(&state.defs[i], block_live_index[block->index]);
768
769 BITSET_FOREACH_SET(i, tmp, block->live_out, state.num_defs)
770 range_include(&state.defs[i], block_live_index[block->index + 1]);
771 }
772
773 return state.num_defs;
774 }
775
776 /* q values precomputed by register_allocate: q_values[B][C] is the maximum number of class-B registers a single class-C register can conflict with */
777 static unsigned int *q_values[] = {
778 (unsigned int[]) { 1, 2, 3, 4, 2 },
779 (unsigned int[]) { 3, 5, 6, 6, 5 },
780 (unsigned int[]) { 3, 4, 4, 4, 4 },
781 (unsigned int[]) { 1, 1, 1, 1, 1 },
782 (unsigned int[]) { 1, 2, 2, 2, 1 },
783 };
784
785 static void
786 ra_assign(struct state *state, nir_shader *shader)
787 {
788 struct ra_regs *regs = ra_alloc_reg_set(NULL, option(max_temps) *
789 NUM_REG_TYPES, false);
790
791 /* classes are always created starting from index 0, so the class index equals
792  * the class enum value, which represents a register with (c+1) components
793  */
794 for (int c = 0; c < NUM_REG_CLASSES; c++)
795 ra_alloc_reg_class(regs);
796 /* add each register of each class */
797 for (int r = 0; r < NUM_REG_TYPES * option(max_temps); r++)
798 ra_class_add_reg(regs, reg_get_class(r), r);
799 /* set conflicts */
800 for (int r = 0; r < option(max_temps); r++) {
801 for (int i = 0; i < NUM_REG_TYPES; i++) {
802 for (int j = 0; j < i; j++) {
803 if (reg_writemask[i] & reg_writemask[j]) {
804 ra_add_reg_conflict(regs, NUM_REG_TYPES * r + i,
805 NUM_REG_TYPES * r + j);
806 }
807 }
808 }
809 }
810 ra_set_finalize(regs, q_values);
811
812 nir_function_impl *impl = nir_shader_get_entrypoint(shader);
813
814 /* liveness and interference */
815
816 nir_index_blocks(impl);
817 nir_index_ssa_defs(impl);
818 nir_foreach_block(block, impl) {
819 nir_foreach_instr(instr, block)
820 instr->pass_flags = 0;
821 }
822
823 /* this gives an approximation/upper limit on how many nodes are needed
824 * (some ssa values do not represent an allocated register)
825 */
826 unsigned max_nodes = impl->ssa_alloc + impl->reg_alloc;
827 unsigned *live_map = ralloc_array(NULL, unsigned, max_nodes);
828 memset(live_map, 0xff, sizeof(unsigned) * max_nodes);
829 struct live_def *defs = rzalloc_array(NULL, struct live_def, max_nodes);
830
831 unsigned num_nodes = live_defs(impl, defs, live_map);
832 struct ra_graph *g = ra_alloc_interference_graph(regs, num_nodes);
833
834 /* set classes from num_components */
835 for (unsigned i = 0; i < num_nodes; i++) {
836 nir_instr *instr = defs[i].instr;
837 nir_dest *dest = defs[i].dest;
838
839 ra_set_node_class(g, i, nir_dest_num_components(*dest) - 1);
840
841 if (instr->type == nir_instr_type_alu && option(etna_new_transcendentals)) {
842 switch (nir_instr_as_alu(instr)->op) {
843 case nir_op_fdiv:
844 case nir_op_flog2:
845 case nir_op_fsin:
846 case nir_op_fcos:
847 assert(dest->is_ssa);
848 ra_set_node_class(g, i, REG_CLASS_VIRT_VEC2T);
849 default:
850 break;
851 }
852 }
853 }
854
855 nir_foreach_block(block, impl) {
856 nir_foreach_instr(instr, block) {
857 if (instr->type != nir_instr_type_intrinsic)
858 continue;
859
860 nir_dest *dest = dest_for_instr(instr);
861 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
862 unsigned reg;
863
864 switch (intr->intrinsic) {
865 case nir_intrinsic_store_output: {
866 /* don't want output to be swizzled
867 * TODO: better would be to set the type to X/XY/XYZ/XYZW
868 */
869 ra_set_node_class(g, live_map[src_index(impl, &intr->src[0])], REG_CLASS_VEC4);
870 } continue;
871 case nir_intrinsic_load_input:
872 reg = nir_intrinsic_base(intr) * NUM_REG_TYPES + (unsigned[]) {
873 REG_TYPE_VIRT_SCALAR_X,
874 REG_TYPE_VIRT_VEC2_XY,
875 REG_TYPE_VIRT_VEC3_XYZ,
876 REG_TYPE_VEC4,
877 }[nir_dest_num_components(*dest) - 1];
878 break;
879 case nir_intrinsic_load_instance_id:
880 reg = option(id_reg) * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
881 break;
882 default:
883 continue;
884 }
885
886 ra_set_node_reg(g, live_map[dest_index(impl, dest)], reg);
887 }
888 }
889
890 /* add interference for intersecting live ranges */
891 for (unsigned i = 0; i < num_nodes; i++) {
892 assert(defs[i].live_start < defs[i].live_end);
893 for (unsigned j = 0; j < i; j++) {
894 if (defs[i].live_start >= defs[j].live_end || defs[j].live_start >= defs[i].live_end)
895 continue;
896 ra_add_node_interference(g, i, j);
897 }
898 }
899
900 ralloc_free(defs);
901
902 /* Allocate registers */
903 ASSERTED bool ok = ra_allocate(g);
904 assert(ok);
905
906 state->g = g;
907 state->regs = regs;
908 state->live_map = live_map;
909 state->num_nodes = num_nodes;
910 }
911
912 static unsigned
913 ra_finish(struct state *state)
914 {
915 /* TODO: better way to get number of registers used? */
916 unsigned j = 0;
917 for (unsigned i = 0; i < state->num_nodes; i++) {
918 j = MAX2(j, reg_get_base(state, ra_get_node_reg(state->g, i)) + 1);
919 }
920
921 ralloc_free(state->g);
922 ralloc_free(state->regs);
923 ralloc_free(state->live_map);
924
925 return j;
926 }
927
928 static void
929 emit_alu(struct state *state, nir_alu_instr * alu)
930 {
931 const nir_op_info *info = &nir_op_infos[alu->op];
932
933 /* marked as dead instruction (vecN and other bypassed instr) */
934 if (alu->instr.pass_flags)
935 return;
936
937 assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));
938
939 unsigned dst_swiz;
940 hw_dst dst = ra_dest(state, &alu->dest.dest, &dst_swiz);
941
942 /* compose alu write_mask with RA write mask */
943 if (!alu->dest.dest.is_ssa)
944 dst.write_mask = inst_write_mask_compose(alu->dest.write_mask, dst.write_mask);
945
946 switch (alu->op) {
947 case nir_op_fdot2:
948 case nir_op_fdot3:
949 case nir_op_fdot4:
950 /* not per-component - don't compose dst_swiz */
951 dst_swiz = INST_SWIZ_IDENTITY;
952 break;
953 default:
954 break;
955 }
956
957 hw_src srcs[3];
958
959 for (int i = 0; i < info->num_inputs; i++) {
960 nir_alu_src *asrc = &alu->src[i];
961 hw_src src;
962
963 src = src_swizzle(get_src(state, &asrc->src), ALU_SWIZ(asrc));
964 src = src_swizzle(src, dst_swiz);
965
966 if (src.rgroup != INST_RGROUP_IMMEDIATE) {
967 src.neg = asrc->negate || (alu->op == nir_op_fneg);
968 src.abs = asrc->abs || (alu->op == nir_op_fabs);
969 } else {
970 assert(!asrc->negate && alu->op != nir_op_fneg);
971 assert(!asrc->abs && alu->op != nir_op_fabs);
972 }
973
974 srcs[i] = src;
975 }
976
977 emit(alu, alu->op, dst, srcs, alu->dest.saturate || (alu->op == nir_op_fsat));
978 }
979
980 static void
981 emit_tex(struct state *state, nir_tex_instr * tex)
982 {
983 unsigned dst_swiz;
984 hw_dst dst = ra_dest(state, &tex->dest, &dst_swiz);
985 nir_src *coord = NULL;
986 nir_src *lod_bias = NULL;
987
988 for (unsigned i = 0; i < tex->num_srcs; i++) {
989 switch (tex->src[i].src_type) {
990 case nir_tex_src_coord:
991 coord = &tex->src[i].src;
992 break;
993 case nir_tex_src_bias:
994 case nir_tex_src_lod:
995 assert(!lod_bias);
996 lod_bias = &tex->src[i].src;
997 break;
998 default:
999 assert(0);
1000 break;
1001 }
1002 }
1003
1004 emit(tex, tex->op, tex->sampler_index, dst_swiz, dst, get_src(state, coord),
1005 lod_bias ? get_src(state, lod_bias) : SRC_DISABLE);
1006 }
1007
1008 static void
1009 emit_intrinsic(struct state *state, nir_intrinsic_instr * intr)
1010 {
1011 switch (intr->intrinsic) {
1012 case nir_intrinsic_store_output:
1013 emit(output, nir_intrinsic_base(intr), get_src(state, &intr->src[0]));
1014 break;
1015 case nir_intrinsic_discard_if:
1016 emit(discard, get_src(state, &intr->src[0]));
1017 break;
1018 case nir_intrinsic_discard:
1019 emit(discard, SRC_DISABLE);
1020 break;
1021 case nir_intrinsic_load_uniform: {
1022 unsigned dst_swiz;
1023 hw_dst dst = ra_dest(state, &intr->dest, &dst_swiz);
1024 /* TODO: might have a problem with dst_swiz .. */
1025 emit(load_ubo, dst, get_src(state, &intr->src[0]), const_src(state, &UNIFORM_BASE(nir_intrinsic_base(intr) * 16), 1));
1026 } break;
1027 case nir_intrinsic_load_front_face:
1028 case nir_intrinsic_load_frag_coord:
1029 assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
1030 break;
1031 case nir_intrinsic_load_input:
1032 case nir_intrinsic_load_instance_id:
1033 break;
1034 default:
1035 assert(0);
1036 }
1037 }
1038
1039 static void
1040 emit_instr(struct state *state, nir_instr * instr)
1041 {
1042 switch (instr->type) {
1043 case nir_instr_type_alu:
1044 emit_alu(state, nir_instr_as_alu(instr));
1045 break;
1046 case nir_instr_type_tex:
1047 emit_tex(state, nir_instr_as_tex(instr));
1048 break;
1049 case nir_instr_type_intrinsic:
1050 emit_intrinsic(state, nir_instr_as_intrinsic(instr));
1051 break;
1052 case nir_instr_type_jump:
1053 assert(nir_instr_is_last(instr));
1054 case nir_instr_type_load_const:
1055 case nir_instr_type_ssa_undef:
1056 break;
1057 default:
1058 assert(0);
1059 break;
1060 }
1061 }
1062
1063 static void
1064 emit_block(struct state *state, nir_block * block)
1065 {
1066 emit(block_start, block->index);
1067
1068 nir_foreach_instr(instr, block)
1069 emit_instr(state, instr);
1070
1071 /* succs->index < block->index is for the loop case */
1072 nir_block *succs = block->successors[0];
1073 if (nir_block_ends_in_jump(block) || succs->index < block->index)
1074 emit(jump, succs->index, SRC_DISABLE);
1075 }
1076
1077 static void
1078 emit_cf_list(struct state *state, struct exec_list *list);
1079
1080 static void
1081 emit_if(struct state *state, nir_if * nif)
1082 {
1083 emit(jump, nir_if_first_else_block(nif)->index, get_src(state, &nif->condition));
1084 emit_cf_list(state, &nif->then_list);
1085
1086 /* jump at end of then_list to skip else_list
1087 * not needed if then_list already ends with a jump or else_list is empty
1088 */
1089 if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
1090 !nir_cf_list_is_empty_block(&nif->else_list))
1091 emit(jump, nir_if_last_else_block(nif)->successors[0]->index, SRC_DISABLE);
1092
1093 emit_cf_list(state, &nif->else_list);
1094 }
1095
1096 static void
1097 emit_cf_list(struct state *state, struct exec_list *list)
1098 {
1099 foreach_list_typed(nir_cf_node, node, node, list) {
1100 switch (node->type) {
1101 case nir_cf_node_block:
1102 emit_block(state, nir_cf_node_as_block(node));
1103 break;
1104 case nir_cf_node_if:
1105 emit_if(state, nir_cf_node_as_if(node));
1106 break;
1107 case nir_cf_node_loop:
1108 emit_cf_list(state, &nir_cf_node_as_loop(node)->body);
1109 break;
1110 default:
1111 assert(0);
1112 break;
1113 }
1114 }
1115 }
1116
1117 /* based on nir_lower_vec_to_movs */
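/* Insert a mov of vec->src[start_idx] (folding in any later vecN source that
 * reads the same value with the same modifiers) in front of the vecN, rewrite
 * those vecN sources to read the mov instead, and return the write mask of
 * the vecN channels now covered.
 */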
1118 static unsigned
1119 insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
1120 {
1121 assert(start_idx < nir_op_infos[vec->op].num_inputs);
1122 unsigned write_mask = (1u << start_idx);
1123
1124 nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
1125 nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
1126
1127 mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];
1128 mov->src[0].negate = vec->src[start_idx].negate;
1129 mov->src[0].abs = vec->src[start_idx].abs;
1130
1131 unsigned num_components = 1;
1132
1133 for (unsigned i = start_idx + 1; i < 4; i++) {
1134 if (!(vec->dest.write_mask & (1 << i)))
1135 continue;
1136
1137 if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
1138 vec->src[i].negate == vec->src[start_idx].negate &&
1139 vec->src[i].abs == vec->src[start_idx].abs) {
1140 write_mask |= (1 << i);
1141 mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
1142 num_components++;
1143 }
1144 }
1145
1146 mov->dest.write_mask = (1 << num_components) - 1;
1147 nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);
1148
1149 /* replace vec srcs with inserted mov */
1150 for (unsigned i = 0, j = 0; i < 4; i++) {
1151 if (!(write_mask & (1 << i)))
1152 continue;
1153
1154 nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, nir_src_for_ssa(&mov->dest.dest.ssa));
1155 vec->src[i].swizzle[0] = j++;
1156 }
1157
1158 nir_instr_insert_before(&vec->instr, &mov->instr);
1159
1160 return write_mask;
1161 }
1162
1163 /*
1164 * for vecN instructions:
1165 * -merge constant sources into a single src
1166 * -insert movs (nir_lower_vec_to_movs equivalent)
1167 * for non-vecN instructions:
1168 * -try to merge constants as single constant
1169 * -insert movs for multiple constants (pre-HALTI5)
1170 */
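/* Rough example of the vecN case: "vec4 a, 1.0, 2.0, b" ends up reading a
 * single "imm vec2 (1.0, 2.0)" for its y/z components, so only one constant
 * source remains; if every source were constant, the whole vecN would be
 * replaced by one immediate instead.
 */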
1171 static void
1172 lower_alu(struct state *state, nir_alu_instr *alu)
1173 {
1174 const nir_op_info *info = &nir_op_infos[alu->op];
1175
1176 nir_builder b;
1177 nir_builder_init(&b, state->impl);
1178 b.cursor = nir_before_instr(&alu->instr);
1179
1180 switch (alu->op) {
1181 case nir_op_vec2:
1182 case nir_op_vec3:
1183 case nir_op_vec4: {
1184 nir_const_value value[4];
1185 unsigned num_components = 0;
1186
1187 for (unsigned i = 0; i < info->num_inputs; i++) {
1188 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1189 if (cv)
1190 value[num_components++] = cv[alu->src[i].swizzle[0]];
1191 }
1192
1193 if (num_components <= 1) /* nothing to do */
1194 break;
1195
1196 nir_ssa_def *def = nir_build_imm(&b, num_components, 32, value);
1197
1198 if (num_components == info->num_inputs) {
1199 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(def));
1200 nir_instr_remove(&alu->instr);
1201 return;
1202 }
1203
1204 for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
1205 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1206 if (!cv)
1207 continue;
1208
1209 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
1210 alu->src[i].swizzle[0] = j++;
1211 }
1212 } break;
1213 default: {
1214 if (!option(single_const_src))
1215 return;
1216
1217 /* pre-GC7000L can only have 1 uniform src per instruction */
1218 nir_const_value value[4] = {};
1219 uint8_t swizzle[4][4] = {};
1220 unsigned swiz_max = 0, num_const = 0;
1221
1222 for (unsigned i = 0; i < info->num_inputs; i++) {
1223 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1224 if (!cv)
1225 continue;
1226
1227 unsigned num_components = info->input_sizes[i] ?: alu->dest.dest.ssa.num_components;
1228 for (unsigned j = 0; j < num_components; j++) {
1229 int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
1230 swizzle[i][j] = idx;
1231 swiz_max = MAX2(swiz_max, (unsigned) idx);
1232 }
1233 num_const++;
1234 }
1235
1236 /* nothing to do */
1237 if (num_const <= 1)
1238 return;
1239
1240 /* resolve with single combined const src */
1241 if (swiz_max < 4) {
1242 nir_ssa_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);
1243
1244 for (unsigned i = 0; i < info->num_inputs; i++) {
1245 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1246 if (!cv)
1247 continue;
1248
1249 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
1250
1251 for (unsigned j = 0; j < 4; j++)
1252 alu->src[i].swizzle[j] = swizzle[i][j];
1253 }
1254 return;
1255 }
1256
1257 /* resolve with movs */
1258 num_const = 0;
1259 for (unsigned i = 0; i < info->num_inputs; i++) {
1260 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1261 if (!cv)
1262 continue;
1263
1264 num_const++;
1265 if (num_const == 1)
1266 continue;
1267
1268 nir_ssa_def *mov = nir_mov(&b, alu->src[i].src.ssa);
1269 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
1270 }
1271 } return;
1272 }
1273
1274 unsigned finished_write_mask = 0;
1275 for (unsigned i = 0; i < 4; i++) {
1276 if (!(alu->dest.write_mask & (1 << i)))
1277 continue;
1278
1279 nir_ssa_def *ssa = alu->src[i].src.ssa;
1280
1281 /* check that the vecN instruction is the only user of this value */
1282 bool need_mov = list_length(&ssa->if_uses) != 0;
1283 nir_foreach_use(use_src, ssa) {
1284 if (use_src->parent_instr != &alu->instr)
1285 need_mov = true;
1286 }
1287
1288 nir_instr *instr = ssa->parent_instr;
1289 switch (instr->type) {
1290 case nir_instr_type_alu:
1291 case nir_instr_type_tex:
1292 break;
1293 case nir_instr_type_intrinsic:
1294 if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
1295 need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa);
1296 break;
1297 }
1298 default:
1299 need_mov = true;
1300 }
1301
1302 if (need_mov && !(finished_write_mask & (1 << i)))
1303 finished_write_mask |= insert_vec_mov(alu, i, state->shader);
1304 }
1305 }
1306
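/* Overall flow: lower_alu() merges constants and inserts movs, load_const
 * values get tagged with CONST(), non-dynamic load_uniforms are folded to
 * tagged immediates, store_outputs of sysvals/constants get an extra mov,
 * then the shader leaves SSA, registers are assigned (ra_assign) and code is
 * emitted block by block through the etna_emit_* callbacks.
 */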
1307 static bool
1308 emit_shader(nir_shader *shader, const struct emit_options *options,
1309 unsigned *num_temps, unsigned *num_consts)
1310 {
1311 struct state state = {
1312 .options = options,
1313 .shader = shader,
1314 .impl = nir_shader_get_entrypoint(shader),
1315 };
1316
1317 nir_builder b;
1318 nir_builder_init(&b, state.impl);
1319
1320 /* convert non-dynamic uniform loads to constants, etc */
1321 nir_foreach_block(block, state.impl) {
1322 nir_foreach_instr_safe(instr, block) {
1323 switch(instr->type) {
1324 case nir_instr_type_alu:
1325 /* deals with vecN and const srcs */
1326 lower_alu(&state, nir_instr_as_alu(instr));
1327 break;
1328 case nir_instr_type_load_const: {
1329 nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
1330 for (unsigned i = 0; i < load_const->def.num_components; i++)
1331 load_const->value[i] = CONST(load_const->value[i].u32);
1332 } break;
1333 case nir_instr_type_intrinsic: {
1334 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1335 if (intr->intrinsic != nir_intrinsic_load_uniform)
1336 break;
1337 nir_const_value *off = nir_src_as_const_value(intr->src[0]);
1338 if (!off || off[0].u64 >> 32 != ETNA_IMMEDIATE_CONSTANT)
1339 break;
1340
1341 unsigned base = nir_intrinsic_base(intr) + off[0].u32 / 16;
1342 nir_const_value value[4];
1343
1344 for (unsigned i = 0; i < intr->dest.ssa.num_components; i++) {
1345 if (nir_intrinsic_base(intr) < 0)
1346 value[i] = TEXSCALE(~nir_intrinsic_base(intr), i);
1347 else
1348 value[i] = UNIFORM(base * 4 + i);
1349 }
1350
1351 b.cursor = nir_after_instr(instr);
1352 nir_ssa_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);
1353
1354 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(def));
1355 nir_instr_remove(instr);
1356 } break;
1357 default:
1358 break;
1359 }
1360 }
1361 }
1362
1363 /* add mov for any store output using sysval/const */
1364 nir_foreach_block(block, state.impl) {
1365 nir_foreach_instr_safe(instr, block) {
1366 if (instr->type != nir_instr_type_intrinsic)
1367 continue;
1368
1369 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1370
1371 switch (intr->intrinsic) {
1372 case nir_intrinsic_store_output: {
1373 nir_src *src = &intr->src[0];
1374 if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr)) {
1375 b.cursor = nir_before_instr(instr);
1376 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(nir_mov(&b, src->ssa)));
1377 }
1378 } break;
1379 default:
1380 break;
1381 }
1382 }
1383 }
1384
1385 /* call directly to avoid validation (the rewritten load_const values don't pass validation at this point) */
1386 nir_convert_from_ssa(shader, true);
1387 nir_opt_dce(shader);
1388
1389 ra_assign(&state, shader);
1390
1391 emit_cf_list(&state, &nir_shader_get_entrypoint(shader)->body);
1392
1393 *num_temps = ra_finish(&state);
1394 *num_consts = state.const_count;
1395 return true;
1396 }