etnaviv: delete not used struct
[mesa.git] / src / gallium / drivers / etnaviv / etnaviv_compiler_nir_emit.h
/*
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 */

#include "etnaviv_asm.h"
#include "etnaviv_context.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"
#include "util/register_allocate.h"

#define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
#define SRC_DISABLE ((hw_src){})
#define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_UNIFORM_0, .reg=idx, .swiz=s})
#define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_TEMP, .reg=idx, .swiz=s})

#define emit(type, args...) etna_emit_##type(state->c, args)

typedef struct etna_inst_dst hw_dst;
typedef struct etna_inst_src hw_src;

enum {
   BYPASS_DST = 1,
   BYPASS_SRC = 2,
};

struct state {
   struct etna_compile *c;

   unsigned const_count;

   nir_shader *shader;
   nir_function_impl *impl;

   /* ra state */
   struct ra_graph *g;
   struct ra_regs *regs;
   unsigned *live_map;
   unsigned num_nodes;
};

#define compile_error(ctx, args...) ({ \
   printf(args); \
   ctx->error = true; \
   assert(0); \
})

static inline hw_src
src_swizzle(hw_src src, unsigned swizzle)
{
   if (src.rgroup != INST_RGROUP_IMMEDIATE)
      src.swiz = inst_swiz_compose(src.swiz, swizzle);

   return src;
}

static inline bool is_sysval(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   return intr->intrinsic == nir_intrinsic_load_front_face ||
          intr->intrinsic == nir_intrinsic_load_frag_coord;
}

/* constants are represented as 64-bit ints
 * 32-bit for the value and 32-bit for the type (imm, uniform, etc)
 */

#define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
#define CONST(x) CONST_VAL(ETNA_IMMEDIATE_CONSTANT, x)
#define UNIFORM(x) CONST_VAL(ETNA_IMMEDIATE_UNIFORM, x)
#define TEXSCALE(x, i) CONST_VAL(ETNA_IMMEDIATE_TEXRECT_SCALE_X + (i), x)

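/* add a constant value to a vec4 uniform slot: return the first component
 * that either already holds the value or is still free, or -1 if the slot
 * is full
 */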
static int
const_add(uint64_t *c, uint64_t value)
{
   for (unsigned i = 0; i < 4; i++) {
      if (c[i] == value || !c[i]) {
         c[i] = value;
         return i;
      }
   }
   return -1;
}

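/* get a hardware source operand for a constant: use an inline immediate when
 * the hardware supports it, otherwise allocate (or reuse) components in the
 * uniform file and return a swizzled uniform source
 */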
static hw_src
const_src(struct state *state, nir_const_value *value, unsigned num_components)
{
   /* use inline immediates if possible */
   if (state->c->specs->halti >= 2 && num_components == 1 &&
       value[0].u64 >> 32 == ETNA_IMMEDIATE_CONSTANT) {
      uint32_t bits = value[0].u32;

      /* "float" - shifted by 12 */
      if ((bits & 0xfff) == 0)
         return etna_immediate_src(0, bits >> 12);

      /* "unsigned" - raw 20 bit value */
      if (bits < (1 << 20))
         return etna_immediate_src(2, bits);

      /* "signed" - sign extended 20-bit (sign included) value */
      if (bits >= 0xfff80000)
         return etna_immediate_src(1, bits);
   }

   unsigned i;
   int swiz = -1;
   for (i = 0; swiz < 0; i++) {
      uint64_t *a = &state->c->consts[i*4];
      uint64_t save[4];
      memcpy(save, a, sizeof(save));
      swiz = 0;
      for (unsigned j = 0; j < num_components; j++) {
         int c = const_add(a, value[j].u64);
         if (c < 0) {
            memcpy(a, save, sizeof(save));
            swiz = -1;
            break;
         }
         swiz |= c << j * 2;
      }
   }

   assert(i <= ETNA_MAX_IMM / 4);
   state->const_count = MAX2(state->const_count, i);

   return SRC_CONST(i - 1, swiz);
}

/* Swizzles and write masks can be used to layer virtual non-interfering
 * registers on top of the real VEC4 registers. For example, the virtual
 * VEC3_XYZ register and the virtual SCALAR_W register that use the same
 * physical VEC4 base register do not interfere.
 */
enum {
   REG_CLASS_VIRT_SCALAR,
   REG_CLASS_VIRT_VEC2,
   REG_CLASS_VIRT_VEC3,
   REG_CLASS_VEC4,
   /* special vec2 class for fast transcendentals, limited to XY or ZW */
   REG_CLASS_VIRT_VEC2T,
   /* special classes for LOAD - contiguous components */
   REG_CLASS_VIRT_VEC2C,
   REG_CLASS_VIRT_VEC3C,
   NUM_REG_CLASSES,
} reg_class;

enum {
   REG_TYPE_VEC4,
   REG_TYPE_VIRT_VEC3_XYZ,
   REG_TYPE_VIRT_VEC3_XYW,
   REG_TYPE_VIRT_VEC3_XZW,
   REG_TYPE_VIRT_VEC3_YZW,
   REG_TYPE_VIRT_VEC2_XY,
   REG_TYPE_VIRT_VEC2_XZ,
   REG_TYPE_VIRT_VEC2_XW,
   REG_TYPE_VIRT_VEC2_YZ,
   REG_TYPE_VIRT_VEC2_YW,
   REG_TYPE_VIRT_VEC2_ZW,
   REG_TYPE_VIRT_SCALAR_X,
   REG_TYPE_VIRT_SCALAR_Y,
   REG_TYPE_VIRT_SCALAR_Z,
   REG_TYPE_VIRT_SCALAR_W,
   REG_TYPE_VIRT_VEC2T_XY,
   REG_TYPE_VIRT_VEC2T_ZW,
   REG_TYPE_VIRT_VEC2C_XY,
   REG_TYPE_VIRT_VEC2C_YZ,
   REG_TYPE_VIRT_VEC2C_ZW,
   REG_TYPE_VIRT_VEC3C_XYZ,
   REG_TYPE_VIRT_VEC3C_YZW,
   NUM_REG_TYPES,
} reg_type;

/* writemask when used as dest */
static const uint8_t
reg_writemask[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = 0xf,
   [REG_TYPE_VIRT_SCALAR_X] = 0x1,
   [REG_TYPE_VIRT_SCALAR_Y] = 0x2,
   [REG_TYPE_VIRT_VEC2_XY] = 0x3,
   [REG_TYPE_VIRT_VEC2T_XY] = 0x3,
   [REG_TYPE_VIRT_VEC2C_XY] = 0x3,
   [REG_TYPE_VIRT_SCALAR_Z] = 0x4,
   [REG_TYPE_VIRT_VEC2_XZ] = 0x5,
   [REG_TYPE_VIRT_VEC2_YZ] = 0x6,
   [REG_TYPE_VIRT_VEC2C_YZ] = 0x6,
   [REG_TYPE_VIRT_VEC3_XYZ] = 0x7,
   [REG_TYPE_VIRT_VEC3C_XYZ] = 0x7,
   [REG_TYPE_VIRT_SCALAR_W] = 0x8,
   [REG_TYPE_VIRT_VEC2_XW] = 0x9,
   [REG_TYPE_VIRT_VEC2_YW] = 0xa,
   [REG_TYPE_VIRT_VEC3_XYW] = 0xb,
   [REG_TYPE_VIRT_VEC2_ZW] = 0xc,
   [REG_TYPE_VIRT_VEC2T_ZW] = 0xc,
   [REG_TYPE_VIRT_VEC2C_ZW] = 0xc,
   [REG_TYPE_VIRT_VEC3_XZW] = 0xd,
   [REG_TYPE_VIRT_VEC3_YZW] = 0xe,
   [REG_TYPE_VIRT_VEC3C_YZW] = 0xe,
};

/* how to swizzle when used as a src */
static const uint8_t
reg_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(Y, Z, W, X),
};

/* how to swizzle when used as a dest */
static const uint8_t
reg_dst_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(X, X, Y, Z),
};

static inline int reg_get_type(int virt_reg)
{
   return virt_reg % NUM_REG_TYPES;
}

static inline int reg_get_base(struct state *state, int virt_reg)
{
   /* offset by 1 to avoid reserved position register */
   if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
      return (virt_reg / NUM_REG_TYPES + 1) % ETNA_MAX_TEMPS;
   return virt_reg / NUM_REG_TYPES;
}

/* use "r63.z" for depth reg, it will wrap around to r0.z by reg_get_base
 * (fs registers are offset by 1 to avoid reserving r0)
 */
#define REG_FRAG_DEPTH ((ETNA_MAX_TEMPS - 1) * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Z)

static inline int reg_get_class(int virt_reg)
{
   switch (reg_get_type(virt_reg)) {
   case REG_TYPE_VEC4:
      return REG_CLASS_VEC4;
   case REG_TYPE_VIRT_VEC3_XYZ:
   case REG_TYPE_VIRT_VEC3_XYW:
   case REG_TYPE_VIRT_VEC3_XZW:
   case REG_TYPE_VIRT_VEC3_YZW:
      return REG_CLASS_VIRT_VEC3;
   case REG_TYPE_VIRT_VEC2_XY:
   case REG_TYPE_VIRT_VEC2_XZ:
   case REG_TYPE_VIRT_VEC2_XW:
   case REG_TYPE_VIRT_VEC2_YZ:
   case REG_TYPE_VIRT_VEC2_YW:
   case REG_TYPE_VIRT_VEC2_ZW:
      return REG_CLASS_VIRT_VEC2;
   case REG_TYPE_VIRT_SCALAR_X:
   case REG_TYPE_VIRT_SCALAR_Y:
   case REG_TYPE_VIRT_SCALAR_Z:
   case REG_TYPE_VIRT_SCALAR_W:
      return REG_CLASS_VIRT_SCALAR;
   case REG_TYPE_VIRT_VEC2T_XY:
   case REG_TYPE_VIRT_VEC2T_ZW:
      return REG_CLASS_VIRT_VEC2T;
   case REG_TYPE_VIRT_VEC2C_XY:
   case REG_TYPE_VIRT_VEC2C_YZ:
   case REG_TYPE_VIRT_VEC2C_ZW:
      return REG_CLASS_VIRT_VEC2C;
   case REG_TYPE_VIRT_VEC3C_XYZ:
   case REG_TYPE_VIRT_VEC3C_YZW:
      return REG_CLASS_VIRT_VEC3C;
   }

   assert(false);
   return 0;
}

/* get unique ssa/reg index for nir_src */
static unsigned
src_index(nir_function_impl *impl, nir_src *src)
{
   return src->is_ssa ? src->ssa->index : (src->reg.reg->index + impl->ssa_alloc);
}

/* get unique ssa/reg index for nir_dest */
static unsigned
dest_index(nir_function_impl *impl, nir_dest *dest)
{
   return dest->is_ssa ? dest->ssa.index : (dest->reg.reg->index + impl->ssa_alloc);
}

/* nir_src to allocated register */
static hw_src
ra_src(struct state *state, nir_src *src)
{
   unsigned reg = ra_get_node_reg(state->g, state->live_map[src_index(state->impl, src)]);
   return SRC_REG(reg_get_base(state, reg), reg_swiz[reg_get_type(reg)]);
}

static hw_src
get_src(struct state *state, nir_src *src)
{
   if (!src->is_ssa)
      return ra_src(state, src);

   nir_instr *instr = src->ssa->parent_instr;

   if (instr->pass_flags & BYPASS_SRC) {
      assert(instr->type == nir_instr_type_alu);
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      assert(alu->op == nir_op_mov);
      return src_swizzle(get_src(state, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
   }

   switch (instr->type) {
   case nir_instr_type_load_const:
      return const_src(state, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      switch (intr->intrinsic) {
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
         return ra_src(state, src);
      case nir_intrinsic_load_front_face:
         return (hw_src) { .use = 1, .rgroup = INST_RGROUP_INTERNAL };
      case nir_intrinsic_load_frag_coord:
         return SRC_REG(0, INST_SWIZ_IDENTITY);
      default:
         compile_error(state->c, "Unhandled NIR intrinsic type: %s\n",
                       nir_intrinsic_infos[intr->intrinsic].name);
         break;
      }
   } break;
   case nir_instr_type_alu:
   case nir_instr_type_tex:
      return ra_src(state, src);
   case nir_instr_type_ssa_undef: {
      /* return zero to deal with broken Blur demo */
      nir_const_value value = CONST(0);
      return src_swizzle(const_src(state, &value, 1), SWIZZLE(X,X,X,X));
   }
   default:
      compile_error(state->c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }

   return SRC_DISABLE;
}

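/* fold a bypassed vecN/mov use back into the swizzle and write mask of the
 * instruction that feeds it; dest != NULL means the user is a vecN, otherwise
 * it is a single mov
 */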
static void
update_swiz_mask(nir_alu_instr *alu, nir_dest *dest, unsigned *swiz, unsigned *mask)
{
   if (!swiz)
      return;

   bool is_vec = dest != NULL;
   unsigned swizzle = 0, write_mask = 0;
   for (unsigned i = 0; i < 4; i++) {
      /* channel not written */
      if (!(alu->dest.write_mask & (1 << i)))
         continue;
      /* src is different (only check for vecN) */
      if (is_vec && alu->src[i].src.ssa != &dest->ssa)
         continue;

      unsigned src_swiz = is_vec ? alu->src[i].swizzle[0] : alu->src[0].swizzle[i];
      swizzle |= (*swiz >> src_swiz * 2 & 3) << i * 2;
      /* this channel isn't written through this chain */
      if (*mask & (1 << src_swiz))
         write_mask |= 1 << i;
   }
   *swiz = swizzle;
   *mask = write_mask;
}

static bool
vec_dest_has_swizzle(nir_alu_instr *vec, nir_ssa_def *ssa)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)) || vec->src[i].src.ssa != ssa)
         continue;

      if (vec->src[i].swizzle[0] != i)
         return true;
   }

   /* don't deal with possible bypassed vec/mov chain */
   nir_foreach_use(use_src, ssa) {
      nir_instr *instr = use_src->parent_instr;
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_mov:
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         return true;
      default:
         break;
      }
   }
   return false;
}

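/* resolve the destination an instruction should actually write: when the only
 * users are vecN/mov instructions that can be bypassed, follow the chain to
 * the final destination and update *swiz / *mask accordingly; returns NULL if
 * the instruction itself is bypassed as a source
 */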
static nir_dest *
real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
{
   if (!dest || !dest->is_ssa)
      return dest;

   bool can_bypass_src = !list_length(&dest->ssa.if_uses);
   nir_instr *p_instr = dest->ssa.parent_instr;

   /* if used by a vecN, the "real" destination becomes the vecN destination
    * lower_alu guarantees that values used by a vecN are only used by that vecN
    * we can apply the same logic to movs in some cases too
    */
   nir_foreach_use(use_src, &dest->ssa) {
      nir_instr *instr = use_src->parent_instr;

      /* src bypass check: for now only deal with tex src mov case
       * note: for alu don't bypass mov for multiple uniform sources
       */
      switch (instr->type) {
      case nir_instr_type_tex:
         if (p_instr->type == nir_instr_type_alu &&
             nir_instr_as_alu(p_instr)->op == nir_op_mov) {
            break;
         }
         /* fall through */
      default:
         can_bypass_src = false;
         break;
      }

      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         assert(list_length(&dest->ssa.if_uses) == 0);
         nir_foreach_use(use_src, &dest->ssa)
            assert(use_src->parent_instr == instr);

         update_swiz_mask(alu, dest, swiz, mask);
         break;
      case nir_op_mov: {
         switch (dest->ssa.parent_instr->type) {
         case nir_instr_type_alu:
         case nir_instr_type_tex:
            break;
         default:
            continue;
         }
         if (list_length(&dest->ssa.if_uses) || list_length(&dest->ssa.uses) > 1)
            continue;

         update_swiz_mask(alu, NULL, swiz, mask);
         break;
      }
      default:
         continue;
      }

      assert(!(instr->pass_flags & BYPASS_SRC));
      instr->pass_flags |= BYPASS_DST;
      return real_dest(&alu->dest.dest, swiz, mask);
   }

   if (can_bypass_src && !(p_instr->pass_flags & BYPASS_DST)) {
      p_instr->pass_flags |= BYPASS_SRC;
      return NULL;
   }

   return dest;
}

/* get allocated dest register for nir_dest
 * *p_swiz tells how the components need to be placed into register
 */
static hw_dst
ra_dest(struct state *state, nir_dest *dest, unsigned *p_swiz)
{
   unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
   dest = real_dest(dest, &swiz, &mask);

   unsigned r = ra_get_node_reg(state->g, state->live_map[dest_index(state->impl, dest)]);
   unsigned t = reg_get_type(r);

   *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);

   return (hw_dst) {
      .use = 1,
      .reg = reg_get_base(state, r),
      .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
   };
}

/* if instruction dest needs a register, return nir_dest for it */
static nir_dest *
dest_for_instr(nir_instr *instr)
{
   nir_dest *dest = NULL;

   switch (instr->type) {
   case nir_instr_type_alu:
      dest = &nir_instr_as_alu(instr)->dest.dest;
      break;
   case nir_instr_type_tex:
      dest = &nir_instr_as_tex(instr)->dest;
      break;
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (intr->intrinsic == nir_intrinsic_load_uniform ||
          intr->intrinsic == nir_intrinsic_load_ubo ||
          intr->intrinsic == nir_intrinsic_load_input ||
          intr->intrinsic == nir_intrinsic_load_instance_id)
         dest = &intr->dest;
   } break;
   case nir_instr_type_deref:
      return NULL;
   default:
      break;
   }
   return real_dest(dest, NULL, NULL);
}

struct live_def {
   nir_instr *instr;
   nir_dest *dest; /* cached dest_for_instr */
   unsigned live_start, live_end; /* live range */
};

static void
range_include(struct live_def *def, unsigned index)
{
   if (def->live_start > index)
      def->live_start = index;
   if (def->live_end < index)
      def->live_end = index;
}

struct live_defs_state {
   unsigned num_defs;
   unsigned bitset_words;

   nir_function_impl *impl;
   nir_block *block; /* current block pointer */
   unsigned index; /* current live index */

   struct live_def *defs;
   unsigned *live_map; /* to map ssa/reg index into defs array */

   nir_block_worklist worklist;
};

static bool
init_liveness_block(nir_block *block,
                    struct live_defs_state *state)
{
   block->live_in = reralloc(block, block->live_in, BITSET_WORD,
                             state->bitset_words);
   memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));

   block->live_out = reralloc(block, block->live_out, BITSET_WORD,
                              state->bitset_words);
   memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));

   nir_block_worklist_push_head(&state->worklist, block);

   return true;
}

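/* mark a source as live in the current block and extend the live range of its
 * def up to the current instruction index; bypassed, constant and sysval
 * sources do not need a register and are skipped (bypassed movs recurse into
 * their own sources)
 */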
static bool
set_src_live(nir_src *src, void *void_state)
{
   struct live_defs_state *state = void_state;

   if (src->is_ssa) {
      nir_instr *instr = src->ssa->parent_instr;

      if (is_sysval(instr) || instr->type == nir_instr_type_deref)
         return true;

      switch (instr->type) {
      case nir_instr_type_load_const:
      case nir_instr_type_ssa_undef:
         return true;
      case nir_instr_type_alu: {
         /* alu op bypass */
         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (instr->pass_flags & BYPASS_SRC) {
            for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
               set_src_live(&alu->src[i].src, state);
            return true;
         }
      } break;
      default:
         break;
      }
   }

   unsigned i = state->live_map[src_index(state->impl, src)];
   assert(i != ~0u);

   BITSET_SET(state->block->live_in, i);
   range_include(&state->defs[i], state->index);

   return true;
}

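/* merge the successor's live-in set into the predecessor's live-out set;
 * returns true if any new bits were added
 */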
static bool
propagate_across_edge(nir_block *pred, nir_block *succ,
                      struct live_defs_state *state)
{
   BITSET_WORD progress = 0;
   for (unsigned i = 0; i < state->bitset_words; ++i) {
      progress |= succ->live_in[i] & ~pred->live_out[i];
      pred->live_out[i] |= succ->live_in[i];
   }
   return progress != 0;
}

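/* compute a live range for every value that needs a register, using a
 * backwards dataflow walk over the blocks; fills the defs array and live_map
 * (ssa/reg index -> defs index) and returns the number of defs
 */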
static unsigned
live_defs(nir_function_impl *impl, struct live_def *defs, unsigned *live_map)
{
   struct live_defs_state state;
   unsigned block_live_index[impl->num_blocks + 1];

   state.impl = impl;
   state.defs = defs;
   state.live_map = live_map;

   state.num_defs = 0;
   nir_foreach_block(block, impl) {
      block_live_index[block->index] = state.num_defs;
      nir_foreach_instr(instr, block) {
         nir_dest *dest = dest_for_instr(instr);
         if (!dest)
            continue;

         unsigned idx = dest_index(impl, dest);
         /* register is already in defs */
         if (live_map[idx] != ~0u)
            continue;

         defs[state.num_defs] = (struct live_def) {instr, dest, state.num_defs, 0};

         /* input live from the start */
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_load_input ||
                intr->intrinsic == nir_intrinsic_load_instance_id)
               defs[state.num_defs].live_start = 0;
         }

         live_map[idx] = state.num_defs;
         state.num_defs++;
      }
   }
   block_live_index[impl->num_blocks] = state.num_defs;

   nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);

   /* We now know how many unique ssa definitions we have and we can go
    * ahead and allocate live_in and live_out sets and add all of the
    * blocks to the worklist.
    */
   state.bitset_words = BITSET_WORDS(state.num_defs);
   nir_foreach_block(block, impl) {
      init_liveness_block(block, &state);
   }

   /* We're now ready to work through the worklist and update the liveness
    * sets of each of the blocks. By the time we get to this point, every
    * block in the function implementation has been pushed onto the
    * worklist in reverse order. As long as we keep the worklist
    * up-to-date as we go, everything will get covered.
    */
   while (!nir_block_worklist_is_empty(&state.worklist)) {
      /* We pop them off in the reverse order we pushed them on. This way
       * the first walk of the instructions is backwards so we only walk
       * once in the case of no control flow.
       */
      nir_block *block = nir_block_worklist_pop_head(&state.worklist);
      state.block = block;

      memcpy(block->live_in, block->live_out,
             state.bitset_words * sizeof(BITSET_WORD));

      state.index = block_live_index[block->index + 1];

      nir_if *following_if = nir_block_get_following_if(block);
      if (following_if)
         set_src_live(&following_if->condition, &state);

      nir_foreach_instr_reverse(instr, block) {
         /* when we come across the next "live" instruction, decrement index */
         if (state.index && instr == defs[state.index - 1].instr) {
            state.index--;
            /* the only source of writes to registers is phis:
             * we don't expect any partial write_mask alus
             * so clearing live_in here is OK
             */
            BITSET_CLEAR(block->live_in, state.index);
         }

         /* don't set_src_live for not-emitted instructions */
         if (instr->pass_flags)
            continue;

         unsigned index = state.index;

         /* output live till the end */
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_store_deref)
               state.index = ~0u;
         }

         nir_foreach_src(instr, set_src_live, &state);

         state.index = index;
      }
      assert(state.index == block_live_index[block->index]);

      /* Walk over all of the predecessors of the current block updating
       * their live in with the live out of this one. If anything has
       * changed, add the predecessor to the work list so that we ensure
       * that the new information is used.
       */
      set_foreach(block->predecessors, entry) {
         nir_block *pred = (nir_block *)entry->key;
         if (propagate_across_edge(pred, block, &state))
            nir_block_worklist_push_tail(&state.worklist, pred);
      }
   }

   nir_block_worklist_fini(&state.worklist);

   /* apply live_in/live_out to ranges */

   nir_foreach_block(block, impl) {
      int i;

      BITSET_FOREACH_SET(i, block->live_in, state.num_defs)
         range_include(&state.defs[i], block_live_index[block->index]);

      BITSET_FOREACH_SET(i, block->live_out, state.num_defs)
         range_include(&state.defs[i], block_live_index[block->index + 1]);
   }

   return state.num_defs;
}

/* precomputed by register_allocate */
static unsigned int *q_values[] = {
   (unsigned int[]) {1, 2, 3, 4, 2, 2, 3, },
   (unsigned int[]) {3, 5, 6, 6, 5, 5, 6, },
   (unsigned int[]) {3, 4, 4, 4, 4, 4, 4, },
   (unsigned int[]) {1, 1, 1, 1, 1, 1, 1, },
   (unsigned int[]) {1, 2, 2, 2, 1, 2, 2, },
   (unsigned int[]) {2, 3, 3, 3, 2, 3, 3, },
   (unsigned int[]) {2, 2, 2, 2, 2, 2, 2, },
};

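/* register allocation: build the register set (classes, registers and
 * conflicts between overlapping virtual registers), compute liveness, build
 * the interference graph, force inputs and the depth output to their fixed
 * registers and run the allocator
 */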
static void
ra_assign(struct state *state, nir_shader *shader)
{
   struct ra_regs *regs = ra_alloc_reg_set(NULL, ETNA_MAX_TEMPS *
                                           NUM_REG_TYPES, false);

   /* classes are always created from index 0, so they are equal to the class
    * enum, which represents a register with (c+1) components
    */
   for (int c = 0; c < NUM_REG_CLASSES; c++)
      ra_alloc_reg_class(regs);
   /* add each register of each class */
   for (int r = 0; r < NUM_REG_TYPES * ETNA_MAX_TEMPS; r++)
      ra_class_add_reg(regs, reg_get_class(r), r);
   /* set conflicts */
   for (int r = 0; r < ETNA_MAX_TEMPS; r++) {
      for (int i = 0; i < NUM_REG_TYPES; i++) {
         for (int j = 0; j < i; j++) {
            if (reg_writemask[i] & reg_writemask[j]) {
               ra_add_reg_conflict(regs, NUM_REG_TYPES * r + i,
                                         NUM_REG_TYPES * r + j);
            }
         }
      }
   }
   ra_set_finalize(regs, q_values);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   /* liveness and interference */

   nir_index_blocks(impl);
   nir_index_ssa_defs(impl);
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->pass_flags = 0;
   }

   /* this gives an approximation/upper limit on how many nodes are needed
    * (some ssa values do not represent an allocated register)
    */
   unsigned max_nodes = impl->ssa_alloc + impl->reg_alloc;
   unsigned *live_map = ralloc_array(NULL, unsigned, max_nodes);
   memset(live_map, 0xff, sizeof(unsigned) * max_nodes);
   struct live_def *defs = rzalloc_array(NULL, struct live_def, max_nodes);

   unsigned num_nodes = live_defs(impl, defs, live_map);
   struct ra_graph *g = ra_alloc_interference_graph(regs, num_nodes);

   /* set classes from num_components */
   for (unsigned i = 0; i < num_nodes; i++) {
      nir_instr *instr = defs[i].instr;
      nir_dest *dest = defs[i].dest;
      unsigned c = nir_dest_num_components(*dest) - 1;

      if (instr->type == nir_instr_type_alu &&
          state->c->specs->has_new_transcendentals) {
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fdiv:
         case nir_op_flog2:
         case nir_op_fsin:
         case nir_op_fcos:
            assert(dest->is_ssa);
            c = REG_CLASS_VIRT_VEC2T;
            /* fall through */
         default:
            break;
         }
      }

      if (instr->type == nir_instr_type_intrinsic) {
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         /* can't have dst swizzle or sparse writemask on UBO loads */
         if (intr->intrinsic == nir_intrinsic_load_ubo) {
            assert(dest == &intr->dest);
            if (dest->ssa.num_components == 2)
               c = REG_CLASS_VIRT_VEC2C;
            if (dest->ssa.num_components == 3)
               c = REG_CLASS_VIRT_VEC3C;
         }
      }

      ra_set_node_class(g, i, c);
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_dest *dest = dest_for_instr(instr);
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         unsigned reg;

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            /* don't want outputs to be swizzled
             * TODO: better would be to set the type to X/XY/XYZ/XYZW
             * TODO: what if fragcoord.z is read after writing fragdepth?
             */
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            unsigned index = live_map[src_index(impl, &intr->src[1])];

            if (shader->info.stage == MESA_SHADER_FRAGMENT &&
                deref->var->data.location == FRAG_RESULT_DEPTH) {
               ra_set_node_reg(g, index, REG_FRAG_DEPTH);
            } else {
               ra_set_node_class(g, index, REG_CLASS_VEC4);
            }
         } continue;
         case nir_intrinsic_load_input:
            reg = nir_intrinsic_base(intr) * NUM_REG_TYPES + (unsigned[]) {
               REG_TYPE_VIRT_SCALAR_X,
               REG_TYPE_VIRT_VEC2_XY,
               REG_TYPE_VIRT_VEC3_XYZ,
               REG_TYPE_VEC4,
            }[nir_dest_num_components(*dest) - 1];
            break;
         case nir_intrinsic_load_instance_id:
            reg = state->c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
            break;
         default:
            continue;
         }

         ra_set_node_reg(g, live_map[dest_index(impl, dest)], reg);
      }
   }

   /* add interference for intersecting live ranges */
   for (unsigned i = 0; i < num_nodes; i++) {
      assert(defs[i].live_start < defs[i].live_end);
      for (unsigned j = 0; j < i; j++) {
         if (defs[i].live_start >= defs[j].live_end || defs[j].live_start >= defs[i].live_end)
            continue;
         ra_add_node_interference(g, i, j);
      }
   }

   ralloc_free(defs);

   /* Allocate registers */
   ASSERTED bool ok = ra_allocate(g);
   assert(ok);

   state->g = g;
   state->regs = regs;
   state->live_map = live_map;
   state->num_nodes = num_nodes;
}

static unsigned
ra_finish(struct state *state)
{
   /* TODO: better way to get number of registers used? */
   unsigned j = 0;
   for (unsigned i = 0; i < state->num_nodes; i++) {
      j = MAX2(j, reg_get_base(state, ra_get_node_reg(state->g, i)) + 1);
   }

   ralloc_free(state->g);
   ralloc_free(state->regs);
   ralloc_free(state->live_map);

   return j;
}

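/* emit a NIR ALU instruction as a hardware ALU op: resolve the allocated
 * destination and sources, fold the per-source swizzles with the destination
 * swizzle, and apply negate/abs/saturate modifiers
 */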
static void
emit_alu(struct state *state, nir_alu_instr * alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* marked as dead instruction (vecN and other bypassed instr) */
   if (alu->instr.pass_flags)
      return;

   assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));

   unsigned dst_swiz;
   hw_dst dst = ra_dest(state, &alu->dest.dest, &dst_swiz);

   /* compose alu write_mask with RA write mask */
   if (!alu->dest.dest.is_ssa)
      dst.write_mask = inst_write_mask_compose(alu->dest.write_mask, dst.write_mask);

   switch (alu->op) {
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
      /* not per-component - don't compose dst_swiz */
      dst_swiz = INST_SWIZ_IDENTITY;
      break;
   default:
      break;
   }

   hw_src srcs[3];

   for (int i = 0; i < info->num_inputs; i++) {
      nir_alu_src *asrc = &alu->src[i];
      hw_src src;

      src = src_swizzle(get_src(state, &asrc->src), ALU_SWIZ(asrc));
      src = src_swizzle(src, dst_swiz);

      if (src.rgroup != INST_RGROUP_IMMEDIATE) {
         src.neg = asrc->negate || (alu->op == nir_op_fneg);
         src.abs = asrc->abs || (alu->op == nir_op_fabs);
      } else {
         assert(!asrc->negate && alu->op != nir_op_fneg);
         assert(!asrc->abs && alu->op != nir_op_fabs);
      }

      srcs[i] = src;
   }

   emit(alu, alu->op, dst, srcs, alu->dest.saturate || (alu->op == nir_op_fsat));
}

static void
emit_tex(struct state *state, nir_tex_instr * tex)
{
   unsigned dst_swiz;
   hw_dst dst = ra_dest(state, &tex->dest, &dst_swiz);
   nir_src *coord = NULL, *lod_bias = NULL, *compare = NULL;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
         coord = &tex->src[i].src;
         break;
      case nir_tex_src_bias:
      case nir_tex_src_lod:
         assert(!lod_bias);
         lod_bias = &tex->src[i].src;
         break;
      case nir_tex_src_comparator:
         compare = &tex->src[i].src;
         break;
      default:
         compile_error(state->c, "Unhandled NIR tex src type: %d\n",
                       tex->src[i].src_type);
         break;
      }
   }

   emit(tex, tex->op, tex->sampler_index, dst_swiz, dst, get_src(state, coord),
        lod_bias ? get_src(state, lod_bias) : SRC_DISABLE,
        compare ? get_src(state, compare) : SRC_DISABLE);
}

static void
emit_intrinsic(struct state *state, nir_intrinsic_instr * intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_store_deref:
      emit(output, nir_src_as_deref(intr->src[0])->var, get_src(state, &intr->src[1]));
      break;
   case nir_intrinsic_discard_if:
      emit(discard, get_src(state, &intr->src[0]));
      break;
   case nir_intrinsic_discard:
      emit(discard, SRC_DISABLE);
      break;
   case nir_intrinsic_load_uniform: {
      unsigned dst_swiz;
      struct etna_inst_dst dst = ra_dest(state, &intr->dest, &dst_swiz);

      /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
      emit_inst(state->c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOVAR,
         .dst.write_mask = 0x1,
         .src[2] = get_src(state, &intr->src[0]),
      });
      emit_inst(state->c, &(struct etna_inst) {
         .opcode = INST_OPCODE_MOV,
         .dst = dst,
         .src[2] = {
            .use = 1,
            .rgroup = INST_RGROUP_UNIFORM_0,
            .reg = nir_intrinsic_base(intr),
            .swiz = dst_swiz,
            .amode = INST_AMODE_ADD_A_X,
         },
      });
   } break;
   case nir_intrinsic_load_ubo: {
      /* TODO: if offset is of the form (x + C) then add C to the base instead */
      unsigned idx = nir_src_as_const_value(intr->src[0])[0].u32;
      unsigned dst_swiz;
      emit_inst(state->c, &(struct etna_inst) {
         .opcode = INST_OPCODE_LOAD,
         .type = INST_TYPE_U32,
         .dst = ra_dest(state, &intr->dest, &dst_swiz),
         .src[0] = get_src(state, &intr->src[1]),
         .src[1] = const_src(state, &CONST_VAL(ETNA_IMMEDIATE_UBO0_ADDR + idx, 0), 1),
      });
   } break;
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_frag_coord:
      assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_instance_id:
      break;
   default:
      compile_error(state->c, "Unhandled NIR intrinsic type: %s\n",
                    nir_intrinsic_infos[intr->intrinsic].name);
   }
}

static void
emit_instr(struct state *state, nir_instr * instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      emit_alu(state, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_tex:
      emit_tex(state, nir_instr_as_tex(instr));
      break;
   case nir_instr_type_intrinsic:
      emit_intrinsic(state, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_jump:
      assert(nir_instr_is_last(instr));
   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_deref:
      break;
   default:
      compile_error(state->c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }
}

static void
emit_block(struct state *state, nir_block * block)
{
   emit(block_start, block->index);

   nir_foreach_instr(instr, block)
      emit_instr(state, instr);

   /* succs->index < block->index is for the loop case */
   nir_block *succs = block->successors[0];
   if (nir_block_ends_in_jump(block) || succs->index < block->index)
      emit(jump, succs->index, SRC_DISABLE);
}

static void
emit_cf_list(struct state *state, struct exec_list *list);

static void
emit_if(struct state *state, nir_if * nif)
{
   emit(jump, nir_if_first_else_block(nif)->index, get_src(state, &nif->condition));
   emit_cf_list(state, &nif->then_list);

   /* jump at end of then_list to skip else_list
    * not needed if then_list already ends with a jump or else_list is empty
    */
   if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
       !nir_cf_list_is_empty_block(&nif->else_list))
      emit(jump, nir_if_last_else_block(nif)->successors[0]->index, SRC_DISABLE);

   emit_cf_list(state, &nif->else_list);
}

static void
emit_cf_list(struct state *state, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_block:
         emit_block(state, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         emit_if(state, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         emit_cf_list(state, &nir_cf_node_as_loop(node)->body);
         break;
      default:
         compile_error(state->c, "Unknown NIR node type\n");
         break;
      }
   }
}

/* based on nir_lower_vec_to_movs */
static unsigned
insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);
   unsigned write_mask = (1u << start_idx);

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);

   mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];
   mov->src[0].negate = vec->src[start_idx].negate;
   mov->src[0].abs = vec->src[start_idx].abs;

   unsigned num_components = 1;

   for (unsigned i = start_idx + 1; i < 4; i++) {
      if (!(vec->dest.write_mask & (1 << i)))
         continue;

      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
          vec->src[i].negate == vec->src[start_idx].negate &&
          vec->src[i].abs == vec->src[start_idx].abs) {
         write_mask |= (1 << i);
         mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
         num_components++;
      }
   }

   mov->dest.write_mask = (1 << num_components) - 1;
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);

   /* replace vec srcs with inserted mov */
   for (unsigned i = 0, j = 0; i < 4; i++) {
      if (!(write_mask & (1 << i)))
         continue;

      nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, nir_src_for_ssa(&mov->dest.dest.ssa));
      vec->src[i].swizzle[0] = j++;
   }

   nir_instr_insert_before(&vec->instr, &mov->instr);

   return write_mask;
}

/*
 * for vecN instructions:
 * -merge constant sources into a single src
 * -insert movs (nir_lower_vec_to_movs equivalent)
 * for non-vecN instructions:
 * -try to merge constants as single constant
 * -insert movs for multiple constants (pre-HALTI5)
 */
static void
lower_alu(struct state *state, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   nir_builder b;
   nir_builder_init(&b, state->impl);
   b.cursor = nir_before_instr(&alu->instr);

   switch (alu->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      break;
   default:
      /* pre-GC7000L can only have 1 uniform src per instruction */
      if (state->c->specs->halti >= 5)
         return;

      nir_const_value value[4] = {};
      uint8_t swizzle[4][4] = {};
      unsigned swiz_max = 0, num_const = 0;

      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         unsigned num_components = info->input_sizes[i] ?: alu->dest.dest.ssa.num_components;
         for (unsigned j = 0; j < num_components; j++) {
            int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
            swizzle[i][j] = idx;
            swiz_max = MAX2(swiz_max, (unsigned) idx);
         }
         num_const++;
      }

      /* nothing to do */
      if (num_const <= 1)
         return;

      /* resolve with single combined const src */
      if (swiz_max < 4) {
         nir_ssa_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);

         for (unsigned i = 0; i < info->num_inputs; i++) {
            nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
            if (!cv)
               continue;

            nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));

            for (unsigned j = 0; j < 4; j++)
               alu->src[i].swizzle[j] = swizzle[i][j];
         }
         return;
      }

      /* resolve with movs */
      num_const = 0;
      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         num_const++;
         if (num_const == 1)
            continue;

         nir_ssa_def *mov = nir_mov(&b, alu->src[i].src.ssa);
         nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
      }
      return;
   }

   nir_const_value value[4];
   unsigned num_components = 0;

   for (unsigned i = 0; i < info->num_inputs; i++) {
      nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
      if (cv)
         value[num_components++] = cv[alu->src[i].swizzle[0]];
   }

   /* if there is more than one constant source to the vecN, combine them
    * into a single load_const (removing the vecN completely if all components
    * are constant)
    */
   if (num_components > 1) {
      nir_ssa_def *def = nir_build_imm(&b, num_components, 32, value);

      if (num_components == info->num_inputs) {
         nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(def));
         nir_instr_remove(&alu->instr);
         return;
      }

      for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
         if (!cv)
            continue;

         nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
         alu->src[i].swizzle[0] = j++;
      }
   }

   unsigned finished_write_mask = 0;
   for (unsigned i = 0; i < 4; i++) {
      if (!(alu->dest.write_mask & (1 << i)))
         continue;

      nir_ssa_def *ssa = alu->src[i].src.ssa;

      /* check that vecN instruction is only user of this */
      bool need_mov = list_length(&ssa->if_uses) != 0;
      nir_foreach_use(use_src, ssa) {
         if (use_src->parent_instr != &alu->instr)
            need_mov = true;
      }

      nir_instr *instr = ssa->parent_instr;
      switch (instr->type) {
      case nir_instr_type_alu:
      case nir_instr_type_tex:
         break;
      case nir_instr_type_intrinsic:
         if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
            need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa);
            break;
         }
      default:
         need_mov = true;
      }

      if (need_mov && !(finished_write_mask & (1 << i)))
         finished_write_mask |= insert_vec_mov(alu, i, state->shader);
   }
}

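/* top-level entry point: lower vecN/constant sources, convert non-indirect
 * uniform loads into constants, insert movs for outputs fed by sysvals or
 * constants, leave SSA form, run register allocation and emit the machine
 * code; the number of temps and constants used is returned via out parameters
 */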
static bool
emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
{
   nir_shader *shader = c->nir;

   struct state state = {
      .c = c,
      .shader = shader,
      .impl = nir_shader_get_entrypoint(shader),
   };
   bool have_indirect_uniform = false;
   unsigned indirect_max = 0;

   nir_builder b;
   nir_builder_init(&b, state.impl);

   /* convert non-dynamic uniform loads to constants, etc */
   nir_foreach_block(block, state.impl) {
      nir_foreach_instr_safe(instr, block) {
         switch(instr->type) {
         case nir_instr_type_alu:
            /* deals with vecN and const srcs */
            lower_alu(&state, nir_instr_as_alu(instr));
            break;
         case nir_instr_type_load_const: {
            nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
            for (unsigned i = 0; i < load_const->def.num_components; i++)
               load_const->value[i] = CONST(load_const->value[i].u32);
         } break;
         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            /* TODO: load_ubo can also become a constant in some cases
             * (at the moment it can end up emitting a LOAD with two
             * uniform sources, which could be a problem on HALTI2)
             */
            if (intr->intrinsic != nir_intrinsic_load_uniform)
               break;
            nir_const_value *off = nir_src_as_const_value(intr->src[0]);
            if (!off || off[0].u64 >> 32 != ETNA_IMMEDIATE_CONSTANT) {
               have_indirect_uniform = true;
               indirect_max = nir_intrinsic_base(intr) + nir_intrinsic_range(intr);
               break;
            }

            unsigned base = nir_intrinsic_base(intr);
            /* pre halti2 uniform offset will be float */
            if (c->specs->halti < 2)
               base += (unsigned) off[0].f32;
            else
               base += off[0].u32;
            nir_const_value value[4];

            for (unsigned i = 0; i < intr->dest.ssa.num_components; i++) {
               if (nir_intrinsic_base(intr) < 0)
                  value[i] = TEXSCALE(~nir_intrinsic_base(intr), i);
               else
                  value[i] = UNIFORM(base * 4 + i);
            }

            b.cursor = nir_after_instr(instr);
            nir_ssa_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);

            nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(def));
            nir_instr_remove(instr);
         } break;
         default:
            break;
         }
      }
   }

   /* TODO: only emit required indirect uniform ranges */
   if (have_indirect_uniform) {
      for (unsigned i = 0; i < indirect_max * 4; i++)
         c->consts[i] = UNIFORM(i).u64;
      state.const_count = indirect_max;
   }

   /* add mov for any store output using sysval/const */
   nir_foreach_block(block, state.impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            nir_src *src = &intr->src[1];
            if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr)) {
               b.cursor = nir_before_instr(instr);
               nir_instr_rewrite_src(instr, src, nir_src_for_ssa(nir_mov(&b, src->ssa)));
            }
         } break;
         default:
            break;
         }
      }
   }

   /* call directly to avoid validation (load_const doesn't pass validation at this point) */
   nir_convert_from_ssa(shader, true);
   nir_opt_dce(shader);

   ra_assign(&state, shader);

   emit_cf_list(&state, &nir_shader_get_entrypoint(shader)->body);

   *num_temps = ra_finish(&state);
   *num_consts = state.const_count;
   return true;
}