1 /*
2 * Copyright (c) 2019 Zodiac Inflight Innovations
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jonathan Marek <jonathan@marek.ca>
25 */
26
27 #include "etnaviv_asm.h"
28 #include "etnaviv_context.h"
29 #include "etnaviv_compiler_nir.h"
30
31 #include "compiler/nir/nir.h"
32 #include "compiler/nir/nir_builder.h"
33 #include "compiler/nir/nir_worklist.h"
34 #include "util/register_allocate.h"
35
36 #define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
37 #define SRC_DISABLE ((hw_src){})
38 #define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_UNIFORM_0, .reg=idx, .swiz=s})
39 #define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = INST_RGROUP_TEMP, .reg=idx, .swiz=s})
40
41 #define emit(type, args...) etna_emit_##type(state->c, args)
42
43 typedef struct etna_inst_dst hw_dst;
44 typedef struct etna_inst_src hw_src;
45
46 enum {
47 BYPASS_DST = 1,
48 BYPASS_SRC = 2,
49 };
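/* pass_flags markers used by the bypass logic below: real_dest() sets
 * BYPASS_DST on a vecN/mov whose destination register is taken over by the
 * instruction producing its source, and BYPASS_SRC on a mov whose users read
 * its source directly (see get_src()). Instructions carrying either flag are
 * skipped at emit time.
 */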
50
51 struct state {
52 struct etna_compile *c;
53
54 unsigned const_count;
55
56 nir_shader *shader;
57 nir_function_impl *impl;
58
59 /* ra state */
60 struct ra_graph *g;
61 struct ra_regs *regs;
62 unsigned *live_map;
63 unsigned num_nodes;
64 };
65
66 static inline hw_src
67 src_swizzle(hw_src src, unsigned swizzle)
68 {
69 if (src.rgroup != INST_RGROUP_IMMEDIATE)
70 src.swiz = inst_swiz_compose(src.swiz, swizzle);
71
72 return src;
73 }
74
75 static inline bool is_sysval(nir_instr *instr)
76 {
77 if (instr->type != nir_instr_type_intrinsic)
78 return false;
79
80 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
81 return intr->intrinsic == nir_intrinsic_load_front_face ||
82 intr->intrinsic == nir_intrinsic_load_frag_coord;
83 }
84
85 /* constants are represented as 64-bit ints
86 * 32-bit for the value and 32-bit for the type (imm, uniform, etc)
87 */
88
89 #define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
90 #define CONST(x) CONST_VAL(ETNA_IMMEDIATE_CONSTANT, x)
91 #define UNIFORM(x) CONST_VAL(ETNA_IMMEDIATE_UNIFORM, x)
92 #define TEXSCALE(x, i) CONST_VAL(ETNA_IMMEDIATE_TEXRECT_SCALE_X + (i), x)
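/* For illustration: UNIFORM(3) expands to
 * { .u64 = ((uint64_t)ETNA_IMMEDIATE_UNIFORM << 32) | 3 }.
 * const_src() below only inspects the upper half to recognize plain
 * ETNA_IMMEDIATE_CONSTANT values (candidates for inline immediates); otherwise
 * the type stays packed alongside the value in c->consts.
 */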
93
94 static int
95 const_add(uint64_t *c, uint64_t value)
96 {
97 for (unsigned i = 0; i < 4; i++) {
98 if (c[i] == value || !c[i]) {
99 c[i] = value;
100 return i;
101 }
102 }
103 return -1;
104 }
105
106 static hw_src
107 const_src(struct state *state, nir_const_value *value, unsigned num_components)
108 {
109 /* use inline immediates if possible */
110 if (state->c->specs->halti >= 2 && num_components == 1 &&
111 value[0].u64 >> 32 == ETNA_IMMEDIATE_CONSTANT) {
112 uint32_t bits = value[0].u32;
113
114 /* "float" - shifted by 12 */
115 if ((bits & 0xfff) == 0)
116 return etna_immediate_src(0, bits >> 12);
117
118 /* "unsigned" - raw 20 bit value */
119 if (bits < (1 << 20))
120 return etna_immediate_src(2, bits);
121
122 /* "signed" - sign extended 20-bit (sign included) value */
123 if (bits >= 0xfff80000)
124 return etna_immediate_src(1, bits);
125 }
126
127 unsigned i;
128 int swiz = -1;
129 for (i = 0; swiz < 0; i++) {
130 uint64_t *a = &state->c->consts[i*4];
131 uint64_t save[4];
132 memcpy(save, a, sizeof(save));
133 swiz = 0;
134 for (unsigned j = 0; j < num_components; j++) {
135 int c = const_add(a, value[j].u64);
136 if (c < 0) {
137 memcpy(a, save, sizeof(save));
138 swiz = -1;
139 break;
140 }
141 swiz |= c << j * 2;
142 }
143 }
144
145 assert(i <= ETNA_MAX_IMM / 4);
146 state->const_count = MAX2(state->const_count, i);
147
148 return SRC_CONST(i - 1, swiz);
149 }
150
151 /* Swizzles and write masks can be used to layer virtual non-interfering
152 * registers on top of the real VEC4 registers. For example, the virtual
153 * VEC3_XYZ register and the virtual SCALAR_W register that use the same
154 * physical VEC4 base register do not interfere.
155 */
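/* For example, a VIRT_VEC3_XYZ node and a VIRT_SCALAR_W node can be assigned
 * to the same base temp without interfering, since their write masks (0x7 and
 * 0x8) are disjoint; ra_assign() below only adds conflicts between register
 * types whose write masks overlap.
 */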
156 enum reg_class {
157 REG_CLASS_VIRT_SCALAR,
158 REG_CLASS_VIRT_VEC2,
159 REG_CLASS_VIRT_VEC3,
160 REG_CLASS_VEC4,
161 /* special vec2 class for fast transcendentals, limited to XY or ZW */
162 REG_CLASS_VIRT_VEC2T,
163 /* special classes for LOAD - contiguous components */
164 REG_CLASS_VIRT_VEC2C,
165 REG_CLASS_VIRT_VEC3C,
166 NUM_REG_CLASSES,
167 };
168
169 enum reg_type {
170 REG_TYPE_VEC4,
171 REG_TYPE_VIRT_VEC3_XYZ,
172 REG_TYPE_VIRT_VEC3_XYW,
173 REG_TYPE_VIRT_VEC3_XZW,
174 REG_TYPE_VIRT_VEC3_YZW,
175 REG_TYPE_VIRT_VEC2_XY,
176 REG_TYPE_VIRT_VEC2_XZ,
177 REG_TYPE_VIRT_VEC2_XW,
178 REG_TYPE_VIRT_VEC2_YZ,
179 REG_TYPE_VIRT_VEC2_YW,
180 REG_TYPE_VIRT_VEC2_ZW,
181 REG_TYPE_VIRT_SCALAR_X,
182 REG_TYPE_VIRT_SCALAR_Y,
183 REG_TYPE_VIRT_SCALAR_Z,
184 REG_TYPE_VIRT_SCALAR_W,
185 REG_TYPE_VIRT_VEC2T_XY,
186 REG_TYPE_VIRT_VEC2T_ZW,
187 REG_TYPE_VIRT_VEC2C_XY,
188 REG_TYPE_VIRT_VEC2C_YZ,
189 REG_TYPE_VIRT_VEC2C_ZW,
190 REG_TYPE_VIRT_VEC3C_XYZ,
191 REG_TYPE_VIRT_VEC3C_YZW,
192 NUM_REG_TYPES,
193 };
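/* A virtual register number encodes both the base temp and the layout above:
 * virt_reg = base * NUM_REG_TYPES + reg_type, decoded again by reg_get_base()
 * and reg_get_type() below.
 */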
194
195 /* writemask when used as dest */
196 static const uint8_t
197 reg_writemask[NUM_REG_TYPES] = {
198 [REG_TYPE_VEC4] = 0xf,
199 [REG_TYPE_VIRT_SCALAR_X] = 0x1,
200 [REG_TYPE_VIRT_SCALAR_Y] = 0x2,
201 [REG_TYPE_VIRT_VEC2_XY] = 0x3,
202 [REG_TYPE_VIRT_VEC2T_XY] = 0x3,
203 [REG_TYPE_VIRT_VEC2C_XY] = 0x3,
204 [REG_TYPE_VIRT_SCALAR_Z] = 0x4,
205 [REG_TYPE_VIRT_VEC2_XZ] = 0x5,
206 [REG_TYPE_VIRT_VEC2_YZ] = 0x6,
207 [REG_TYPE_VIRT_VEC2C_YZ] = 0x6,
208 [REG_TYPE_VIRT_VEC3_XYZ] = 0x7,
209 [REG_TYPE_VIRT_VEC3C_XYZ] = 0x7,
210 [REG_TYPE_VIRT_SCALAR_W] = 0x8,
211 [REG_TYPE_VIRT_VEC2_XW] = 0x9,
212 [REG_TYPE_VIRT_VEC2_YW] = 0xa,
213 [REG_TYPE_VIRT_VEC3_XYW] = 0xb,
214 [REG_TYPE_VIRT_VEC2_ZW] = 0xc,
215 [REG_TYPE_VIRT_VEC2T_ZW] = 0xc,
216 [REG_TYPE_VIRT_VEC2C_ZW] = 0xc,
217 [REG_TYPE_VIRT_VEC3_XZW] = 0xd,
218 [REG_TYPE_VIRT_VEC3_YZW] = 0xe,
219 [REG_TYPE_VIRT_VEC3C_YZW] = 0xe,
220 };
221
222 /* how to swizzle when used as a src */
223 static const uint8_t
224 reg_swiz[NUM_REG_TYPES] = {
225 [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
226 [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
227 [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
228 [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
229 [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
230 [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
231 [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
232 [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
233 [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
234 [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(Y, Z, Y, Z),
235 [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
236 [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
237 [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
238 [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
239 [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
240 [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
241 [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
242 [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
243 [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(Z, W, Z, W),
244 [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
245 [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
246 [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(Y, Z, W, X),
247 };
248
249 /* how to swizzle when used as a dest */
250 static const uint8_t
251 reg_dst_swiz[NUM_REG_TYPES] = {
252 [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
253 [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
254 [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
255 [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
256 [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
257 [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
258 [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
259 [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
260 [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
261 [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(X, X, Y, Y),
262 [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
263 [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
264 [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
265 [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
266 [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
267 [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
268 [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
269 [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
270 [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(X, X, X, Y),
271 [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
272 [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
273 [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(X, X, Y, Z),
274 };
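/* Example of how the three tables combine for REG_TYPE_VIRT_VEC2_ZW: the value
 * lives in the .zw channels of its base temp, so as a source it is read with
 * swizzle ZWZW, while as a destination the write mask is 0xc and the dst
 * swizzle routes the logical .xy result into the .z/.w channels (emit_alu
 * applies that dst swizzle to the instruction's sources).
 */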
275
276 static inline int reg_get_type(int virt_reg)
277 {
278 return virt_reg % NUM_REG_TYPES;
279 }
280
281 static inline int reg_get_base(struct state *state, int virt_reg)
282 {
283 /* offset by 1 to avoid reserved position register */
284 if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
285 return (virt_reg / NUM_REG_TYPES + 1) % ETNA_MAX_TEMPS;
286 return virt_reg / NUM_REG_TYPES;
287 }
288
289 /* use "r63.z" for the depth reg; reg_get_base will wrap it around to r0.z
290 * (fs registers are offset by 1 to avoid reserving r0)
291 */
292 #define REG_FRAG_DEPTH ((ETNA_MAX_TEMPS - 1) * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Z)
293
294 static inline int reg_get_class(int virt_reg)
295 {
296 switch (reg_get_type(virt_reg)) {
297 case REG_TYPE_VEC4:
298 return REG_CLASS_VEC4;
299 case REG_TYPE_VIRT_VEC3_XYZ:
300 case REG_TYPE_VIRT_VEC3_XYW:
301 case REG_TYPE_VIRT_VEC3_XZW:
302 case REG_TYPE_VIRT_VEC3_YZW:
303 return REG_CLASS_VIRT_VEC3;
304 case REG_TYPE_VIRT_VEC2_XY:
305 case REG_TYPE_VIRT_VEC2_XZ:
306 case REG_TYPE_VIRT_VEC2_XW:
307 case REG_TYPE_VIRT_VEC2_YZ:
308 case REG_TYPE_VIRT_VEC2_YW:
309 case REG_TYPE_VIRT_VEC2_ZW:
310 return REG_CLASS_VIRT_VEC2;
311 case REG_TYPE_VIRT_SCALAR_X:
312 case REG_TYPE_VIRT_SCALAR_Y:
313 case REG_TYPE_VIRT_SCALAR_Z:
314 case REG_TYPE_VIRT_SCALAR_W:
315 return REG_CLASS_VIRT_SCALAR;
316 case REG_TYPE_VIRT_VEC2T_XY:
317 case REG_TYPE_VIRT_VEC2T_ZW:
318 return REG_CLASS_VIRT_VEC2T;
319 case REG_TYPE_VIRT_VEC2C_XY:
320 case REG_TYPE_VIRT_VEC2C_YZ:
321 case REG_TYPE_VIRT_VEC2C_ZW:
322 return REG_CLASS_VIRT_VEC2C;
323 case REG_TYPE_VIRT_VEC3C_XYZ:
324 case REG_TYPE_VIRT_VEC3C_YZW:
325 return REG_CLASS_VIRT_VEC3C;
326 }
327
328 assert(false);
329 return 0;
330 }
331
332 /* get unique ssa/reg index for nir_src */
333 static unsigned
334 src_index(nir_function_impl *impl, nir_src *src)
335 {
336 return src->is_ssa ? src->ssa->index : (src->reg.reg->index + impl->ssa_alloc);
337 }
338
339 /* get unique ssa/reg index for nir_dest */
340 static unsigned
341 dest_index(nir_function_impl *impl, nir_dest *dest)
342 {
343 return dest->is_ssa ? dest->ssa.index : (dest->reg.reg->index + impl->ssa_alloc);
344 }
345
346 /* nir_src to allocated register */
347 static hw_src
348 ra_src(struct state *state, nir_src *src)
349 {
350 unsigned reg = ra_get_node_reg(state->g, state->live_map[src_index(state->impl, src)]);
351 return SRC_REG(reg_get_base(state, reg), reg_swiz[reg_get_type(reg)]);
352 }
353
354 static hw_src
355 get_src(struct state *state, nir_src *src)
356 {
357 if (!src->is_ssa)
358 return ra_src(state, src);
359
360 nir_instr *instr = src->ssa->parent_instr;
361
362 if (instr->pass_flags & BYPASS_SRC) {
363 assert(instr->type == nir_instr_type_alu);
364 nir_alu_instr *alu = nir_instr_as_alu(instr);
365 assert(alu->op == nir_op_mov);
366 return src_swizzle(get_src(state, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
367 }
368
369 switch (instr->type) {
370 case nir_instr_type_load_const:
371 return const_src(state, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
372 case nir_instr_type_intrinsic: {
373 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
374 switch (intr->intrinsic) {
375 case nir_intrinsic_load_input:
376 case nir_intrinsic_load_instance_id:
377 case nir_intrinsic_load_uniform:
378 case nir_intrinsic_load_ubo:
379 return ra_src(state, src);
380 case nir_intrinsic_load_front_face:
381 return (hw_src) { .use = 1, .rgroup = INST_RGROUP_INTERNAL };
382 case nir_intrinsic_load_frag_coord:
383 return SRC_REG(0, INST_SWIZ_IDENTITY);
384 default:
385 compile_error(state->c, "Unhandled NIR intrinsic type: %s\n",
386 nir_intrinsic_infos[intr->intrinsic].name);
387 break;
388 }
389 } break;
390 case nir_instr_type_alu:
391 case nir_instr_type_tex:
392 return ra_src(state, src);
393 case nir_instr_type_ssa_undef: {
394 /* return zero to deal with broken Blur demo */
395 nir_const_value value = CONST(0);
396 return src_swizzle(const_src(state, &value, 1), SWIZZLE(X,X,X,X));
397 }
398 default:
399 compile_error(state->c, "Unhandled NIR instruction type: %d\n", instr->type);
400 break;
401 }
402
403 return SRC_DISABLE;
404 }
405
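/* Compose a (swizzle, write mask) pair through one vecN (dest != NULL) or mov
 * (dest == NULL) user: for each channel the user writes from this value, remap
 * the accumulated swizzle and keep the mask bit only when the source channel
 * was itself written through the chain. Used by real_dest() while following
 * vec/mov chains to the register that is eventually written.
 */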
406 static void
407 update_swiz_mask(nir_alu_instr *alu, nir_dest *dest, unsigned *swiz, unsigned *mask)
408 {
409 if (!swiz)
410 return;
411
412 bool is_vec = dest != NULL;
413 unsigned swizzle = 0, write_mask = 0;
414 for (unsigned i = 0; i < 4; i++) {
415 /* channel not written */
416 if (!(alu->dest.write_mask & (1 << i)))
417 continue;
418 /* src is different (only check for vecN) */
419 if (is_vec && alu->src[i].src.ssa != &dest->ssa)
420 continue;
421
422 unsigned src_swiz = is_vec ? alu->src[i].swizzle[0] : alu->src[0].swizzle[i];
423 swizzle |= (*swiz >> src_swiz * 2 & 3) << i * 2;
424 /* only propagate the write if this channel is actually written through the chain */
425 if (*mask & (1 << src_swiz))
426 write_mask |= 1 << i;
427 }
428 *swiz = swizzle;
429 *mask = write_mask;
430 }
431
432 static bool
433 vec_dest_has_swizzle(nir_alu_instr *vec, nir_ssa_def *ssa)
434 {
435 for (unsigned i = 0; i < 4; i++) {
436 if (!(vec->dest.write_mask & (1 << i)) || vec->src[i].src.ssa != ssa)
437 continue;
438
439 if (vec->src[i].swizzle[0] != i)
440 return true;
441 }
442
443 /* don't deal with possible bypassed vec/mov chain */
444 nir_foreach_use(use_src, ssa) {
445 nir_instr *instr = use_src->parent_instr;
446 if (instr->type != nir_instr_type_alu)
447 continue;
448
449 nir_alu_instr *alu = nir_instr_as_alu(instr);
450
451 switch (alu->op) {
452 case nir_op_mov:
453 case nir_op_vec2:
454 case nir_op_vec3:
455 case nir_op_vec4:
456 return true;
457 default:
458 break;
459 }
460 }
461 return false;
462 }
463
464 static nir_dest *
465 real_dest(nir_dest *dest, unsigned *swiz, unsigned *mask)
466 {
467 if (!dest || !dest->is_ssa)
468 return dest;
469
470 bool can_bypass_src = !list_length(&dest->ssa.if_uses);
471 nir_instr *p_instr = dest->ssa.parent_instr;
472
473 /* if used by a vecN, the "real" destination becomes the vecN destination
474 * lower_alu guarantees that values used by a vecN are only used by that vecN
475 * we can apply the same logic to movs in some cases too
476 */
477 nir_foreach_use(use_src, &dest->ssa) {
478 nir_instr *instr = use_src->parent_instr;
479
480 /* src bypass check: for now only deal with tex src mov case
481 * note: for alu don't bypass mov for multiple uniform sources
482 */
483 switch (instr->type) {
484 case nir_instr_type_tex:
485 if (p_instr->type == nir_instr_type_alu &&
486 nir_instr_as_alu(p_instr)->op == nir_op_mov) {
487 break;
488 }
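      /* not the tex-consuming mov case: fall through and disallow the src bypass */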
489 default:
490 can_bypass_src = false;
491 break;
492 }
493
494 if (instr->type != nir_instr_type_alu)
495 continue;
496
497 nir_alu_instr *alu = nir_instr_as_alu(instr);
498
499 switch (alu->op) {
500 case nir_op_vec2:
501 case nir_op_vec3:
502 case nir_op_vec4:
503 assert(list_length(&dest->ssa.if_uses) == 0);
504 nir_foreach_use(use_src, &dest->ssa)
505 assert(use_src->parent_instr == instr);
506
507 update_swiz_mask(alu, dest, swiz, mask);
508 break;
509 case nir_op_mov: {
510 switch (dest->ssa.parent_instr->type) {
511 case nir_instr_type_alu:
512 case nir_instr_type_tex:
513 break;
514 default:
515 continue;
516 }
517 if (list_length(&dest->ssa.if_uses) || list_length(&dest->ssa.uses) > 1)
518 continue;
519
520 update_swiz_mask(alu, NULL, swiz, mask);
521 break;
522 }
523 default:
524 continue;
525 }
526
527 assert(!(instr->pass_flags & BYPASS_SRC));
528 instr->pass_flags |= BYPASS_DST;
529 return real_dest(&alu->dest.dest, swiz, mask);
530 }
531
532 if (can_bypass_src && !(p_instr->pass_flags & BYPASS_DST)) {
533 p_instr->pass_flags |= BYPASS_SRC;
534 return NULL;
535 }
536
537 return dest;
538 }
539
540 /* get allocated dest register for nir_dest
541 * *p_swiz tells how the components need to be placed into the register
542 */
543 static hw_dst
544 ra_dest(struct state *state, nir_dest *dest, unsigned *p_swiz)
545 {
546 unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
547 dest = real_dest(dest, &swiz, &mask);
548
549 unsigned r = ra_get_node_reg(state->g, state->live_map[dest_index(state->impl, dest)]);
550 unsigned t = reg_get_type(r);
551
552 *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);
553
554 return (hw_dst) {
555 .use = 1,
556 .reg = reg_get_base(state, r),
557 .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
558 };
559 }
560
561 /* if instruction dest needs a register, return nir_dest for it */
562 static nir_dest *
563 dest_for_instr(nir_instr *instr)
564 {
565 nir_dest *dest = NULL;
566
567 switch (instr->type) {
568 case nir_instr_type_alu:
569 dest = &nir_instr_as_alu(instr)->dest.dest;
570 break;
571 case nir_instr_type_tex:
572 dest = &nir_instr_as_tex(instr)->dest;
573 break;
574 case nir_instr_type_intrinsic: {
575 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
576 if (intr->intrinsic == nir_intrinsic_load_uniform ||
577 intr->intrinsic == nir_intrinsic_load_ubo ||
578 intr->intrinsic == nir_intrinsic_load_input ||
579 intr->intrinsic == nir_intrinsic_load_instance_id)
580 dest = &intr->dest;
581 } break;
582 case nir_instr_type_deref:
583 return NULL;
584 default:
585 break;
586 }
587 return real_dest(dest, NULL, NULL);
588 }
589
590 struct live_def {
591 nir_instr *instr;
592 nir_dest *dest; /* cached dest_for_instr */
593 unsigned live_start, live_end; /* live range */
594 };
595
596 static void
597 range_include(struct live_def *def, unsigned index)
598 {
599 if (def->live_start > index)
600 def->live_start = index;
601 if (def->live_end < index)
602 def->live_end = index;
603 }
604
605 struct live_defs_state {
606 unsigned num_defs;
607 unsigned bitset_words;
608
609 nir_function_impl *impl;
610 nir_block *block; /* current block pointer */
611 unsigned index; /* current live index */
612
613 struct live_def *defs;
614 unsigned *live_map; /* to map ssa/reg index into defs array */
615
616 nir_block_worklist worklist;
617 };
618
619 static bool
620 init_liveness_block(nir_block *block,
621 struct live_defs_state *state)
622 {
623 block->live_in = reralloc(block, block->live_in, BITSET_WORD,
624 state->bitset_words);
625 memset(block->live_in, 0, state->bitset_words * sizeof(BITSET_WORD));
626
627 block->live_out = reralloc(block, block->live_out, BITSET_WORD,
628 state->bitset_words);
629 memset(block->live_out, 0, state->bitset_words * sizeof(BITSET_WORD));
630
631 nir_block_worklist_push_head(&state->worklist, block);
632
633 return true;
634 }
635
636 static bool
637 set_src_live(nir_src *src, void *void_state)
638 {
639 struct live_defs_state *state = void_state;
640
641 if (src->is_ssa) {
642 nir_instr *instr = src->ssa->parent_instr;
643
644 if (is_sysval(instr) || instr->type == nir_instr_type_deref)
645 return true;
646
647 switch (instr->type) {
648 case nir_instr_type_load_const:
649 case nir_instr_type_ssa_undef:
650 return true;
651 case nir_instr_type_alu: {
652 /* alu op bypass */
653 nir_alu_instr *alu = nir_instr_as_alu(instr);
654 if (instr->pass_flags & BYPASS_SRC) {
655 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
656 set_src_live(&alu->src[i].src, state);
657 return true;
658 }
659 } break;
660 default:
661 break;
662 }
663 }
664
665 unsigned i = state->live_map[src_index(state->impl, src)];
666 assert(i != ~0u);
667
668 BITSET_SET(state->block->live_in, i);
669 range_include(&state->defs[i], state->index);
670
671 return true;
672 }
673
674 static bool
675 propagate_across_edge(nir_block *pred, nir_block *succ,
676 struct live_defs_state *state)
677 {
678 BITSET_WORD progress = 0;
679 for (unsigned i = 0; i < state->bitset_words; ++i) {
680 progress |= succ->live_in[i] & ~pred->live_out[i];
681 pred->live_out[i] |= succ->live_in[i];
682 }
683 return progress != 0;
684 }
685
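/* Backwards liveness over emitted definitions only: fills the defs array (one
 * entry per value that gets a register), maps ssa/reg indices to def slots in
 * live_map, and records live_start/live_end ranges measured by a running index
 * over those defs, so ra_assign() can add interference between overlapping
 * ranges. Returns the number of defs.
 */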
686 static unsigned
687 live_defs(nir_function_impl *impl, struct live_def *defs, unsigned *live_map)
688 {
689 struct live_defs_state state;
690 unsigned block_live_index[impl->num_blocks + 1];
691
692 state.impl = impl;
693 state.defs = defs;
694 state.live_map = live_map;
695
696 state.num_defs = 0;
697 nir_foreach_block(block, impl) {
698 block_live_index[block->index] = state.num_defs;
699 nir_foreach_instr(instr, block) {
700 nir_dest *dest = dest_for_instr(instr);
701 if (!dest)
702 continue;
703
704 unsigned idx = dest_index(impl, dest);
705 /* register is already in defs */
706 if (live_map[idx] != ~0u)
707 continue;
708
709 defs[state.num_defs] = (struct live_def) {instr, dest, state.num_defs, 0};
710
711 /* input live from the start */
712 if (instr->type == nir_instr_type_intrinsic) {
713 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
714 if (intr->intrinsic == nir_intrinsic_load_input ||
715 intr->intrinsic == nir_intrinsic_load_instance_id)
716 defs[state.num_defs].live_start = 0;
717 }
718
719 live_map[idx] = state.num_defs;
720 state.num_defs++;
721 }
722 }
723 block_live_index[impl->num_blocks] = state.num_defs;
724
725 nir_block_worklist_init(&state.worklist, impl->num_blocks, NULL);
726
727 /* We now know how many unique ssa definitions we have and we can go
728 * ahead and allocate live_in and live_out sets and add all of the
729 * blocks to the worklist.
730 */
731 state.bitset_words = BITSET_WORDS(state.num_defs);
732 nir_foreach_block(block, impl) {
733 init_liveness_block(block, &state);
734 }
735
736 /* We're now ready to work through the worklist and update the liveness
737 * sets of each of the blocks. By the time we get to this point, every
738 * block in the function implementation has been pushed onto the
739 * worklist in reverse order. As long as we keep the worklist
740 * up-to-date as we go, everything will get covered.
741 */
742 while (!nir_block_worklist_is_empty(&state.worklist)) {
743 /* We pop them off in the reverse order we pushed them on. This way
744 * the first walk of the instructions is backwards so we only walk
745 * once in the case of no control flow.
746 */
747 nir_block *block = nir_block_worklist_pop_head(&state.worklist);
748 state.block = block;
749
750 memcpy(block->live_in, block->live_out,
751 state.bitset_words * sizeof(BITSET_WORD));
752
753 state.index = block_live_index[block->index + 1];
754
755 nir_if *following_if = nir_block_get_following_if(block);
756 if (following_if)
757 set_src_live(&following_if->condition, &state);
758
759 nir_foreach_instr_reverse(instr, block) {
760 /* when we come across the next "live" instruction, decrement index */
761 if (state.index && instr == defs[state.index - 1].instr) {
762 state.index--;
763 /* the only source of writes to registers is phis:
764 * we don't expect any partial write_mask alus
765 * so clearing live_in here is OK
766 */
767 BITSET_CLEAR(block->live_in, state.index);
768 }
769
770 /* don't call set_src_live for instructions that are not emitted (bypassed vecN/mov) */
771 if (instr->pass_flags)
772 continue;
773
774 unsigned index = state.index;
775
776 /* output live till the end */
777 if (instr->type == nir_instr_type_intrinsic) {
778 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
779 if (intr->intrinsic == nir_intrinsic_store_deref)
780 state.index = ~0u;
781 }
782
783 nir_foreach_src(instr, set_src_live, &state);
784
785 state.index = index;
786 }
787 assert(state.index == block_live_index[block->index]);
788
789 /* Walk over all of the predecessors of the current block updating
790 * their live in with the live out of this one. If anything has
791 * changed, add the predecessor to the work list so that we ensure
792 * that the new information is used.
793 */
794 set_foreach(block->predecessors, entry) {
795 nir_block *pred = (nir_block *)entry->key;
796 if (propagate_across_edge(pred, block, &state))
797 nir_block_worklist_push_tail(&state.worklist, pred);
798 }
799 }
800
801 nir_block_worklist_fini(&state.worklist);
802
803 /* apply live_in/live_out to ranges */
804
805 nir_foreach_block(block, impl) {
806 int i;
807
808 BITSET_FOREACH_SET(i, block->live_in, state.num_defs)
809 range_include(&state.defs[i], block_live_index[block->index]);
810
811 BITSET_FOREACH_SET(i, block->live_out, state.num_defs)
812 range_include(&state.defs[i], block_live_index[block->index + 1]);
813 }
814
815 return state.num_defs;
816 }
817
818 /* precomputed by register_allocate */
819 static unsigned int *q_values[] = {
820 (unsigned int[]) {1, 2, 3, 4, 2, 2, 3, },
821 (unsigned int[]) {3, 5, 6, 6, 5, 5, 6, },
822 (unsigned int[]) {3, 4, 4, 4, 4, 4, 4, },
823 (unsigned int[]) {1, 1, 1, 1, 1, 1, 1, },
824 (unsigned int[]) {1, 2, 2, 2, 1, 2, 2, },
825 (unsigned int[]) {2, 3, 3, 3, 2, 3, 3, },
826 (unsigned int[]) {2, 2, 2, 2, 2, 2, 2, },
827 };
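/* These are the 'q' values consumed by ra_set_finalize(): entry [c1][c2] gives
 * how many registers of class c1 a single register of class c2 can conflict
 * with, precomputed (as noted above) so the register set does not have to
 * derive them from the conflict lists at runtime.
 */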
828
829 static void
830 ra_assign(struct state *state, nir_shader *shader)
831 {
832 struct ra_regs *regs = ra_alloc_reg_set(NULL, ETNA_MAX_TEMPS *
833 NUM_REG_TYPES, false);
834
835 /* classes are always created from index 0, so the class index equals the
836 * reg_class enum value, which represents a register with (c+1) components
837 */
838 for (int c = 0; c < NUM_REG_CLASSES; c++)
839 ra_alloc_reg_class(regs);
840 /* add each register of each class */
841 for (int r = 0; r < NUM_REG_TYPES * ETNA_MAX_TEMPS; r++)
842 ra_class_add_reg(regs, reg_get_class(r), r);
843 /* set conflicts */
844 for (int r = 0; r < ETNA_MAX_TEMPS; r++) {
845 for (int i = 0; i < NUM_REG_TYPES; i++) {
846 for (int j = 0; j < i; j++) {
847 if (reg_writemask[i] & reg_writemask[j]) {
848 ra_add_reg_conflict(regs, NUM_REG_TYPES * r + i,
849 NUM_REG_TYPES * r + j);
850 }
851 }
852 }
853 }
854 ra_set_finalize(regs, q_values);
855
856 nir_function_impl *impl = nir_shader_get_entrypoint(shader);
857
858 /* liveness and interference */
859
860 nir_index_blocks(impl);
861 nir_index_ssa_defs(impl);
862 nir_foreach_block(block, impl) {
863 nir_foreach_instr(instr, block)
864 instr->pass_flags = 0;
865 }
866
867 /* this gives an approximation/upper limit on how many nodes are needed
868 * (some ssa values do not represent an allocated register)
869 */
870 unsigned max_nodes = impl->ssa_alloc + impl->reg_alloc;
871 unsigned *live_map = ralloc_array(NULL, unsigned, max_nodes);
872 memset(live_map, 0xff, sizeof(unsigned) * max_nodes);
873 struct live_def *defs = rzalloc_array(NULL, struct live_def, max_nodes);
874
875 unsigned num_nodes = live_defs(impl, defs, live_map);
876 struct ra_graph *g = ra_alloc_interference_graph(regs, num_nodes);
877
878 /* set classes from num_components */
879 for (unsigned i = 0; i < num_nodes; i++) {
880 nir_instr *instr = defs[i].instr;
881 nir_dest *dest = defs[i].dest;
882 unsigned c = nir_dest_num_components(*dest) - 1;
883
884 if (instr->type == nir_instr_type_alu &&
885 state->c->specs->has_new_transcendentals) {
886 switch (nir_instr_as_alu(instr)->op) {
887 case nir_op_fdiv:
888 case nir_op_flog2:
889 case nir_op_fsin:
890 case nir_op_fcos:
891 assert(dest->is_ssa);
892 c = REG_CLASS_VIRT_VEC2T;
893 default:
894 break;
895 }
896 }
897
898 if (instr->type == nir_instr_type_intrinsic) {
899 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
900 /* can't have dst swizzle or sparse writemask on UBO loads */
901 if (intr->intrinsic == nir_intrinsic_load_ubo) {
902 assert(dest == &intr->dest);
903 if (dest->ssa.num_components == 2)
904 c = REG_CLASS_VIRT_VEC2C;
905 if (dest->ssa.num_components == 3)
906 c = REG_CLASS_VIRT_VEC3C;
907 }
908 }
909
910 ra_set_node_class(g, i, c);
911 }
912
913 nir_foreach_block(block, impl) {
914 nir_foreach_instr(instr, block) {
915 if (instr->type != nir_instr_type_intrinsic)
916 continue;
917
918 nir_dest *dest = dest_for_instr(instr);
919 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
920 unsigned reg;
921
922 switch (intr->intrinsic) {
923 case nir_intrinsic_store_deref: {
924 /* don't want outputs to be swizzled
925 * TODO: better would be to set the type to X/XY/XYZ/XYZW
926 * TODO: what if fragcoord.z is read after writing fragdepth?
927 */
928 nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
929 unsigned index = live_map[src_index(impl, &intr->src[1])];
930
931 if (shader->info.stage == MESA_SHADER_FRAGMENT &&
932 deref->var->data.location == FRAG_RESULT_DEPTH) {
933 ra_set_node_reg(g, index, REG_FRAG_DEPTH);
934 } else {
935 ra_set_node_class(g, index, REG_CLASS_VEC4);
936 }
937 } continue;
938 case nir_intrinsic_load_input:
939 reg = nir_intrinsic_base(intr) * NUM_REG_TYPES + (unsigned[]) {
940 REG_TYPE_VIRT_SCALAR_X,
941 REG_TYPE_VIRT_VEC2_XY,
942 REG_TYPE_VIRT_VEC3_XYZ,
943 REG_TYPE_VEC4,
944 }[nir_dest_num_components(*dest) - 1];
945 break;
946 case nir_intrinsic_load_instance_id:
947 reg = state->c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
948 break;
949 default:
950 continue;
951 }
952
953 ra_set_node_reg(g, live_map[dest_index(impl, dest)], reg);
954 }
955 }
956
957 /* add interference for intersecting live ranges */
958 for (unsigned i = 0; i < num_nodes; i++) {
959 assert(defs[i].live_start < defs[i].live_end);
960 for (unsigned j = 0; j < i; j++) {
961 if (defs[i].live_start >= defs[j].live_end || defs[j].live_start >= defs[i].live_end)
962 continue;
963 ra_add_node_interference(g, i, j);
964 }
965 }
966
967 ralloc_free(defs);
968
969 /* Allocate registers */
970 ASSERTED bool ok = ra_allocate(g);
971 assert(ok);
972
973 state->g = g;
974 state->regs = regs;
975 state->live_map = live_map;
976 state->num_nodes = num_nodes;
977 }
978
979 static unsigned
980 ra_finish(struct state *state)
981 {
982 /* TODO: better way to get number of registers used? */
983 unsigned j = 0;
984 for (unsigned i = 0; i < state->num_nodes; i++) {
985 j = MAX2(j, reg_get_base(state, ra_get_node_reg(state->g, i)) + 1);
986 }
987
988 ralloc_free(state->g);
989 ralloc_free(state->regs);
990 ralloc_free(state->live_map);
991
992 return j;
993 }
994
995 static void
996 emit_alu(struct state *state, nir_alu_instr * alu)
997 {
998 const nir_op_info *info = &nir_op_infos[alu->op];
999
1000 /* marked as dead instruction (vecN and other bypassed instr) */
1001 if (alu->instr.pass_flags)
1002 return;
1003
1004 assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));
1005
1006 unsigned dst_swiz;
1007 hw_dst dst = ra_dest(state, &alu->dest.dest, &dst_swiz);
1008
1009 /* compose alu write_mask with RA write mask */
1010 if (!alu->dest.dest.is_ssa)
1011 dst.write_mask = inst_write_mask_compose(alu->dest.write_mask, dst.write_mask);
1012
1013 switch (alu->op) {
1014 case nir_op_fdot2:
1015 case nir_op_fdot3:
1016 case nir_op_fdot4:
1017 /* not per-component - don't compose dst_swiz */
1018 dst_swiz = INST_SWIZ_IDENTITY;
1019 break;
1020 default:
1021 break;
1022 }
1023
1024 hw_src srcs[3];
1025
1026 for (int i = 0; i < info->num_inputs; i++) {
1027 nir_alu_src *asrc = &alu->src[i];
1028 hw_src src;
1029
1030 src = src_swizzle(get_src(state, &asrc->src), ALU_SWIZ(asrc));
1031 src = src_swizzle(src, dst_swiz);
1032
1033 if (src.rgroup != INST_RGROUP_IMMEDIATE) {
1034 src.neg = asrc->negate || (alu->op == nir_op_fneg);
1035 src.abs = asrc->abs || (alu->op == nir_op_fabs);
1036 } else {
1037 assert(!asrc->negate && alu->op != nir_op_fneg);
1038 assert(!asrc->abs && alu->op != nir_op_fabs);
1039 }
1040
1041 srcs[i] = src;
1042 }
1043
1044 emit(alu, alu->op, dst, srcs, alu->dest.saturate || (alu->op == nir_op_fsat));
1045 }
1046
1047 static void
1048 emit_tex(struct state *state, nir_tex_instr * tex)
1049 {
1050 unsigned dst_swiz;
1051 hw_dst dst = ra_dest(state, &tex->dest, &dst_swiz);
1052 nir_src *coord = NULL, *lod_bias = NULL, *compare = NULL;
1053
1054 for (unsigned i = 0; i < tex->num_srcs; i++) {
1055 switch (tex->src[i].src_type) {
1056 case nir_tex_src_coord:
1057 coord = &tex->src[i].src;
1058 break;
1059 case nir_tex_src_bias:
1060 case nir_tex_src_lod:
1061 assert(!lod_bias);
1062 lod_bias = &tex->src[i].src;
1063 break;
1064 case nir_tex_src_comparator:
1065 compare = &tex->src[i].src;
1066 break;
1067 default:
1068 compile_error(state->c, "Unhandled NIR tex src type: %d\n",
1069 tex->src[i].src_type);
1070 break;
1071 }
1072 }
1073
1074 emit(tex, tex->op, tex->sampler_index, dst_swiz, dst, get_src(state, coord),
1075 lod_bias ? get_src(state, lod_bias) : SRC_DISABLE,
1076 compare ? get_src(state, compare) : SRC_DISABLE);
1077 }
1078
1079 static void
1080 emit_intrinsic(struct state *state, nir_intrinsic_instr * intr)
1081 {
1082 switch (intr->intrinsic) {
1083 case nir_intrinsic_store_deref:
1084 emit(output, nir_src_as_deref(intr->src[0])->var, get_src(state, &intr->src[1]));
1085 break;
1086 case nir_intrinsic_discard_if:
1087 emit(discard, get_src(state, &intr->src[0]));
1088 break;
1089 case nir_intrinsic_discard:
1090 emit(discard, SRC_DISABLE);
1091 break;
1092 case nir_intrinsic_load_uniform: {
1093 unsigned dst_swiz;
1094 struct etna_inst_dst dst = ra_dest(state, &intr->dest, &dst_swiz);
1095
1096 /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
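   /* The MOVAR below loads the (possibly dynamic) uniform offset into the
    * address register, and the following MOV then reads uniform[base + a.x]
    * through the ADD_A_X addressing mode.
    */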
1097 emit_inst(state->c, &(struct etna_inst) {
1098 .opcode = INST_OPCODE_MOVAR,
1099 .dst.write_mask = 0x1,
1100 .src[2] = get_src(state, &intr->src[0]),
1101 });
1102 emit_inst(state->c, &(struct etna_inst) {
1103 .opcode = INST_OPCODE_MOV,
1104 .dst = dst,
1105 .src[2] = {
1106 .use = 1,
1107 .rgroup = INST_RGROUP_UNIFORM_0,
1108 .reg = nir_intrinsic_base(intr),
1109 .swiz = dst_swiz,
1110 .amode = INST_AMODE_ADD_A_X,
1111 },
1112 });
1113 } break;
1114 case nir_intrinsic_load_ubo: {
1115 /* TODO: if offset is of the form (x + C) then add C to the base instead */
1116 unsigned idx = nir_src_as_const_value(intr->src[0])[0].u32;
1117 unsigned dst_swiz;
1118 emit_inst(state->c, &(struct etna_inst) {
1119 .opcode = INST_OPCODE_LOAD,
1120 .type = INST_TYPE_U32,
1121 .dst = ra_dest(state, &intr->dest, &dst_swiz),
1122 .src[0] = get_src(state, &intr->src[1]),
1123 .src[1] = const_src(state, &CONST_VAL(ETNA_IMMEDIATE_UBO0_ADDR + idx, 0), 1),
1124 });
1125 } break;
1126 case nir_intrinsic_load_front_face:
1127 case nir_intrinsic_load_frag_coord:
1128 assert(intr->dest.is_ssa); /* TODO - lower phis could cause this */
1129 break;
1130 case nir_intrinsic_load_input:
1131 case nir_intrinsic_load_instance_id:
1132 break;
1133 default:
1134 compile_error(state->c, "Unhandled NIR intrinsic type: %s\n",
1135 nir_intrinsic_infos[intr->intrinsic].name);
1136 }
1137 }
1138
1139 static void
1140 emit_instr(struct state *state, nir_instr * instr)
1141 {
1142 switch (instr->type) {
1143 case nir_instr_type_alu:
1144 emit_alu(state, nir_instr_as_alu(instr));
1145 break;
1146 case nir_instr_type_tex:
1147 emit_tex(state, nir_instr_as_tex(instr));
1148 break;
1149 case nir_instr_type_intrinsic:
1150 emit_intrinsic(state, nir_instr_as_intrinsic(instr));
1151 break;
1152 case nir_instr_type_jump:
1153 assert(nir_instr_is_last(instr));
1154 case nir_instr_type_load_const:
1155 case nir_instr_type_ssa_undef:
1156 case nir_instr_type_deref:
1157 break;
1158 default:
1159 compile_error(state->c, "Unhandled NIR instruction type: %d\n", instr->type);
1160 break;
1161 }
1162 }
1163
1164 static void
1165 emit_block(struct state *state, nir_block * block)
1166 {
1167 emit(block_start, block->index);
1168
1169 nir_foreach_instr(instr, block)
1170 emit_instr(state, instr);
1171
1172 /* succs->index < block->index is for the loop case */
1173 nir_block *succs = block->successors[0];
1174 if (nir_block_ends_in_jump(block) || succs->index < block->index)
1175 emit(jump, succs->index, SRC_DISABLE);
1176 }
1177
1178 static void
1179 emit_cf_list(struct state *state, struct exec_list *list);
1180
1181 static void
1182 emit_if(struct state *state, nir_if * nif)
1183 {
1184 emit(jump, nir_if_first_else_block(nif)->index, get_src(state, &nif->condition));
1185 emit_cf_list(state, &nif->then_list);
1186
1187 /* jump at end of then_list to skip else_list
1188 * not needed if then_list already ends with a jump or else_list is empty
1189 */
1190 if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
1191 !nir_cf_list_is_empty_block(&nif->else_list))
1192 emit(jump, nir_if_last_else_block(nif)->successors[0]->index, SRC_DISABLE);
1193
1194 emit_cf_list(state, &nif->else_list);
1195 }
1196
1197 static void
1198 emit_cf_list(struct state *state, struct exec_list *list)
1199 {
1200 foreach_list_typed(nir_cf_node, node, node, list) {
1201 switch (node->type) {
1202 case nir_cf_node_block:
1203 emit_block(state, nir_cf_node_as_block(node));
1204 break;
1205 case nir_cf_node_if:
1206 emit_if(state, nir_cf_node_as_if(node));
1207 break;
1208 case nir_cf_node_loop:
1209 emit_cf_list(state, &nir_cf_node_as_loop(node)->body);
1210 break;
1211 default:
1212 compile_error(state->c, "Unknown NIR node type\n");
1213 break;
1214 }
1215 }
1216 }
1217
1218 /* based on nir_lower_vec_to_movs */
1219 static unsigned
1220 insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
1221 {
1222 assert(start_idx < nir_op_infos[vec->op].num_inputs);
1223 unsigned write_mask = (1u << start_idx);
1224
1225 nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
1226 nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
1227
1228 mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];
1229 mov->src[0].negate = vec->src[start_idx].negate;
1230 mov->src[0].abs = vec->src[start_idx].abs;
1231
1232 unsigned num_components = 1;
1233
1234 for (unsigned i = start_idx + 1; i < 4; i++) {
1235 if (!(vec->dest.write_mask & (1 << i)))
1236 continue;
1237
1238 if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
1239 vec->src[i].negate == vec->src[start_idx].negate &&
1240 vec->src[i].abs == vec->src[start_idx].abs) {
1241 write_mask |= (1 << i);
1242 mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
1243 num_components++;
1244 }
1245 }
1246
1247 mov->dest.write_mask = (1 << num_components) - 1;
1248 nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components, 32, NULL);
1249
1250 /* replace vec srcs with inserted mov */
1251 for (unsigned i = 0, j = 0; i < 4; i++) {
1252 if (!(write_mask & (1 << i)))
1253 continue;
1254
1255 nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, nir_src_for_ssa(&mov->dest.dest.ssa));
1256 vec->src[i].swizzle[0] = j++;
1257 }
1258
1259 nir_instr_insert_before(&vec->instr, &mov->instr);
1260
1261 return write_mask;
1262 }
1263
1264 /*
1265 * for vecN instructions:
1266 * -merge constant sources into a single src
1267 * -insert movs (nir_lower_vec_to_movs equivalent)
1268 * for non-vecN instructions:
1269 * -try to merge constants as single constant
1270 * -insert movs for multiple constants (pre-HALTI5)
1271 */
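/* Illustrative example for the non-vecN path: fadd(const_a, const_b) would end
 * up with two uniform-register sources on pre-HALTI5 hardware, so the pass
 * first tries to pack both constants into one combined immediate (fixing up
 * the swizzles), and only falls back to an extra mov when more than four
 * distinct components would be needed.
 */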
1272 static void
1273 lower_alu(struct state *state, nir_alu_instr *alu)
1274 {
1275 const nir_op_info *info = &nir_op_infos[alu->op];
1276
1277 nir_builder b;
1278 nir_builder_init(&b, state->impl);
1279 b.cursor = nir_before_instr(&alu->instr);
1280
1281 switch (alu->op) {
1282 case nir_op_vec2:
1283 case nir_op_vec3:
1284 case nir_op_vec4:
1285 break;
1286 default:
1287 /* pre-GC7000L can only have 1 uniform src per instruction */
1288 if (state->c->specs->halti >= 5)
1289 return;
1290
1291 nir_const_value value[4] = {};
1292 uint8_t swizzle[4][4] = {};
1293 unsigned swiz_max = 0, num_const = 0;
1294
1295 for (unsigned i = 0; i < info->num_inputs; i++) {
1296 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1297 if (!cv)
1298 continue;
1299
1300 unsigned num_components = info->input_sizes[i] ?: alu->dest.dest.ssa.num_components;
1301 for (unsigned j = 0; j < num_components; j++) {
1302 int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
1303 swizzle[i][j] = idx;
1304 swiz_max = MAX2(swiz_max, (unsigned) idx);
1305 }
1306 num_const++;
1307 }
1308
1309 /* nothing to do */
1310 if (num_const <= 1)
1311 return;
1312
1313 /* resolve with single combined const src */
1314 if (swiz_max < 4) {
1315 nir_ssa_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);
1316
1317 for (unsigned i = 0; i < info->num_inputs; i++) {
1318 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1319 if (!cv)
1320 continue;
1321
1322 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
1323
1324 for (unsigned j = 0; j < 4; j++)
1325 alu->src[i].swizzle[j] = swizzle[i][j];
1326 }
1327 return;
1328 }
1329
1330 /* resolve with movs */
1331 num_const = 0;
1332 for (unsigned i = 0; i < info->num_inputs; i++) {
1333 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1334 if (!cv)
1335 continue;
1336
1337 num_const++;
1338 if (num_const == 1)
1339 continue;
1340
1341 nir_ssa_def *mov = nir_mov(&b, alu->src[i].src.ssa);
1342 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
1343 }
1344 return;
1345 }
1346
1347 nir_const_value value[4];
1348 unsigned num_components = 0;
1349
1350 for (unsigned i = 0; i < info->num_inputs; i++) {
1351 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1352 if (cv)
1353 value[num_components++] = cv[alu->src[i].swizzle[0]];
1354 }
1355
1356 /* if there is more than one constant source to the vecN, combine them
1357 * into a single load_const (removing the vecN completely if all components
1358 * are constant)
1359 */
1360 if (num_components > 1) {
1361 nir_ssa_def *def = nir_build_imm(&b, num_components, 32, value);
1362
1363 if (num_components == info->num_inputs) {
1364 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(def));
1365 nir_instr_remove(&alu->instr);
1366 return;
1367 }
1368
1369 for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
1370 nir_const_value *cv = nir_src_as_const_value(alu->src[i].src);
1371 if (!cv)
1372 continue;
1373
1374 nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
1375 alu->src[i].swizzle[0] = j++;
1376 }
1377 }
1378
1379 unsigned finished_write_mask = 0;
1380 for (unsigned i = 0; i < 4; i++) {
1381 if (!(alu->dest.write_mask & (1 << i)))
1382 continue;
1383
1384 nir_ssa_def *ssa = alu->src[i].src.ssa;
1385
1386 /* check whether the vecN instruction is the only user of this value */
1387 bool need_mov = list_length(&ssa->if_uses) != 0;
1388 nir_foreach_use(use_src, ssa) {
1389 if (use_src->parent_instr != &alu->instr)
1390 need_mov = true;
1391 }
1392
1393 nir_instr *instr = ssa->parent_instr;
1394 switch (instr->type) {
1395 case nir_instr_type_alu:
1396 case nir_instr_type_tex:
1397 break;
1398 case nir_instr_type_intrinsic:
1399 if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
1400 need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->dest.ssa);
1401 break;
1402 }
1403 default:
1404 need_mov = true;
1405 }
1406
1407 if (need_mov && !(finished_write_mask & (1 << i)))
1408 finished_write_mask |= insert_vec_mov(alu, i, state->shader);
1409 }
1410 }
1411
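/* Top-level emission: lower vecN/constant sources and fold non-dynamic uniform
 * loads to constants, insert movs for outputs fed by sysvals/constants, leave
 * SSA form, run the register allocator (ra_assign) and then walk the control
 * flow list emitting hardware instructions, returning the number of temps and
 * constants used.
 */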
1412 static bool
1413 emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
1414 {
1415 nir_shader *shader = c->nir;
1416
1417 struct state state = {
1418 .c = c,
1419 .shader = shader,
1420 .impl = nir_shader_get_entrypoint(shader),
1421 };
1422 bool have_indirect_uniform = false;
1423 unsigned indirect_max = 0;
1424
1425 nir_builder b;
1426 nir_builder_init(&b, state.impl);
1427
1428 /* convert non-dynamic uniform loads to constants, etc */
1429 nir_foreach_block(block, state.impl) {
1430 nir_foreach_instr_safe(instr, block) {
1431 switch(instr->type) {
1432 case nir_instr_type_alu:
1433 /* deals with vecN and const srcs */
1434 lower_alu(&state, nir_instr_as_alu(instr));
1435 break;
1436 case nir_instr_type_load_const: {
1437 nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
1438 for (unsigned i = 0; i < load_const->def.num_components; i++)
1439 load_const->value[i] = CONST(load_const->value[i].u32);
1440 } break;
1441 case nir_instr_type_intrinsic: {
1442 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1443 /* TODO: load_ubo can also become a constant in some cases
1444 * (at the moment it can end up emitting a LOAD with two
1445 * uniform sources, which could be a problem on HALTI2)
1446 */
1447 if (intr->intrinsic != nir_intrinsic_load_uniform)
1448 break;
1449 nir_const_value *off = nir_src_as_const_value(intr->src[0]);
1450 if (!off || off[0].u64 >> 32 != ETNA_IMMEDIATE_CONSTANT) {
1451 have_indirect_uniform = true;
1452 indirect_max = nir_intrinsic_base(intr) + nir_intrinsic_range(intr);
1453 break;
1454 }
1455
1456 unsigned base = nir_intrinsic_base(intr);
1457 /* pre halti2 uniform offset will be float */
1458 if (c->specs->halti < 2)
1459 base += (unsigned) off[0].f32;
1460 else
1461 base += off[0].u32;
1462 nir_const_value value[4];
1463
1464 for (unsigned i = 0; i < intr->dest.ssa.num_components; i++) {
1465 if (nir_intrinsic_base(intr) < 0)
1466 value[i] = TEXSCALE(~nir_intrinsic_base(intr), i);
1467 else
1468 value[i] = UNIFORM(base * 4 + i);
1469 }
1470
1471 b.cursor = nir_after_instr(instr);
1472 nir_ssa_def *def = nir_build_imm(&b, intr->dest.ssa.num_components, 32, value);
1473
1474 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(def));
1475 nir_instr_remove(instr);
1476 } break;
1477 default:
1478 break;
1479 }
1480 }
1481 }
1482
1483 /* TODO: only emit required indirect uniform ranges */
1484 if (have_indirect_uniform) {
1485 for (unsigned i = 0; i < indirect_max * 4; i++)
1486 c->consts[i] = UNIFORM(i).u64;
1487 state.const_count = indirect_max;
1488 }
1489
1490 /* add mov for any store output using sysval/const */
1491 nir_foreach_block(block, state.impl) {
1492 nir_foreach_instr_safe(instr, block) {
1493 if (instr->type != nir_instr_type_intrinsic)
1494 continue;
1495
1496 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1497
1498 switch (intr->intrinsic) {
1499 case nir_intrinsic_store_deref: {
1500 nir_src *src = &intr->src[1];
1501 if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr)) {
1502 b.cursor = nir_before_instr(instr);
1503 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(nir_mov(&b, src->ssa)));
1504 }
1505 } break;
1506 default:
1507 break;
1508 }
1509 }
1510 }
1511
1512 /* call directly to avoid validation (load_const instructions don't pass validation at this point) */
1513 nir_convert_from_ssa(shader, true);
1514 nir_opt_dce(shader);
1515
1516 ra_assign(&state, shader);
1517
1518 emit_cf_list(&state, &nir_shader_get_entrypoint(shader)->body);
1519
1520 *num_temps = ra_finish(&state);
1521 *num_consts = state.const_count;
1522 return true;
1523 }