etnaviv: nir: use store_deref instead of store_output
src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
1 /*
2 * Copyright (c) 2012-2019 Etnaviv Project
3 * Copyright (c) 2019 Zodiac Inflight Innovations
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jonathan Marek <jonathan@marek.ca>
26 * Wladimir J. van der Laan <laanwj@gmail.com>
27 */
28
29 #include "etnaviv_compiler.h"
30 #include "etnaviv_asm.h"
31 #include "etnaviv_context.h"
32 #include "etnaviv_debug.h"
33 #include "etnaviv_disasm.h"
34 #include "etnaviv_uniforms.h"
35 #include "etnaviv_util.h"
36
37 #include <math.h>
38 #include "util/u_memory.h"
39 #include "util/register_allocate.h"
40 #include "compiler/nir/nir_builder.h"
41 #include "compiler/nir/nir_worklist.h"
42
43 #include "tgsi/tgsi_strings.h"
44 #include "util/u_half.h"
45
46 struct etna_compile {
47 nir_shader *nir;
48 #define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
49 const struct etna_specs *specs;
50 struct etna_shader_variant *variant;
51
52 /* block # to instr index */
53 unsigned *block_ptr;
54
55 /* Code generation */
56 int inst_ptr; /* current instruction pointer */
57 struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];
58
59 /* There was an error during compilation */
60 bool error;
61 };
62
63 #define compile_error(ctx, args...) ({ \
64 printf(args); \
65 ctx->error = true; \
66 assert(0); \
67 })
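/* editor's note: this macro relies on the GNU statement-expression extension;
 * assert(0) makes any compile_error() fatal in debug builds, while release
 * builds just set ctx->error and keep going.
 */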
68
 69 /* io-related lowering
 70  * run after lower_int_to_float because that pass adds i2f/f2i ops
71 */
72 static void
73 etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
74 {
75 nir_foreach_function(function, shader) {
76 nir_builder b;
77 nir_builder_init(&b, function->impl);
78
79 nir_foreach_block(block, function->impl) {
80 nir_foreach_instr_safe(instr, block) {
81 if (instr->type == nir_instr_type_intrinsic) {
82 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
83
84 switch (intr->intrinsic) {
85 case nir_intrinsic_load_front_face: {
 86              * HW front_face is 0.0/1.0, not 0/~0u as NIR booleans expect,
 87              * so lower it with a comparison against 0
88 */
89 intr->dest.ssa.bit_size = 32;
90
91 b.cursor = nir_after_instr(instr);
92
93 nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
94 if (v->key.front_ccw)
95 nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;
96
97 nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
98 nir_src_for_ssa(ssa),
99 ssa->parent_instr);
100 } break;
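            /* Sketch of the rewrite above (editor's illustration, not from
             * the source): a boolean use of front_face such as
             *     ssa_f = load_front_face       ; HW value, 0.0 / 1.0
             *     ... if ssa_f ...
             * becomes
             *     ssa_f = load_front_face
             *     ssa_b = ine ssa_f, 0          ; or ieq when front_ccw
             *     ... if ssa_b ...
             * nir_ssa_def_rewrite_uses_after() leaves the ine itself reading
             * the raw HW value while all later uses see the boolean.
             */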
101 case nir_intrinsic_store_deref: {
102 if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
103 break;
104
105 nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
106 assert(deref->deref_type == nir_deref_type_var);
107
108 if (deref->var->data.location != FRAG_RESULT_COLOR &&
109 deref->var->data.location != FRAG_RESULT_DATA0)
110 break;
111
112 b.cursor = nir_before_instr(instr);
113
114 nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
115 nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
116 alu->src[0].swizzle[0] = 2;
117 alu->src[0].swizzle[2] = 0;
118 nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
119 } break;
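            /* editor's note: the mov above is created with an identity
             * swizzle, so after swapping slots 0 and 2 it reads .zyxw,
             * turning an RGBA shader result into the BGRA component order
             * of the swapped render buffer.
             */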
120 case nir_intrinsic_load_uniform: {
121 /* multiply by 16 and convert to int */
122 b.cursor = nir_before_instr(instr);
123 nir_ssa_def *ssa = nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16));
124 nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(ssa));
125 } break;
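            /* editor's note: the x16 scale appears to turn a vec4 uniform
             * index into a byte offset (one vec4 = 16 bytes), which
             * etna_emit_load_ubo() below consumes directly.
             */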
126 default:
127 break;
128 }
129 }
130
131 if (instr->type != nir_instr_type_tex)
132 continue;
133
134 nir_tex_instr *tex = nir_instr_as_tex(instr);
135 nir_src *coord = NULL;
136 nir_src *lod_bias = NULL;
137 unsigned lod_bias_idx;
138
139 assert(tex->sampler_index == tex->texture_index);
140
141 for (unsigned i = 0; i < tex->num_srcs; i++) {
142 switch (tex->src[i].src_type) {
143 case nir_tex_src_coord:
144 coord = &tex->src[i].src;
145 break;
146 case nir_tex_src_bias:
147 case nir_tex_src_lod:
148 assert(!lod_bias);
149 lod_bias = &tex->src[i].src;
150 lod_bias_idx = i;
151 break;
152 default:
153 assert(0);
154 break;
155 }
156 }
157
158 if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
159 /* use a dummy load_uniform here to represent texcoord scale */
160 b.cursor = nir_before_instr(instr);
161 nir_intrinsic_instr *load =
162 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
163 nir_intrinsic_set_base(load, ~tex->sampler_index);
164 load->num_components = 2;
165 load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
166 nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
167 nir_intrinsic_set_type(load, nir_type_float);
168
169 nir_builder_instr_insert(&b, &load->instr);
170
171 nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
172 nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
173 }
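          /* Illustration (editor's assumption): for a sampler-0 RECT lookup
           * the block above produces roughly
           *     ssa_scale = load_uniform base=~0, src=0.0
           *     ssa_coord = fmul coord, ssa_scale
           * with the out-of-range base (~sampler_index) presumably picked up
           * later by the uniform handling as a texcoord-scale slot rather
           * than a real constant-buffer offset.
           */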
174
175          /* pre-HALTI5 hardware needs all texture sources packed into a single source register */
176
177 if (!lod_bias || v->shader->specs->halti >= 5)
178 continue;
179
180 assert(coord && lod_bias && tex->coord_components < 4);
181
182 nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
183 for (unsigned i = 0; i < tex->coord_components; i++) {
184 vec->src[i].src = nir_src_for_ssa(coord->ssa);
185 vec->src[i].swizzle[0] = i;
186 }
187 for (unsigned i = tex->coord_components; i < 4; i++)
188 vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);
189
190 vec->dest.write_mask = 0xf;
191 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);
192
193 nir_tex_instr_remove_src(tex, lod_bias_idx);
194 nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
195 tex->coord_components = 4;
196
197 nir_instr_insert_before(&tex->instr, &vec->instr);
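          /* Worked example (editor's sketch): a 2D txb with coord (x, y) and
           * bias b is rewritten so that
           *     vec4 (x, y, b, b)
           * becomes the sole coordinate source with coord_components = 4;
           * the bias travels in the spare .z/.w lanes, which is presumably
           * where pre-HALTI5 hardware expects it.
           */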
198 }
199 }
200 }
201 }
202
203 static bool
204 etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
205 {
206 const struct etna_specs *specs = data;
207
208 if (instr->type != nir_instr_type_alu)
209 return false;
210
211 nir_alu_instr *alu = nir_instr_as_alu(instr);
212 switch (alu->op) {
213 case nir_op_frsq:
214 case nir_op_frcp:
215 case nir_op_flog2:
216 case nir_op_fexp2:
217 case nir_op_fsqrt:
218 case nir_op_fcos:
219 case nir_op_fsin:
220 case nir_op_fdiv:
221 case nir_op_imul:
222 return true;
223 /* TODO: can do better than alu_to_scalar for vector compares */
224 case nir_op_b32all_fequal2:
225 case nir_op_b32all_fequal3:
226 case nir_op_b32all_fequal4:
227 case nir_op_b32any_fnequal2:
228 case nir_op_b32any_fnequal3:
229 case nir_op_b32any_fnequal4:
230 case nir_op_b32all_iequal2:
231 case nir_op_b32all_iequal3:
232 case nir_op_b32all_iequal4:
233 case nir_op_b32any_inequal2:
234 case nir_op_b32any_inequal3:
235 case nir_op_b32any_inequal4:
236 return true;
237 case nir_op_fdot2:
238 if (!specs->has_halti2_instructions)
239 return true;
240 break;
241 default:
242 break;
243 }
244
245 return false;
246 }
247
248 static void
249 etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
250 {
251 nir_shader *shader = impl->function->shader;
252
253 nir_builder b;
254 nir_builder_init(&b, impl);
255
256    /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
257 nir_foreach_block(block, impl) {
258 nir_foreach_instr_safe(instr, block) {
259 if (instr->type != nir_instr_type_alu)
260 continue;
261
262 nir_alu_instr *alu = nir_instr_as_alu(instr);
263          /* multiply sin/cos src by 2/pi (1/pi with new transcendentals)
264           * TODO: do this earlier (but it breaks const_prop opt)
265 */
266 if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
267 b.cursor = nir_before_instr(instr);
268
269 nir_ssa_def *imm = c->specs->has_new_transcendentals ?
270 nir_imm_float(&b, 1.0 / M_PI) :
271 nir_imm_float(&b, 2.0 / M_PI);
272
273 nir_instr_rewrite_src(instr, &alu->src[0].src,
274 nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
275 }
276
277 /* change transcendental ops to vec2 and insert vec1 mul for the result
278 * TODO: do this earlier (but it breaks with optimizations)
279 */
280 if (c->specs->has_new_transcendentals && (
281 alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
282 alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
283 nir_ssa_def *ssa = &alu->dest.dest.ssa;
284
285 assert(ssa->num_components == 1);
286
287 nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
288 mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
289 mul->src[1].swizzle[0] = 1;
290
291 mul->dest.write_mask = 1;
292 nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
293
294 ssa->num_components = 2;
295
296 mul->dest.saturate = alu->dest.saturate;
297 alu->dest.saturate = 0;
298
299 nir_instr_insert_after(instr, &mul->instr);
300
301 nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
302 }
303 }
304 }
305 }
306
307 static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
308 {
309 nir_foreach_function(function, shader) {
310 if (function->impl)
311 etna_lower_alu_impl(function->impl, c);
312 }
313 }
314
315 static void
316 emit_inst(struct etna_compile *c, struct etna_inst *inst)
317 {
318 c->code[c->inst_ptr++] = *inst;
319 }
320
321 /* maps nir srcs to etna_inst srcs */
322 enum {
323 SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
324 SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
325 SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
326 SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
327 SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
328 SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
329 SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
330 };
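/* editor's note: each SRC_ value packs three 2-bit fields, one per hardware
 * source slot j (bits [2j+1:2j]). A field value of 0..2 selects nir src[i];
 * 3 ("X") leaves the slot unused. E.g. SRC_0_X_1 routes nir src 0 to
 * inst.src[0] and nir src 1 to inst.src[2], matching the decode loop at the
 * end of etna_emit_alu() below.
 */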
331
332 /* info to translate a nir op to etna_inst */
333 struct etna_op_info {
334 uint8_t opcode; /* INST_OPCODE_ */
335 uint8_t src; /* SRC_ enum */
336 uint8_t cond; /* INST_CONDITION_ */
337 uint8_t type; /* INST_TYPE_ */
338 };
339
340 static const struct etna_op_info etna_ops[] = {
341 [0 ... nir_num_opcodes - 1] = {0xff},
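/* editor's note: the GNU range initializer above marks every nir opcode as
 * unsupported (opcode 0xff) by default; etna_emit_alu() asserts on 0xff, so
 * a missing table entry fails loudly in debug builds.
 */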
342 #undef TRUE
343 #undef FALSE
344 #define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
345 INST_OPCODE_##op, \
346 SRC_##src, \
347 INST_CONDITION_##cond, \
348 INST_TYPE_##type \
349 }
350 #define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
351 #define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
352 #define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
353 #define OP(nir, op, src) OPC(nir, op, src, TRUE)
354 #define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
355 #define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
356 OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
357 OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
358 OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
359 OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
360 OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
361 OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
362 OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
363 OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
364 OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
365 OPC(fcsel, SELECT, 0_1_2, NZ),
366 OP(fdiv, DIV, 0_1_X),
367 OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),
368
369 /* type convert */
370 IOP(i2f32, I2F, 0_X_X),
371 UOP(u2f32, I2F, 0_X_X),
372 IOP(f2i32, F2I, 0_X_X),
373 UOP(f2u32, F2I, 0_X_X),
374 UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
375 UOP(b2i32, AND, 0_X_X), /* AND with 1 */
376 OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
377 UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */
378
379 /* arithmetic */
380 IOP(iadd, ADD, 0_X_1),
381 IOP(imul, IMULLO0, 0_1_X),
382 /* IOP(imad, IMADLO0, 0_1_2), */
383 IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
384 IOP(iabs, IABS, X_X_0),
385 IOP(isign, SIGN, X_X_0),
386 IOPC(imin, SELECT, 0_1_0, GT),
387 IOPC(imax, SELECT, 0_1_0, LT),
388 UOPC(umin, SELECT, 0_1_0, GT),
389 UOPC(umax, SELECT, 0_1_0, LT),
390
391 /* select */
392 UOPC(b32csel, SELECT, 0_1_2, NZ),
393
394 /* compare with int result */
395 OPC(feq32, CMP, 0_1_X, EQ),
396 OPC(fne32, CMP, 0_1_X, NE),
397 OPC(fge32, CMP, 0_1_X, GE),
398 OPC(flt32, CMP, 0_1_X, LT),
399 IOPC(ieq32, CMP, 0_1_X, EQ),
400 IOPC(ine32, CMP, 0_1_X, NE),
401 IOPC(ige32, CMP, 0_1_X, GE),
402 IOPC(ilt32, CMP, 0_1_X, LT),
403 UOPC(uge32, CMP, 0_1_X, GE),
404 UOPC(ult32, CMP, 0_1_X, LT),
405
406 /* bit ops */
407 IOP(ior, OR, 0_X_1),
408 IOP(iand, AND, 0_X_1),
409 IOP(ixor, XOR, 0_X_1),
410 IOP(inot, NOT, X_X_0),
411 IOP(ishl, LSHIFT, 0_X_1),
412 IOP(ishr, RSHIFT, 0_X_1),
413 UOP(ushr, RSHIFT, 0_X_1),
414 };
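/* Example of reading the table (editor's note): OP(fadd, ADD, 0_X_1) expands
 * to
 *     [nir_op_fadd] = { INST_OPCODE_ADD, SRC_0_X_1,
 *                       INST_CONDITION_TRUE, INST_TYPE_F32 }
 * i.e. a nir fadd becomes an ADD whose operands sit in hardware source slots
 * 0 and 2, with slot 1 unused.
 */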
415
416 static void
417 etna_emit_block_start(struct etna_compile *c, unsigned block)
418 {
419 c->block_ptr[block] = c->inst_ptr;
420 }
421
422 static void
423 etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
424 struct etna_inst_src src[3], bool saturate)
425 {
426 struct etna_op_info ei = etna_ops[op];
427 unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);
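   /* editor's note: swiz_scalar broadcasts the first written dst component
    * (e.g. .y for write_mask 0x2) across all four channels; it is composed
    * onto the sources of scalar ops below so they read the matching lane.
    */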
428
429 assert(ei.opcode != 0xff);
430
431 struct etna_inst inst = {
432 .opcode = ei.opcode,
433 .type = ei.type,
434 .cond = ei.cond,
435 .dst = dst,
436 .sat = saturate,
437 };
438
439 switch (op) {
440 case nir_op_fdiv:
441 case nir_op_flog2:
442 case nir_op_fsin:
443 case nir_op_fcos:
444 if (c->specs->has_new_transcendentals)
445 inst.tex.amode = 1;
446 /* fall through */
447 case nir_op_frsq:
448 case nir_op_frcp:
449 case nir_op_fexp2:
450 case nir_op_fsqrt:
451 case nir_op_imul:
452       /* for scalar instructions we want the src in the component selected by the dst write mask */
453 src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
454 src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
455 break;
456 /* deal with instructions which don't have 1:1 mapping */
457 case nir_op_b2f32:
458 inst.src[2] = etna_immediate_float(1.0f);
459 break;
460 case nir_op_b2i32:
461 inst.src[2] = etna_immediate_int(1);
462 break;
463 case nir_op_f2b32:
464 inst.src[1] = etna_immediate_float(0.0f);
465 break;
466 case nir_op_i2b32:
467 inst.src[1] = etna_immediate_int(0);
468 break;
469 case nir_op_ineg:
470 inst.src[0] = etna_immediate_int(0);
471 src[0].neg = 1;
472 break;
473 default:
474 break;
475 }
476
477 /* set the "true" value for CMP instructions */
478 if (inst.opcode == INST_OPCODE_CMP)
479 inst.src[2] = etna_immediate_int(-1);
480
481 for (unsigned j = 0; j < 3; j++) {
482 unsigned i = ((ei.src >> j*2) & 3);
483 if (i < 3)
484 inst.src[j] = src[i];
485 }
486
487 emit_inst(c, &inst);
488 }
489
490 static void
491 etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
492 struct etna_inst_dst dst, struct etna_inst_src coord,
493 struct etna_inst_src lod_bias)
494 {
495 struct etna_inst inst = {
496 .dst = dst,
497 .tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
498 .tex.swiz = dst_swiz,
499 .src[0] = coord,
500 };
501
502 if (lod_bias.use)
503 inst.src[1] = lod_bias;
504
505 switch (op) {
506 case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
507 case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
508 case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
509 default:
510 assert(0);
511 }
512
513 emit_inst(c, &inst);
514 }
515
516 static void
517 etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
518 {
519 if (!condition.use) {
520 emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block });
521 return;
522 }
523
524 struct etna_inst inst = {
525 .opcode = INST_OPCODE_BRANCH,
526 .cond = INST_CONDITION_NOT,
527 .type = INST_TYPE_U32,
528 .src[0] = condition,
529 .imm = block,
530 };
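   /* editor's note: (swiz & 3) extracts the x slot of the condition's
    * swizzle; broadcasting it makes every channel read that same component,
    * since the branch condition is scalar.
    */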
531 inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
532 emit_inst(c, &inst);
533 }
534
535 static void
536 etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
537 {
538 if (!condition.use) {
539 emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
540 return;
541 }
542
543 struct etna_inst inst = {
544 .opcode = INST_OPCODE_TEXKILL,
545 .cond = INST_CONDITION_NZ,
546 .type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
547 .src[0] = condition,
548 };
549 inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
550 emit_inst(c, &inst);
551 }
552
553 static void
554 etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
555 {
556 struct etna_shader_io_file *sf = &c->variant->outfile;
557
558 if (is_fs(c)) {
559 switch (var->data.location) {
560 case FRAG_RESULT_COLOR:
561 case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
562 c->variant->ps_color_out_reg = src.reg;
563 break;
564 case FRAG_RESULT_DEPTH:
565 c->variant->ps_depth_out_reg = src.reg;
566 break;
567 default:
568 unreachable("Unsupported fs output");
569 }
570 return;
571 }
572
573 switch (var->data.location) {
574 case VARYING_SLOT_POS:
575 c->variant->vs_pos_out_reg = src.reg;
576 break;
577 case VARYING_SLOT_PSIZ:
578 c->variant->vs_pointsize_out_reg = src.reg;
579 break;
580 default:
581 sf->reg[sf->num_reg].reg = src.reg;
582 sf->reg[sf->num_reg].slot = var->data.location;
583 sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
584 sf->num_reg++;
585 break;
586 }
587 }
588
589 static void
590 etna_emit_load_ubo(struct etna_compile *c, struct etna_inst_dst dst,
591 struct etna_inst_src src, struct etna_inst_src base)
592 {
593 /* convert float offset back to integer */
594 if (c->specs->halti < 2) {
595 emit_inst(c, &(struct etna_inst) {
596 .opcode = INST_OPCODE_F2I,
597 .type = INST_TYPE_U32,
598 .dst = dst,
599 .src[0] = src,
600 });
601
602 emit_inst(c, &(struct etna_inst) {
603 .opcode = INST_OPCODE_LOAD,
604 .type = INST_TYPE_U32,
605 .dst = dst,
606 .src[0] = {
607 .use = 1,
608 .rgroup = INST_RGROUP_TEMP,
609 .reg = dst.reg,
610 .swiz = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1)
611 },
612 .src[1] = base,
613 });
614
615 return;
616 }
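   /* Editor's sketch of the sequence above (illustrative register numbers):
    * for dst = t3.x and base = u0.x it emits
    *     F2I.U32  t3.x, src          ; float offset -> integer, in place
    *     LOAD.U32 t3.x, t3.x, u0.x   ; dst doubles as the address temp
    * reusing the destination register avoids allocating a scratch temp.
    */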
617
618 emit_inst(c, &(struct etna_inst) {
619 .opcode = INST_OPCODE_LOAD,
620 .type = INST_TYPE_U32,
621 .dst = dst,
622 .src[0] = src,
623 .src[1] = base,
624 });
625 }
626
627 #define OPT(nir, pass, ...) ({ \
628 bool this_progress = false; \
629 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
630 this_progress; \
631 })
632 #define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
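/* editor's note: OPT() wraps NIR_PASS in a statement expression so a pass's
 * progress can be accumulated (progress |= ...) or tested directly, as in
 * the nir_opt_trivial_continues branch below.
 */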
633
634 static void
635 etna_optimize_loop(nir_shader *s)
636 {
637 bool progress;
638 do {
639 progress = false;
640
641 OPT_V(s, nir_lower_vars_to_ssa);
642 progress |= OPT(s, nir_opt_copy_prop_vars);
643 progress |= OPT(s, nir_copy_prop);
644 progress |= OPT(s, nir_opt_dce);
645 progress |= OPT(s, nir_opt_cse);
646 progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
647 progress |= OPT(s, nir_opt_intrinsics);
648 progress |= OPT(s, nir_opt_algebraic);
649 progress |= OPT(s, nir_opt_constant_folding);
650 progress |= OPT(s, nir_opt_dead_cf);
651 if (OPT(s, nir_opt_trivial_continues)) {
652 progress = true;
653 /* If nir_opt_trivial_continues makes progress, then we need to clean
654 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
655 * to make progress.
656 */
657 OPT(s, nir_copy_prop);
658 OPT(s, nir_opt_dce);
659 }
660 progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
661 progress |= OPT(s, nir_opt_if, false);
662 progress |= OPT(s, nir_opt_remove_phis);
663 progress |= OPT(s, nir_opt_undef);
664 }
665 while (progress);
666 }
667
668 static int
669 etna_glsl_type_size(const struct glsl_type *type, bool bindless)
670 {
671 return glsl_count_attribute_slots(type, false);
672 }
673
674 static void
675 copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
676 {
677 struct etna_shader_uniform_info *uinfo = &sobj->uniforms;
678
679 uinfo->imm_count = count * 4;
680 uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
681 uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));
682
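   /* editor's note: each 64-bit consts entry packs the 32-bit immediate in
    * the low word and its contents tag (etna_immediate_contents) in the high
    * word; the loop below unzips them into the two parallel arrays.
    */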
683 for (unsigned i = 0; i < uinfo->imm_count; i++) {
684 uinfo->imm_data[i] = consts[i];
685 uinfo->imm_contents[i] = consts[i] >> 32;
686 }
687
688 etna_set_shader_uniforms_dirty_flags(sobj);
689 }
690
691 #include "etnaviv_compiler_nir_emit.h"
692
693 bool
694 etna_compile_shader_nir(struct etna_shader_variant *v)
695 {
696 if (unlikely(!v))
697 return false;
698
699 struct etna_compile *c = CALLOC_STRUCT(etna_compile);
700 if (!c)
701 return false;
702
703 c->variant = v;
704 c->specs = v->shader->specs;
705 c->nir = nir_shader_clone(NULL, v->shader->nir);
706
707 nir_shader *s = c->nir;
708 const struct etna_specs *specs = c->specs;
709
710 v->stage = s->info.stage;
711 v->num_loops = 0; /* TODO */
712 v->vs_id_in_reg = -1;
713 v->vs_pos_out_reg = -1;
714 v->vs_pointsize_out_reg = -1;
715    v->ps_color_out_reg = 0; /* 0 for shaders that don't write fragcolor.. */
716 v->ps_depth_out_reg = -1;
717
718 /* setup input linking */
719 struct etna_shader_io_file *sf = &v->infile;
720 if (s->info.stage == MESA_SHADER_VERTEX) {
721 nir_foreach_variable(var, &s->inputs) {
722 unsigned idx = var->data.driver_location;
723 sf->reg[idx].reg = idx;
724 sf->reg[idx].slot = var->data.location;
725 sf->reg[idx].num_components = glsl_get_components(var->type);
726 sf->num_reg = MAX2(sf->num_reg, idx+1);
727 }
728 } else {
729 unsigned count = 0;
730 nir_foreach_variable(var, &s->inputs) {
731 unsigned idx = var->data.driver_location;
732 sf->reg[idx].reg = idx + 1;
733 sf->reg[idx].slot = var->data.location;
734 sf->reg[idx].num_components = glsl_get_components(var->type);
735 sf->num_reg = MAX2(sf->num_reg, idx+1);
736 count++;
737 }
738 assert(sf->num_reg == count);
739 }
740
741 NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
742 (nir_lower_io_options)0);
743
744 OPT_V(s, nir_lower_regs_to_ssa);
745 OPT_V(s, nir_lower_vars_to_ssa);
746 OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
747 OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
748 OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
749
750 etna_optimize_loop(s);
751
752 OPT_V(s, etna_lower_io, v);
753
754    /* lower pre-halti2 shaders to float (halti0 does have integers, but only scalar ops..) */
755 if (c->specs->halti < 2) {
756       /* use opt_algebraic between int_to_float and bool_to_float because
757 * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
758 */
759 OPT_V(s, nir_lower_int_to_float);
760 OPT_V(s, nir_opt_algebraic);
761 OPT_V(s, nir_lower_bool_to_float);
762 } else {
763 OPT_V(s, nir_lower_idiv);
764 OPT_V(s, nir_lower_bool_to_int32);
765 }
766
767 etna_optimize_loop(s);
768
769 if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
770 nir_print_shader(s, stdout);
771
772    while (OPT(s, nir_opt_vectorize));
773 OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
774
775 NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
776 NIR_PASS_V(s, nir_opt_algebraic_late);
777
778 NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
779 NIR_PASS_V(s, nir_copy_prop);
780    /* the only HW-supported integer source mod is ineg, for the iadd instruction (?) */
781 NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
782 /* need copy prop after uses_to_dest, and before src mods: see
783 * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
784 */
785
786 NIR_PASS_V(s, nir_opt_dce);
787
788 NIR_PASS_V(s, etna_lower_alu, c);
789
790 if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
791 nir_print_shader(s, stdout);
792
793 uint64_t consts[ETNA_MAX_IMM] = {};
794
795 unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
796 c->block_ptr = block_ptr;
797 struct emit_options options = {
798 .max_temps = ETNA_MAX_TEMPS,
799 .max_consts = ETNA_MAX_IMM / 4,
800 .id_reg = sf->num_reg,
801 .single_const_src = c->specs->halti < 5,
802 .etna_new_transcendentals = c->specs->has_new_transcendentals,
803 .no_integers = c->specs->halti < 2,
804 .user = c,
805 .consts = consts,
806 };
807
808 unsigned num_consts;
809 ASSERTED bool ok = emit_shader(c->nir, &options, &v->num_temps, &num_consts);
810 assert(ok);
811
812 /* empty shader, emit NOP */
813 if (!c->inst_ptr)
814 emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });
815
816 /* assemble instructions, fixing up labels */
817 uint32_t *code = MALLOC(c->inst_ptr * 16 + 1024);
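   /* editor's note: each etna_inst assembles into four 32-bit words (16
    * bytes), hence the inst_ptr * 16 allocation and the code[i * 4] indexing
    * below; the extra 1024 bytes look like padding slack (assumption).
    */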
818 for (unsigned i = 0; i < c->inst_ptr; i++) {
819 struct etna_inst *inst = &c->code[i];
820 if (inst->opcode == INST_OPCODE_BRANCH)
821 inst->imm = block_ptr[inst->imm];
822
823 inst->halti5 = specs->halti >= 5;
824 etna_assemble(&code[i * 4], inst);
825 }
826
827 v->code_size = c->inst_ptr * 4;
828 v->code = code;
829 v->needs_icache = c->inst_ptr > specs->max_instructions;
830
831 copy_uniform_state_to_shader(v, consts, num_consts);
832
833 if (s->info.stage == MESA_SHADER_FRAGMENT) {
834 v->input_count_unk8 = 31; /* XXX what is this */
835 assert(v->ps_depth_out_reg <= 0);
836 ralloc_free(c->nir);
837 FREE(c);
838 return true;
839 }
840
841 v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */
842
843    /* fill in "mystery meat" load balancing value. This value determines how
844     * work is scheduled between VS and PS in the unified shader architecture.
845     * More precisely, it is determined from the number of VS outputs, as well
846     * as the chip-specific vertex output buffer size, vertex cache size, and
847     * number of shader cores.
848     *
849     * XXX this is a conservative estimate, the "optimal" value is only known
850     * for sure at link time because some outputs may be unused and thus
851     * unmapped. Then again, in the general use case with GLSL the vertex and
852     * fragment shaders are linked already before submitting to Gallium, thus
853     * all outputs are used.
854     *
855     * note: the TGSI compiler counts all outputs (including position and
856     * pointsize), while v->outfile.num_reg here only counts varyings; the +1
857     * compensates for the position output
858     * TODO: we might have a problem that pointsize is not counted when it is
859     * used
860     */
861
862 int half_out = v->outfile.num_reg / 2 + 1;
863 assert(half_out);
864
865 uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
866 2 * half_out * specs->vertex_cache_size)) +
867 9) /
868 10;
869 uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
870 v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
871 VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
872 VIVS_VS_LOAD_BALANCING_C(0x3f) |
873 VIVS_VS_LOAD_BALANCING_D(0x0f);
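   /* Worked example (hypothetical spec values, for illustration only:
    * vertex_output_buffer_size = 512, vertex_cache_size = 16,
    * shader_core_count = 4, two varying registers -> half_out = 2):
    *     b = ((20480 / (512 - 2*2*16)) + 9) / 10 = (45 + 9) / 10 = 5
    *     a = (5 + 256 / (4 * 2)) / 2             = (5 + 32) / 2  = 18
    * giving VS_LOAD_BALANCING A=18, B=5, with C/D fixed at 0x3f/0x0f.
    */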
874
875 ralloc_free(c->nir);
876 FREE(c);
877 return true;
878 }
879
880 void
881 etna_destroy_shader_nir(struct etna_shader_variant *shader)
882 {
883 assert(shader);
884
885 FREE(shader->code);
886 FREE(shader->uniforms.imm_data);
887 FREE(shader->uniforms.imm_contents);
888 FREE(shader);
889 }
890
891 extern const char *tgsi_swizzle_names[];
892 void
893 etna_dump_shader_nir(const struct etna_shader_variant *shader)
894 {
895 if (shader->stage == MESA_SHADER_VERTEX)
896 printf("VERT\n");
897 else
898 printf("FRAG\n");
899
900 etna_disasm(shader->code, shader->code_size, PRINT_RAW);
901
902 printf("num loops: %i\n", shader->num_loops);
903 printf("num temps: %i\n", shader->num_temps);
904 printf("immediates:\n");
905 for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
906 printf(" [%i].%s = %f (0x%08x) (%d)\n",
907 idx / 4,
908 tgsi_swizzle_names[idx % 4],
909 *((float *)&shader->uniforms.imm_data[idx]),
910 shader->uniforms.imm_data[idx],
911 shader->uniforms.imm_contents[idx]);
912 }
913 printf("inputs:\n");
914 for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
915 printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
916 (shader->stage == MESA_SHADER_VERTEX) ?
917 gl_vert_attrib_name(shader->infile.reg[idx].slot) :
918 gl_varying_slot_name(shader->infile.reg[idx].slot),
919 shader->infile.reg[idx].num_components);
920 }
921 printf("outputs:\n");
922 for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
923 printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
924 (shader->stage == MESA_SHADER_VERTEX) ?
925 gl_varying_slot_name(shader->outfile.reg[idx].slot) :
926 gl_frag_result_name(shader->outfile.reg[idx].slot),
927 shader->outfile.reg[idx].num_components);
928 }
929 printf("special:\n");
930 if (shader->stage == MESA_SHADER_VERTEX) {
931 printf(" vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
932 printf(" vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
933 printf(" vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
934 } else {
935 printf(" ps_color_out_reg=%i\n", shader->ps_color_out_reg);
936 printf(" ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
937 }
938 printf(" input_count_unk8=0x%08x\n", shader->input_count_unk8);
939 }
940
941 static const struct etna_shader_inout *
942 etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
943 const struct etna_shader_inout *in)
944 {
945 for (int i = 0; i < sobj->outfile.num_reg; i++)
946 if (sobj->outfile.reg[i].slot == in->slot)
947 return &sobj->outfile.reg[i];
948
949 return NULL;
950 }
951
952 bool
953 etna_link_shader_nir(struct etna_shader_link_info *info,
954 const struct etna_shader_variant *vs,
955 const struct etna_shader_variant *fs)
956 {
957 int comp_ofs = 0;
958 /* For each fragment input we need to find the associated vertex shader
959 * output, which can be found by matching on semantic name and index. A
960 * binary search could be used because the vs outputs are sorted by their
961 * semantic index and grouped by semantic type by fill_in_vs_outputs.
962 */
963 assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
964 info->pcoord_varying_comp_ofs = -1;
965
966 for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
967 const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
968 const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
969 struct etna_varying *varying;
970 bool interpolate_always = true;
971
972 assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));
973
974 if (fsio->reg > info->num_varyings)
975 info->num_varyings = fsio->reg;
976
977 varying = &info->varyings[fsio->reg - 1];
978 varying->num_components = fsio->num_components;
979
980 if (!interpolate_always) /* colors affected by flat shading */
981 varying->pa_attributes = 0x200;
982 else /* texture coord or other bypasses flat shading */
983 varying->pa_attributes = 0x2f1;
984
985 varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
986 varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
987 varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
988 varying->use[3] = VARYING_COMPONENT_USE_UNUSED;
989
990 /* point coord is an input to the PS without matching VS output,
991 * so it gets a varying slot without being assigned a VS register.
992 */
993 if (fsio->slot == VARYING_SLOT_PNTC) {
994 varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
995 varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;
996
997 info->pcoord_varying_comp_ofs = comp_ofs;
998 } else {
999 if (vsio == NULL) { /* not found -- link error */
1000 BUG("Semantic value not found in vertex shader outputs\n");
1001 return true;
1002 }
1003 varying->reg = vsio->reg;
1004 }
1005
1006 comp_ofs += varying->num_components;
1007 }
1008
1009 assert(info->num_varyings == fs->infile.num_reg);
1010
1011 return false;
1012 }