etnaviv: move etna_lower_io(..) to etnaviv_nir.c
src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_compiler.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_nir.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"

#include <math.h>
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"

#include "tgsi/tgsi_strings.h"
#include "util/u_half.h"

struct etna_compile {
   nir_shader *nir;
#define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
   const struct etna_specs *specs;
   struct etna_shader_variant *variant;

   /* block # to instr index */
   unsigned *block_ptr;

   /* Code generation */
   int inst_ptr; /* current instruction pointer */
   struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];

   /* constants */
   uint64_t consts[ETNA_MAX_IMM];

   /* There was an error during compilation */
   bool error;
};

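/* Filter for nir_lower_alu_to_scalar: return true for ALU ops that must be
 * scalarized.  The transcendental ops and integer multiply only exist as
 * scalar instructions on this hardware, and the vector b32all/b32any
 * compares have no direct vector equivalent (see TODO below), so they are
 * split into per-component ops.
 */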
static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}

static void
etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
{
   nir_shader *shader = impl->function->shader;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         /* multiply sin/cos src by constant
          * TODO: do this earlier (but it breaks const_prop opt)
          */
         if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
            b.cursor = nir_before_instr(instr);

            nir_ssa_def *imm = c->specs->has_new_transcendentals ?
               nir_imm_float(&b, 1.0 / M_PI) :
               nir_imm_float(&b, 2.0 / M_PI);

            nir_instr_rewrite_src(instr, &alu->src[0].src,
               nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
         }

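         /* On parts with the new transcendentals, the hardware op produces a
          * vec2 whose two components multiply together to give the final
          * result -- hence the extra x*y fmul appended below.
          */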
         /* change transcendental ops to vec2 and insert vec1 mul for the result
          * TODO: do this earlier (but it breaks with optimizations)
          */
         if (c->specs->has_new_transcendentals && (
             alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
             alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
            nir_ssa_def *ssa = &alu->dest.dest.ssa;

            assert(ssa->num_components == 1);

            nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
            mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
            mul->src[1].swizzle[0] = 1;

            mul->dest.write_mask = 1;
            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);

            ssa->num_components = 2;

            mul->dest.saturate = alu->dest.saturate;
            alu->dest.saturate = 0;

            nir_instr_insert_after(instr, &mul->instr);

            nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
         }
      }
   }
}

static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         etna_lower_alu_impl(function->impl, c);
   }
}

static void
emit_inst(struct etna_compile *c, struct etna_inst *inst)
{
   c->code[c->inst_ptr++] = *inst;
}

/* how nir srcs map to etna_inst srcs */
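/* Each value packs three 2-bit nir-src indices, one per hardware src slot:
 * slot j reads nir src ((src >> (j * 2)) & 3), with index 3 meaning the slot
 * is unused (see the decode loop at the end of etna_emit_alu()).
 */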
enum {
   SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
   SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
   SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
   SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
   SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
   SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
   SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
};

/* info to translate a nir op to etna_inst */
struct etna_op_info {
   uint8_t opcode; /* INST_OPCODE_ */
   uint8_t src;    /* SRC_ enum */
   uint8_t cond;   /* INST_CONDITION_ */
   uint8_t type;   /* INST_TYPE_ */
};

static const struct etna_op_info etna_ops[] = {
   [0 ... nir_num_opcodes - 1] = {0xff},
#undef TRUE
#undef FALSE
#define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
   INST_OPCODE_##op, \
   SRC_##src, \
   INST_CONDITION_##cond, \
   INST_TYPE_##type \
}
#define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
#define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
#define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
#define OP(nir, op, src) OPC(nir, op, src, TRUE)
#define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
#define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
   OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
   OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
   OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
   OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
   OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
   OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
   OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
   OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
   OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
   OPC(fcsel, SELECT, 0_1_2, NZ),
   OP(fdiv, DIV, 0_1_X),
   OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),

   /* type convert */
   IOP(i2f32, I2F, 0_X_X),
   UOP(u2f32, I2F, 0_X_X),
   IOP(f2i32, F2I, 0_X_X),
   UOP(f2u32, F2I, 0_X_X),
   UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
   UOP(b2i32, AND, 0_X_X), /* AND with 1 */
   OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
   UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */

   /* arithmetic */
   IOP(iadd, ADD, 0_X_1),
   IOP(imul, IMULLO0, 0_1_X),
   /* IOP(imad, IMADLO0, 0_1_2), */
   IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
   IOP(iabs, IABS, X_X_0),
   IOP(isign, SIGN, X_X_0),
   IOPC(imin, SELECT, 0_1_0, GT),
   IOPC(imax, SELECT, 0_1_0, LT),
   UOPC(umin, SELECT, 0_1_0, GT),
   UOPC(umax, SELECT, 0_1_0, LT),

   /* select */
   UOPC(b32csel, SELECT, 0_1_2, NZ),

   /* compare with int result */
   OPC(feq32, CMP, 0_1_X, EQ),
   OPC(fne32, CMP, 0_1_X, NE),
   OPC(fge32, CMP, 0_1_X, GE),
   OPC(flt32, CMP, 0_1_X, LT),
   IOPC(ieq32, CMP, 0_1_X, EQ),
   IOPC(ine32, CMP, 0_1_X, NE),
   IOPC(ige32, CMP, 0_1_X, GE),
   IOPC(ilt32, CMP, 0_1_X, LT),
   UOPC(uge32, CMP, 0_1_X, GE),
   UOPC(ult32, CMP, 0_1_X, LT),

   /* bit ops */
   IOP(ior, OR, 0_X_1),
   IOP(iand, AND, 0_X_1),
   IOP(ixor, XOR, 0_X_1),
   IOP(inot, NOT, X_X_0),
   IOP(ishl, LSHIFT, 0_X_1),
   IOP(ishr, RSHIFT, 0_X_1),
   UOP(ushr, RSHIFT, 0_X_1),
};

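/* Record at which instruction a NIR block starts, so BRANCH targets can be
 * fixed up once all instructions have been emitted.
 */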
static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

static void
etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
              struct etna_inst_src src[3], bool saturate)
{
   struct etna_op_info ei = etna_ops[op];
   unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);

   assert(ei.opcode != 0xff);

   struct etna_inst inst = {
      .opcode = ei.opcode,
      .type = ei.type,
      .cond = ei.cond,
      .dst = dst,
      .sat = saturate,
   };

   switch (op) {
   case nir_op_fdiv:
   case nir_op_flog2:
   case nir_op_fsin:
   case nir_op_fcos:
      if (c->specs->has_new_transcendentals)
         inst.tex.amode = 1;
      /* fall through */
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_imul:
      /* scalar instructions: broadcast the source channel that feeds the
       * single destination component */
      src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
      src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
      break;
   /* deal with instructions which don't have a 1:1 mapping */
   case nir_op_b2f32:
      inst.src[2] = etna_immediate_float(1.0f);
      break;
   case nir_op_b2i32:
      inst.src[2] = etna_immediate_int(1);
      break;
   case nir_op_f2b32:
      inst.src[1] = etna_immediate_float(0.0f);
      break;
   case nir_op_i2b32:
      inst.src[1] = etna_immediate_int(0);
      break;
   case nir_op_ineg:
      inst.src[0] = etna_immediate_int(0);
      src[0].neg = 1;
      break;
   default:
      break;
   }

   /* set the "true" value for CMP instructions */
   if (inst.opcode == INST_OPCODE_CMP)
      inst.src[2] = etna_immediate_int(-1);

   for (unsigned j = 0; j < 3; j++) {
      unsigned i = ((ei.src >> j*2) & 3);
      if (i < 3)
         inst.src[j] = src[i];
   }

   emit_inst(c, &inst);
}

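/* Emit a texture sample.  Vertex shaders appear to address their samplers
 * above the fragment ones, hence the vertex_sampler_offset added outside
 * the FS.
 */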
static void
etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
              struct etna_inst_dst dst, struct etna_inst_src coord,
              struct etna_inst_src lod_bias, struct etna_inst_src compare)
{
   struct etna_inst inst = {
      .dst = dst,
      .tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
      .tex.swiz = dst_swiz,
      .src[0] = coord,
   };

   if (lod_bias.use)
      inst.src[1] = lod_bias;

   if (compare.use)
      inst.src[2] = compare;

   switch (op) {
   case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
   case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
   case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
   default:
      assert(0);
   }

   emit_inst(c, &inst);
}

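/* Emit a branch to a NIR block.  The conditional form branches when the
 * condition is *false* (INST_CONDITION_NOT), i.e. it jumps past a block
 * whose guarding condition does not hold.
 */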
static void
etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_BRANCH,
      .cond = INST_CONDITION_NOT,
      .type = INST_TYPE_U32,
      .src[0] = condition,
      .imm = block,
   };
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

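/* Emit a (conditional) discard as TEXKILL.  Pre-halti2 parts compare the
 * condition as float, presumably for lack of integer support there.
 */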
static void
etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_TEXKILL,
      .cond = INST_CONDITION_NZ,
      .type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
      .src[0] = condition,
   };
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

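/* NIR_PASS wrappers: OPT() runs a pass and evaluates to whether it made
 * progress, OPT_V() runs a pass without progress reporting.
 */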
#define OPT(nir, pass, ...) ({ \
   bool this_progress = false; \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
   this_progress; \
})
#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress = true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   } while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

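/* The compiler tracks each constant as a uint64_t: the low 32 bits hold the
 * immediate value, the high 32 bits its contents tag.  Unpack them into the
 * variant's parallel imm_data/imm_contents arrays.
 */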
static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->imm_count = count * 4;
   uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
   uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));

   for (unsigned i = 0; i < uinfo->imm_count; i++) {
      uinfo->imm_data[i] = consts[i];
      uinfo->imm_contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}

#include "etnaviv_compiler_nir_emit.h"

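/* Check the variant against hardware limits.  code_size is in 32-bit words
 * (4 per instruction) and imm_count in scalar components (4 per uniform
 * vec4), hence the divisions by 4 below.
 */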
static bool
etna_compile_check_limits(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;
   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
                         ? specs->max_vs_uniforms
                         : specs->max_ps_uniforms;

   if (!specs->has_icache && v->needs_icache) {
      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
          specs->max_instructions);
      return false;
   }

   if (v->num_temps > specs->max_registers) {
      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
          specs->max_registers);
      return false;
   }

   if (v->uniforms.imm_count / 4 > max_uniforms) {
      DBG("Number of uniforms (%d) exceeds maximum %d",
          v->uniforms.imm_count / 4, max_uniforms);
      return false;
   }

   return true;
}

static void
fill_vs_mystery(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as chip-specific vertex output buffer size, vertex cache size, and the
    * number of shader cores.
    *
    * XXX this is a conservative estimate, the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: the TGSI compiler counts all outputs (including position and
    * pointsize), here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */

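   /* Worked example with assumed (not chip-verified) specs, purely for
    * illustration: vertex_output_buffer_size = 512, vertex_cache_size = 16,
    * shader_core_count = 4 and a single varying give half_out = 1,
    * b = (20480 / (512 - 32) + 9) / 10 = 5 and a = (5 + 256 / 4) / 2 = 34.
    */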
   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                           2 * half_out * specs->vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);
}

bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
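         /* +1: FS input registers seem to start at t1, with t0 reserved for
          * the position -- an assumption based on the numbering here. */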
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
              (nir_lower_io_options)0);

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_vars_to_ssa);
   OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
   OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   etna_optimize_loop(s);

   OPT_V(s, etna_lower_io, v);

   if (v->shader->specs->vs_need_z_div)
      NIR_PASS_V(s, nir_lower_clip_halfz);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      OPT_V(s, nir_lower_int_to_float);
      OPT_V(s, nir_opt_algebraic);
      OPT_V(s, nir_lower_bool_to_float);
   } else {
      OPT_V(s, nir_lower_idiv, nir_lower_idiv_fast);
      OPT_V(s, nir_lower_bool_to_int32);
   }

   etna_optimize_loop(s);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   while (OPT(s, nir_opt_vectorize));
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */
   NIR_PASS_V(s, nir_copy_prop);
   /* only HW supported integer source mod is ineg for iadd instruction (?) */
   NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);

   NIR_PASS_V(s, nir_opt_dce);

   NIR_PASS_V(s, etna_lower_alu, c);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->halti5 = specs->halti >= 5;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      fill_vs_mystery(v);
   }

   bool result = etna_compile_check_limits(v);
   ralloc_free(c->nir);
   FREE(c);
   return result;
}

void
etna_destroy_shader_nir(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.imm_data);
   FREE(shader->uniforms.imm_contents);
   FREE(shader);
}

extern const char *tgsi_swizzle_names[];

void
etna_dump_shader_nir(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
   for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
      printf(" [%i].%s = %f (0x%08x) (%d)\n",
             idx / 4,
             tgsi_swizzle_names[idx % 4],
             *((float *)&shader->uniforms.imm_data[idx]),
             shader->uniforms.imm_data[idx],
             shader->uniforms.imm_contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
                gl_vert_attrib_name(shader->infile.reg[idx].slot) :
                gl_varying_slot_name(shader->infile.reg[idx].slot),
             shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
                gl_varying_slot_name(shader->outfile.reg[idx].slot) :
                gl_frag_result_name(shader->outfile.reg[idx].slot),
             shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf("  vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf("  vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf("  vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      printf("  ps_color_out_reg=%i\n", shader->ps_color_out_reg);
      printf("  ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf("  input_count_unk8=0x%08x\n", shader->input_count_unk8);
}

static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

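/* Link the VS outputs to the FS inputs, filling in the varying table.
 * Returns true on a link error, i.e. an FS input (other than point coord)
 * with no matching VS output.
 */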
bool
etna_link_shader_nir(struct etna_shader_link_info *info,
                     const struct etna_shader_variant *vs,
                     const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point coord is an input to the PS without a matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else {
         if (vsio == NULL) { /* not found -- link error */
            BUG("Semantic value not found in vertex shader outputs\n");
            return true;
         }
         varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);

   return false;
}