etnaviv: remove extra allocation for shader code
[mesa.git] src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_compiler.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"

#include <math.h>
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"

#include "tgsi/tgsi_strings.h"
#include "util/u_half.h"

struct etna_compile {
   nir_shader *nir;
#define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
   const struct etna_specs *specs;
   struct etna_shader_variant *variant;

   /* block # to instr index */
   unsigned *block_ptr;

   /* Code generation */
   int inst_ptr; /* current instruction pointer */
   struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];

   /* constants */
   uint64_t consts[ETNA_MAX_IMM];

   /* There was an error during compilation */
   bool error;
};

#define compile_error(ctx, args...) ({ \
   printf(args); \
   ctx->error = true; \
   assert(0); \
})

/* io related lowering
 * run after lower_int_to_float because it adds i2f/f2i ops
 */
static void
etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
{
   nir_foreach_function(function, shader) {
      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               switch (intr->intrinsic) {
               case nir_intrinsic_load_front_face: {
                  /* HW front_face is 0.0/1.0, not 0/~0u for bool,
                   * so lower it with a comparison against 0
                   */
                  intr->dest.ssa.bit_size = 32;

                  b.cursor = nir_after_instr(instr);

                  nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
                  if (v->key.front_ccw)
                     nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;

                  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                 nir_src_for_ssa(ssa),
                                                 ssa->parent_instr);
               } break;
               case nir_intrinsic_store_deref: {
                  if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
                     break;

                  nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
                  assert(deref->deref_type == nir_deref_type_var);

                  if (deref->var->data.location != FRAG_RESULT_COLOR &&
                      deref->var->data.location != FRAG_RESULT_DATA0)
                     break;

                  b.cursor = nir_before_instr(instr);

                  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
                  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
                  alu->src[0].swizzle[0] = 2;
                  alu->src[0].swizzle[2] = 0;
                  nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
               } break;
               case nir_intrinsic_load_uniform: {
                  /* multiply by 16 and convert to int */
                  b.cursor = nir_before_instr(instr);
                  nir_ssa_def *ssa = nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16));
                  nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(ssa));
               } break;
               default:
                  break;
               }
            }

            if (instr->type != nir_instr_type_tex)
               continue;

            nir_tex_instr *tex = nir_instr_as_tex(instr);
            nir_src *coord = NULL;
            nir_src *lod_bias = NULL;
            unsigned lod_bias_idx;

            assert(tex->sampler_index == tex->texture_index);

            for (unsigned i = 0; i < tex->num_srcs; i++) {
               switch (tex->src[i].src_type) {
               case nir_tex_src_coord:
                  coord = &tex->src[i].src;
                  break;
               case nir_tex_src_bias:
               case nir_tex_src_lod:
                  assert(!lod_bias);
                  lod_bias = &tex->src[i].src;
                  lod_bias_idx = i;
                  break;
               default:
                  assert(0);
                  break;
               }
            }

            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* use a dummy load_uniform here to represent texcoord scale */
               b.cursor = nir_before_instr(instr);
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
               nir_intrinsic_set_base(load, ~tex->sampler_index);
               load->num_components = 2;
               load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
               nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
               nir_intrinsic_set_type(load, nir_type_float);

               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
               nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
            }

            /* pre-HALTI5 hardware needs all texture sources packed into a single source */
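            /* a sketch of the resulting layout, inferred from the packing
             * code below: e.g. a 2D lookup with bias becomes
             * vec4(s, t, bias, bias) -- coords first, with the lod/bias
             * value replicated into the remaining channels
             */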

            if (!lod_bias || v->shader->specs->halti >= 5)
               continue;

            assert(coord && lod_bias && tex->coord_components < 4);

            nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
            for (unsigned i = 0; i < tex->coord_components; i++) {
               vec->src[i].src = nir_src_for_ssa(coord->ssa);
               vec->src[i].swizzle[0] = i;
            }
            for (unsigned i = tex->coord_components; i < 4; i++)
               vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);

            vec->dest.write_mask = 0xf;
            nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);

            nir_tex_instr_remove_src(tex, lod_bias_idx);
            nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
            tex->coord_components = 4;

            nir_instr_insert_before(&tex->instr, &vec->instr);
         }
      }
   }
}

static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}

static void
etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
{
   nir_shader *shader = impl->function->shader;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         /* multiply sin/cos src by constant
          * TODO: do this earlier (but it breaks const_prop opt)
          */
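         /* the intent, as far as the constants below suggest: the HW SIN/COS
          * take a pre-scaled argument, so sin(x) is evaluated as
          * SIN(x * 2/pi), or x * 1/pi with the newer transcendental unit
          */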
         if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
            b.cursor = nir_before_instr(instr);

            nir_ssa_def *imm = c->specs->has_new_transcendentals ?
               nir_imm_float(&b, 1.0 / M_PI) :
               nir_imm_float(&b, 2.0 / M_PI);

            nir_instr_rewrite_src(instr, &alu->src[0].src,
               nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
         }

         /* change transcendental ops to vec2 and insert vec1 mul for the result
          * TODO: do this earlier (but it breaks with optimizations)
          */
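         /* an assumption inferred from the code below: with the new
          * transcendental unit the HW produces a two-component result whose
          * product is the final value, hence the scalar x*y fmul chained
          * after the op
          */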
         if (c->specs->has_new_transcendentals && (
             alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
             alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
            nir_ssa_def *ssa = &alu->dest.dest.ssa;

            assert(ssa->num_components == 1);

            nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
            mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
            mul->src[1].swizzle[0] = 1;

            mul->dest.write_mask = 1;
            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);

            ssa->num_components = 2;

            mul->dest.saturate = alu->dest.saturate;
            alu->dest.saturate = 0;

            nir_instr_insert_after(instr, &mul->instr);

            nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
         }
      }
   }
}

static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         etna_lower_alu_impl(function->impl, c);
   }
}

static void
emit_inst(struct etna_compile *c, struct etna_inst *inst)
{
   c->code[c->inst_ptr++] = *inst;
}

/* maps nir srcs to etna_inst srcs */
enum {
   SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
   SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
   SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
   SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
   SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
   SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
   SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
};
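/* How to read the SRC_ encoding (derived from the decode loop in
 * etna_emit_alu): each hardware source slot j takes a 2-bit field at
 * bit 2*j naming which nir src feeds it, with 3 (the X position in the
 * name) meaning the slot is left unused. SRC_0_X_1, for example, feeds
 * nir src 0 into hw src0, leaves hw src1 empty, and feeds nir src 1
 * into hw src2.
 */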

/* info to translate a nir op to etna_inst */
struct etna_op_info {
   uint8_t opcode; /* INST_OPCODE_ */
   uint8_t src; /* SRC_ enum */
   uint8_t cond; /* INST_CONDITION_ */
   uint8_t type; /* INST_TYPE_ */
};

static const struct etna_op_info etna_ops[] = {
   [0 ... nir_num_opcodes - 1] = {0xff},
#undef TRUE
#undef FALSE
#define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
   INST_OPCODE_##op, \
   SRC_##src, \
   INST_CONDITION_##cond, \
   INST_TYPE_##type \
}
#define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
#define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
#define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
#define OP(nir, op, src) OPC(nir, op, src, TRUE)
#define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
#define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
   OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
   OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
   OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
   OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
   OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
   OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
   OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
   OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
   OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
   OPC(fcsel, SELECT, 0_1_2, NZ),
   OP(fdiv, DIV, 0_1_X),
   OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),

   /* type convert */
   IOP(i2f32, I2F, 0_X_X),
   UOP(u2f32, I2F, 0_X_X),
   IOP(f2i32, F2I, 0_X_X),
   UOP(f2u32, F2I, 0_X_X),
   UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
   UOP(b2i32, AND, 0_X_X), /* AND with 1 */
   OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
   UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */

   /* arithmetic */
   IOP(iadd, ADD, 0_X_1),
   IOP(imul, IMULLO0, 0_1_X),
   /* IOP(imad, IMADLO0, 0_1_2), */
   IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
   IOP(iabs, IABS, X_X_0),
   IOP(isign, SIGN, X_X_0),
   IOPC(imin, SELECT, 0_1_0, GT),
   IOPC(imax, SELECT, 0_1_0, LT),
   UOPC(umin, SELECT, 0_1_0, GT),
   UOPC(umax, SELECT, 0_1_0, LT),

   /* select */
   UOPC(b32csel, SELECT, 0_1_2, NZ),

   /* compare with int result */
   OPC(feq32, CMP, 0_1_X, EQ),
   OPC(fne32, CMP, 0_1_X, NE),
   OPC(fge32, CMP, 0_1_X, GE),
   OPC(flt32, CMP, 0_1_X, LT),
   IOPC(ieq32, CMP, 0_1_X, EQ),
   IOPC(ine32, CMP, 0_1_X, NE),
   IOPC(ige32, CMP, 0_1_X, GE),
   IOPC(ilt32, CMP, 0_1_X, LT),
   UOPC(uge32, CMP, 0_1_X, GE),
   UOPC(ult32, CMP, 0_1_X, LT),

   /* bit ops */
   IOP(ior, OR, 0_X_1),
   IOP(iand, AND, 0_X_1),
   IOP(ixor, XOR, 0_X_1),
   IOP(inot, NOT, X_X_0),
   IOP(ishl, LSHIFT, 0_X_1),
   IOP(ishr, RSHIFT, 0_X_1),
   UOP(ushr, RSHIFT, 0_X_1),
};
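/* Reading one entry as an example: OPC(fmin, SELECT, 0_1_0, GT) turns
 * nir_op_fmin into a float SELECT.GT whose hw sources are
 * (src0, src1, src0), i.e. min/max are built from a conditional select
 * rather than a dedicated opcode (the exact SELECT operand semantics
 * are defined by the hardware ISA).
 */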

static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

static void
etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
              struct etna_inst_src src[3], bool saturate)
{
   struct etna_op_info ei = etna_ops[op];
   unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);

   assert(ei.opcode != 0xff);

   struct etna_inst inst = {
      .opcode = ei.opcode,
      .type = ei.type,
      .cond = ei.cond,
      .dst = dst,
      .sat = saturate,
   };

   switch (op) {
   case nir_op_fdiv:
   case nir_op_flog2:
   case nir_op_fsin:
   case nir_op_fcos:
      if (c->specs->has_new_transcendentals)
         inst.tex.amode = 1;
      /* fall through */
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_imul:
      /* for scalar instructions we want the src in the x component */
      src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
      src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
      break;
   /* deal with instructions which don't have 1:1 mapping */
   case nir_op_b2f32:
      inst.src[2] = etna_immediate_float(1.0f);
      break;
   case nir_op_b2i32:
      inst.src[2] = etna_immediate_int(1);
      break;
   case nir_op_f2b32:
      inst.src[1] = etna_immediate_float(0.0f);
      break;
   case nir_op_i2b32:
      inst.src[1] = etna_immediate_int(0);
      break;
   case nir_op_ineg:
      inst.src[0] = etna_immediate_int(0);
      src[0].neg = 1;
      break;
   default:
      break;
   }

   /* set the "true" value for CMP instructions */
   if (inst.opcode == INST_OPCODE_CMP)
      inst.src[2] = etna_immediate_int(-1);

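   /* decode the SRC_ mapping: 2 bits per hw slot select which nir src
    * (if any) lands in inst.src[j]; the value 3 leaves the slot unset
    */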
   for (unsigned j = 0; j < 3; j++) {
      unsigned i = ((ei.src >> j*2) & 3);
      if (i < 3)
         inst.src[j] = src[i];
   }

   emit_inst(c, &inst);
}

static void
etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
              struct etna_inst_dst dst, struct etna_inst_src coord,
              struct etna_inst_src lod_bias)
{
   struct etna_inst inst = {
      .dst = dst,
      .tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
      .tex.swiz = dst_swiz,
      .src[0] = coord,
   };

   if (lod_bias.use)
      inst.src[1] = lod_bias;

   switch (op) {
   case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
   case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
   case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
   default:
      assert(0);
   }

   emit_inst(c, &inst);
}

static void
etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_BRANCH,
      .cond = INST_CONDITION_NOT,
      .type = INST_TYPE_U32,
      .src[0] = condition,
      .imm = block,
   };
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

static void
etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_TEXKILL,
      .cond = INST_CONDITION_NZ,
      .type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
      .src[0] = condition,
   };
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

static void
etna_emit_load_ubo(struct etna_compile *c, struct etna_inst_dst dst,
                   struct etna_inst_src src, struct etna_inst_src base)
{
   /* convert float offset back to integer */
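   /* pre-halti2 path, as far as can be told from the code: the destination
    * register doubles as a temporary -- F2I the float offset into it, then
    * LOAD through it; the LOAD result then overwrites the temporary
    */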
   if (c->specs->halti < 2) {
      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_F2I,
         .type = INST_TYPE_U32,
         .dst = dst,
         .src[0] = src,
      });

      emit_inst(c, &(struct etna_inst) {
         .opcode = INST_OPCODE_LOAD,
         .type = INST_TYPE_U32,
         .dst = dst,
         .src[0] = {
            .use = 1,
            .rgroup = INST_RGROUP_TEMP,
            .reg = dst.reg,
            .swiz = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1)
         },
         .src[1] = base,
      });

      return;
   }

   emit_inst(c, &(struct etna_inst) {
      .opcode = INST_OPCODE_LOAD,
      .type = INST_TYPE_U32,
      .dst = dst,
      .src[0] = src,
      .src[1] = base,
   });
}

#define OPT(nir, pass, ...) ({ \
   bool this_progress = false; \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
   this_progress; \
})
#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress = true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   }
   while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->imm_count = count * 4;
   uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
   uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));

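   /* as the shifts below show, each 64-bit const entry packs the 32-bit
    * immediate value in its low half and the "contents" tag in its high half
    */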
   for (unsigned i = 0; i < uinfo->imm_count; i++) {
      uinfo->imm_data[i] = consts[i];
      uinfo->imm_contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}

#include "etnaviv_compiler_nir_emit.h"

bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
              (nir_lower_io_options)0);

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_vars_to_ssa);
   OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
   OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   etna_optimize_loop(s);

   OPT_V(s, etna_lower_io, v);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      OPT_V(s, nir_lower_int_to_float);
      OPT_V(s, nir_opt_algebraic);
      OPT_V(s, nir_lower_bool_to_float);
   } else {
      OPT_V(s, nir_lower_idiv);
      OPT_V(s, nir_lower_bool_to_int32);
   }

   etna_optimize_loop(s);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   while (OPT(s, nir_opt_vectorize));
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
   NIR_PASS_V(s, nir_copy_prop);
   /* only HW supported integer source mod is ineg for iadd instruction (?) */
   NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);

   NIR_PASS_V(s, etna_lower_alu, c);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->halti5 = specs->halti >= 5;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
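   /* presumably, shaders that don't fit in the on-chip instruction memory
    * (specs->max_instructions) have to execute via the instruction cache
    */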
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
      ralloc_free(c->nir);
      FREE(c);
      return true;
   }

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as chip-specific vertex output buffer size, vertex cache size, and the
    * number of shader cores.
    *
    * XXX this is a conservative estimate, the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: TGSI compiler counts all outputs (including position and
    * pointsize), here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */
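   /* worked example with hypothetical specs (vertex_output_buffer_size = 512,
    * vertex_cache_size = 16, shader_core_count = 4) and a single varying
    * (half_out = 1):
    *    b = (20480 / (512 - 2*1*16) + 9) / 10 = (42 + 9) / 10 = 5
    *    a = (5 + 256 / (4*1)) / 2 = 69 / 2 = 34
    * giving VS_LOAD_BALANCING A=34, B=5 (C and D are fixed)
    */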

   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                           2 * half_out * specs->vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);

   ralloc_free(c->nir);
   FREE(c);
   return true;
}

void
etna_destroy_shader_nir(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.imm_data);
   FREE(shader->uniforms.imm_contents);
   FREE(shader);
}

extern const char *tgsi_swizzle_names[];
void
etna_dump_shader_nir(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
   for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
      printf(" [%i].%s = %f (0x%08x) (%d)\n",
             idx / 4,
             tgsi_swizzle_names[idx % 4],
             *((float *)&shader->uniforms.imm_data[idx]),
             shader->uniforms.imm_data[idx],
             shader->uniforms.imm_contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_vert_attrib_name(shader->infile.reg[idx].slot) :
              gl_varying_slot_name(shader->infile.reg[idx].slot),
             shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
              gl_varying_slot_name(shader->outfile.reg[idx].slot) :
              gl_frag_result_name(shader->outfile.reg[idx].slot),
             shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf("  vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf("  vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf("  vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      printf("  ps_color_out_reg=%i\n", shader->ps_color_out_reg);
      printf("  ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf("  input_count_unk8=0x%08x\n", shader->input_count_unk8);
}

static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

bool
etna_link_shader_nir(struct etna_shader_link_info *info,
                     const struct etna_shader_variant *vs,
                     const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point coord is an input to the PS without a matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else {
         if (vsio == NULL) { /* not found -- link error */
            BUG("Semantic value not found in vertex shader outputs\n");
            return true;
         }
         varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);

   return false;
}