/* [mesa.git] src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c */
/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_compiler.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"

#include <math.h>
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"

#include "tgsi/tgsi_strings.h"
#include "util/u_half.h"

struct etna_compile {
   nir_shader *nir;
#define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
   const struct etna_specs *specs;
   struct etna_shader_variant *variant;

   /* block # to instr index */
   unsigned *block_ptr;

   /* Code generation */
   int inst_ptr; /* current instruction pointer */
   struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];

   /* constants */
   uint64_t consts[ETNA_MAX_IMM];

   /* There was an error during compilation */
   bool error;
};

/* IO-related lowering;
 * run after lower_int_to_float because it adds i2f/f2i ops
 */
static void
etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
{
   nir_foreach_function(function, shader) {
      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               switch (intr->intrinsic) {
               case nir_intrinsic_load_front_face: {
                  /* HW front_face is 0.0/1.0, not the 0/~0u NIR expects for a bool;
                   * lower it with a comparison against 0
                   */
                  intr->dest.ssa.bit_size = 32;

                  b.cursor = nir_after_instr(instr);

                  nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
                  if (v->key.front_ccw)
                     nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;

                  nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                 nir_src_for_ssa(ssa),
                                                 ssa->parent_instr);
               } break;
               case nir_intrinsic_store_deref: {
                  nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
                  if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
                     break;

                  assert(deref->deref_type == nir_deref_type_var);

                  if (deref->var->data.location != FRAG_RESULT_COLOR &&
                      deref->var->data.location != FRAG_RESULT_DATA0)
                     break;

                  b.cursor = nir_before_instr(instr);

                  nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
                  nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
                  alu->src[0].swizzle[0] = 2;
                  alu->src[0].swizzle[2] = 0;
                  nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
               } break;
               case nir_intrinsic_load_uniform: {
                  /* Convert an indirect load_uniform to load_ubo when possible.
                   * This is required on HALTI5+ because the address register is
                   * not implemented there; address register loads also aren't
                   * done optimally on earlier hardware.
                   */
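                  /* Illustrative example (hypothetical values): a load_uniform
                   * with base 2 and dynamic index i becomes
                   * load_ubo(block 0, i*16 + 2*16), i.e. a vec4-sized byte
                   * offset into UBO index 0, which is reserved for converted
                   * uniforms (user UBO indices are shifted up by one in the
                   * load_ubo case below).
                   */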
                  if (v->shader->specs->halti < 2 || nir_src_is_const(intr->src[0]))
                     break;

                  nir_intrinsic_instr *load_ubo =
                     nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
                  load_ubo->num_components = intr->num_components;
                  nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                                    load_ubo->num_components, 32, NULL);

                  b.cursor = nir_before_instr(instr);
                  load_ubo->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
                  load_ubo->src[1] = nir_src_for_ssa(nir_iadd(&b,
                     nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16)),
                     nir_imm_int(&b, nir_intrinsic_base(intr) * 16)));
                  nir_builder_instr_insert(&b, &load_ubo->instr);
                  nir_ssa_def_rewrite_uses(&intr->dest.ssa,
                                           nir_src_for_ssa(&load_ubo->dest.ssa));
                  nir_instr_remove(&intr->instr);
               } break;
               case nir_intrinsic_load_ubo: {
                  nir_const_value *idx = nir_src_as_const_value(intr->src[0]);
                  assert(idx);
                  /* offset index by 1, index 0 is used for converted load_uniform */
                  b.cursor = nir_before_instr(instr);
                  nir_instr_rewrite_src(instr, &intr->src[0],
                                        nir_src_for_ssa(nir_imm_int(&b, idx[0].u32 + 1)));
               } break;
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_instance_id:
                  /* detect use of vertex_id/instance_id */
                  v->vs_id_in_reg = v->infile.num_reg;
                  break;
               default:
                  break;
               }
            }

            if (instr->type != nir_instr_type_tex)
               continue;

            nir_tex_instr *tex = nir_instr_as_tex(instr);
            nir_src *coord = NULL;
            nir_src *lod_bias = NULL;
            unsigned lod_bias_idx;

            assert(tex->sampler_index == tex->texture_index);

            for (unsigned i = 0; i < tex->num_srcs; i++) {
               switch (tex->src[i].src_type) {
               case nir_tex_src_coord:
                  coord = &tex->src[i].src;
                  break;
               case nir_tex_src_bias:
               case nir_tex_src_lod:
                  assert(!lod_bias);
                  lod_bias = &tex->src[i].src;
                  lod_bias_idx = i;
                  break;
               case nir_tex_src_comparator:
                  break;
               default:
                  assert(0);
                  break;
               }
            }

            if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
               /* use a dummy load_uniform here to represent texcoord scale */
               b.cursor = nir_before_instr(instr);
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
               nir_intrinsic_set_base(load, ~tex->sampler_index);
               load->num_components = 2;
               load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
               nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
               nir_intrinsic_set_type(load, nir_type_float);

               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
               nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
            }

            /* pre-HALTI5 hardware needs the lod/bias packed together with the
             * texture coordinate into a single source */

            if (!lod_bias || v->shader->specs->halti >= 5)
               continue;

            assert(coord && lod_bias && tex->coord_components < 4);

            nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
            for (unsigned i = 0; i < tex->coord_components; i++) {
               vec->src[i].src = nir_src_for_ssa(coord->ssa);
               vec->src[i].swizzle[0] = i;
            }
            for (unsigned i = tex->coord_components; i < 4; i++)
               vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);

            vec->dest.write_mask = 0xf;
            nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);

            nir_tex_instr_remove_src(tex, lod_bias_idx);
            nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
            tex->coord_components = 4;

            nir_instr_insert_before(&tex->instr, &vec->instr);
         }
      }
   }
}

static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_specs *specs = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!specs->has_halti2_instructions)
         return true;
      break;
   default:
      break;
   }

   return false;
}

static void
etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
{
   nir_shader *shader = impl->function->shader;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* in a separate loop so we can apply the multiple-uniform logic to the new fmul */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         /* multiply sin/cos src by constant
          * TODO: do this earlier (but it breaks const_prop opt)
          */
         if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
            b.cursor = nir_before_instr(instr);

            nir_ssa_def *imm = c->specs->has_new_transcendentals ?
               nir_imm_float(&b, 1.0 / M_PI) :
               nir_imm_float(&b, 2.0 / M_PI);

            nir_instr_rewrite_src(instr, &alu->src[0].src,
                                  nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
         }

         /* change transcendental ops to vec2 and insert vec1 mul for the result
          * TODO: do this earlier (but it breaks with optimizations)
          */
         if (c->specs->has_new_transcendentals && (
             alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
             alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
            nir_ssa_def *ssa = &alu->dest.dest.ssa;

            assert(ssa->num_components == 1);

            nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
            mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
            mul->src[1].swizzle[0] = 1;

            mul->dest.write_mask = 1;
            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);

            ssa->num_components = 2;

            mul->dest.saturate = alu->dest.saturate;
            alu->dest.saturate = 0;

            nir_instr_insert_after(instr, &mul->instr);

            nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
         }
      }
   }
}

static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         etna_lower_alu_impl(function->impl, c);
   }
}

static void
emit_inst(struct etna_compile *c, struct etna_inst *inst)
{
   c->code[c->inst_ptr++] = *inst;
}

/* map nir srcs to etna_inst srcs */
enum {
   SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
   SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
   SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
   SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
   SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
   SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
   SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
};
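
/* Each 2-bit field of the SRC_* values selects which nir src feeds the
 * corresponding hw src slot (bits 0-1: hw src0, bits 2-3: hw src1,
 * bits 4-5: hw src2); the value 3 (X) leaves a slot unused. For example,
 * SRC_0_X_1 (used for fadd below) routes nir src0 to hw src0 and nir src1
 * to hw src2, matching an ADD that computes src0 + src2 (the ineg mapping
 * reuses this as 0 + -x). The fields are decoded by the loop at the end of
 * etna_emit_alu().
 */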

/* info to translate a nir op to etna_inst */
struct etna_op_info {
   uint8_t opcode; /* INST_OPCODE_ */
   uint8_t src;    /* SRC_ enum */
   uint8_t cond;   /* INST_CONDITION_ */
   uint8_t type;   /* INST_TYPE_ */
};

static const struct etna_op_info etna_ops[] = {
   [0 ... nir_num_opcodes - 1] = {0xff},
#undef TRUE
#undef FALSE
#define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
   INST_OPCODE_##op, \
   SRC_##src, \
   INST_CONDITION_##cond, \
   INST_TYPE_##type \
}
#define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
#define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
#define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
#define OP(nir, op, src) OPC(nir, op, src, TRUE)
#define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
#define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
   OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
   OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
   OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
   OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
   OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
   OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
   OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
   OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
   OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
   OPC(fcsel, SELECT, 0_1_2, NZ),
   OP(fdiv, DIV, 0_1_X),
   OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),

   /* type convert */
   IOP(i2f32, I2F, 0_X_X),
   UOP(u2f32, I2F, 0_X_X),
   IOP(f2i32, F2I, 0_X_X),
   UOP(f2u32, F2I, 0_X_X),
   UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
   UOP(b2i32, AND, 0_X_X), /* AND with 1 */
   OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
   UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */

   /* arithmetic */
   IOP(iadd, ADD, 0_X_1),
   IOP(imul, IMULLO0, 0_1_X),
   /* IOP(imad, IMADLO0, 0_1_2), */
   IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
   IOP(iabs, IABS, X_X_0),
   IOP(isign, SIGN, X_X_0),
   IOPC(imin, SELECT, 0_1_0, GT),
   IOPC(imax, SELECT, 0_1_0, LT),
   UOPC(umin, SELECT, 0_1_0, GT),
   UOPC(umax, SELECT, 0_1_0, LT),

   /* select */
   UOPC(b32csel, SELECT, 0_1_2, NZ),

   /* compare with int result */
   OPC(feq32, CMP, 0_1_X, EQ),
   OPC(fne32, CMP, 0_1_X, NE),
   OPC(fge32, CMP, 0_1_X, GE),
   OPC(flt32, CMP, 0_1_X, LT),
   IOPC(ieq32, CMP, 0_1_X, EQ),
   IOPC(ine32, CMP, 0_1_X, NE),
   IOPC(ige32, CMP, 0_1_X, GE),
   IOPC(ilt32, CMP, 0_1_X, LT),
   UOPC(uge32, CMP, 0_1_X, GE),
   UOPC(ult32, CMP, 0_1_X, LT),

   /* bit ops */
   IOP(ior, OR, 0_X_1),
   IOP(iand, AND, 0_X_1),
   IOP(ixor, XOR, 0_X_1),
   IOP(inot, NOT, X_X_0),
   IOP(ishl, LSHIFT, 0_X_1),
   IOP(ishr, RSHIFT, 0_X_1),
   UOP(ushr, RSHIFT, 0_X_1),
};

static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

static void
etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
              struct etna_inst_src src[3], bool saturate)
{
   struct etna_op_info ei = etna_ops[op];
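   /* For the scalar-only HW instructions handled below, broadcast the single
    * written dst channel (from ffs(write_mask)) over the source swizzles, so
    * every channel of a source reads the component that feeds that dst channel.
    */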
   unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);

   assert(ei.opcode != 0xff);

   struct etna_inst inst = {
      .opcode = ei.opcode,
      .type = ei.type,
      .cond = ei.cond,
      .dst = dst,
      .sat = saturate,
   };

   switch (op) {
   case nir_op_fdiv:
   case nir_op_flog2:
   case nir_op_fsin:
   case nir_op_fcos:
      if (c->specs->has_new_transcendentals)
         inst.tex.amode = 1;
      /* fall through */
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_imul:
      /* for scalar instructions we want the src in the x component */
      src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
      src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
      break;
   /* deal with instructions which don't have 1:1 mapping */
   case nir_op_b2f32:
      inst.src[2] = etna_immediate_float(1.0f);
      break;
   case nir_op_b2i32:
      inst.src[2] = etna_immediate_int(1);
      break;
   case nir_op_f2b32:
      inst.src[1] = etna_immediate_float(0.0f);
      break;
   case nir_op_i2b32:
      inst.src[1] = etna_immediate_int(0);
      break;
   case nir_op_ineg:
      inst.src[0] = etna_immediate_int(0);
      src[0].neg = 1;
      break;
   default:
      break;
   }

   /* set the "true" value for CMP instructions */
   if (inst.opcode == INST_OPCODE_CMP)
      inst.src[2] = etna_immediate_int(-1);

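   /* Unpack ei.src (see the SRC_* enum): for each hw src slot j, the 2-bit
    * field selects the nir src to route there; the value 3 leaves the slot
    * unused (or as filled in explicitly above).
    */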
   for (unsigned j = 0; j < 3; j++) {
      unsigned i = ((ei.src >> j*2) & 3);
      if (i < 3)
         inst.src[j] = src[i];
   }

   emit_inst(c, &inst);
}

static void
etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
              struct etna_inst_dst dst, struct etna_inst_src coord,
              struct etna_inst_src lod_bias, struct etna_inst_src compare)
{
   struct etna_inst inst = {
      .dst = dst,
      .tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
      .tex.swiz = dst_swiz,
      .src[0] = coord,
   };

   if (lod_bias.use)
      inst.src[1] = lod_bias;

   if (compare.use)
      inst.src[2] = compare;

   switch (op) {
   case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
   case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
   case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
   default:
      assert(0);
   }

   emit_inst(c, &inst);
}

static void
etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_BRANCH,
      .cond = INST_CONDITION_NOT,
      .type = INST_TYPE_U32,
      .src[0] = condition,
      .imm = block,
   };
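   /* the condition is evaluated on a single channel: broadcast the swizzle's
    * x selector (its low 2 bits) across all components
    */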
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

static void
etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
{
   if (!condition.use) {
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
      return;
   }

   struct etna_inst inst = {
      .opcode = INST_OPCODE_TEXKILL,
      .cond = INST_CONDITION_NZ,
      .type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
      .src[0] = condition,
   };
   inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
   emit_inst(c, &inst);
}

static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

#define OPT(nir, pass, ...) ({ \
   bool this_progress = false; \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
   this_progress; \
})
#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      OPT_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress = true;
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
      progress |= OPT(s, nir_opt_if, false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   }
   while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->imm_count = count * 4;
   uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
   uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));

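   /* Each 64-bit constant packs the 32-bit immediate value in its low half
    * and the content classification tag in its high half (presumably the
    * immediate-contents enum from etnaviv_uniforms.h).
    */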
   for (unsigned i = 0; i < uinfo->imm_count; i++) {
      uinfo->imm_data[i] = consts[i];
      uinfo->imm_contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}

#include "etnaviv_compiler_nir_emit.h"

static bool
etna_compile_check_limits(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;
   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
                         ? specs->max_vs_uniforms
                         : specs->max_ps_uniforms;

   if (!specs->has_icache && v->needs_icache) {
      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
          specs->max_instructions);
      return false;
   }

   if (v->num_temps > specs->max_registers) {
      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
          specs->max_registers);
      return false;
   }

   if (v->uniforms.imm_count / 4 > max_uniforms) {
      DBG("Number of uniforms (%d) exceeds maximum %d",
          v->uniforms.imm_count / 4, max_uniforms);
      return false;
   }

   return true;
}

static void
fill_vs_mystery(struct etna_shader_variant *v)
{
   const struct etna_specs *specs = v->shader->specs;

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as the chip-specific vertex output buffer size, vertex cache size, and
    * the number of shader cores.
    *
    * XXX this is a conservative estimate, the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: the TGSI compiler counts all outputs (including position and
    * pointsize); here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */

   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
                           2 * half_out * specs->vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
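   /* Worked example with made-up specs, for illustration only:
    * vertex_output_buffer_size = 512, vertex_cache_size = 16,
    * shader_core_count = 2 and outfile.num_reg = 2 give half_out = 2,
    * b = (20480 / (512 - 64) + 9) / 10 = 5 and a = (5 + 256 / 4) / 2 = 34
    * (integer division throughout), both well under the 255 clamp below.
    */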
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);
}

bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shaders that don't write fragcolor */
   v->ps_depth_out_reg = -1;

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_variable(var, &s->inputs) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
              (nir_lower_io_options)0);

   OPT_V(s, nir_lower_regs_to_ssa);
   OPT_V(s, nir_lower_vars_to_ssa);
   OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
   OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   etna_optimize_loop(s);

   OPT_V(s, etna_lower_io, v);

   if (v->shader->specs->vs_need_z_div)
      NIR_PASS_V(s, nir_lower_clip_halfz);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->specs->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      OPT_V(s, nir_lower_int_to_float);
      OPT_V(s, nir_opt_algebraic);
      OPT_V(s, nir_lower_bool_to_float);
   } else {
      OPT_V(s, nir_lower_idiv, nir_lower_idiv_fast);
      OPT_V(s, nir_lower_bool_to_int32);
   }

   etna_optimize_loop(s);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   while (OPT(s, nir_opt_vectorize));
   OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
   NIR_PASS_V(s, nir_copy_prop);
   /* the only integer source mod supported by the HW is ineg, for the iadd instruction (?) */
   NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);

   NIR_PASS_V(s, etna_lower_alu, c);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == INST_OPCODE_BRANCH)
         inst->imm = block_ptr[inst->imm];

      inst->halti5 = specs->halti >= 5;
      etna_assemble(&code[i * 4], inst);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      fill_vs_mystery(v);
   }

   bool result = etna_compile_check_limits(v);
   ralloc_free(c->nir);
   FREE(c);
   return result;
}

void
etna_destroy_shader_nir(struct etna_shader_variant *shader)
{
   assert(shader);

   FREE(shader->code);
   FREE(shader->uniforms.imm_data);
   FREE(shader->uniforms.imm_contents);
   FREE(shader);
}

extern const char *tgsi_swizzle_names[];
void
etna_dump_shader_nir(const struct etna_shader_variant *shader)
{
   if (shader->stage == MESA_SHADER_VERTEX)
      printf("VERT\n");
   else
      printf("FRAG\n");

   etna_disasm(shader->code, shader->code_size, PRINT_RAW);

   printf("num loops: %i\n", shader->num_loops);
   printf("num temps: %i\n", shader->num_temps);
   printf("immediates:\n");
   for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
      printf(" [%i].%s = %f (0x%08x) (%d)\n",
             idx / 4,
             tgsi_swizzle_names[idx % 4],
             *((float *)&shader->uniforms.imm_data[idx]),
             shader->uniforms.imm_data[idx],
             shader->uniforms.imm_contents[idx]);
   }
   printf("inputs:\n");
   for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
                gl_vert_attrib_name(shader->infile.reg[idx].slot) :
                gl_varying_slot_name(shader->infile.reg[idx].slot),
             shader->infile.reg[idx].num_components);
   }
   printf("outputs:\n");
   for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
      printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
             (shader->stage == MESA_SHADER_VERTEX) ?
                gl_varying_slot_name(shader->outfile.reg[idx].slot) :
                gl_frag_result_name(shader->outfile.reg[idx].slot),
             shader->outfile.reg[idx].num_components);
   }
   printf("special:\n");
   if (shader->stage == MESA_SHADER_VERTEX) {
      printf("  vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
      printf("  vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
      printf("  vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
   } else {
      printf("  ps_color_out_reg=%i\n", shader->ps_color_out_reg);
      printf("  ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
   }
   printf("  input_count_unk8=0x%08x\n", shader->input_count_unk8);
}

static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

bool
etna_link_shader_nir(struct etna_shader_link_info *info,
                     const struct etna_shader_variant *vs,
                     const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point coord is an input to the PS without matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else {
         if (vsio == NULL) { /* not found -- link error */
            BUG("Semantic value not found in vertex shader outputs\n");
            return true;
         }
         varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);

   return false;
}