panfrost/midgard: Emit type appropriate ld_vary
[mesa.git] / src / gallium / drivers / panfrost / midgard / midgard_compile.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <err.h>

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"

#include "midgard.h"
#include "midgard_nir.h"
#include "midgard_compile.h"
#include "midgard_ops.h"
#include "helpers.h"
#include "compiler.h"

#include "disassemble.h"

static const struct debug_named_value debug_options[] = {
        {"msgs",      MIDGARD_DBG_MSGS,       "Print debug messages"},
        {"shaders",   MIDGARD_DBG_SHADERS,    "Dump shaders in NIR and MIR"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)

int midgard_debug = 0;

#define DBG(fmt, ...) \
                do { if (midgard_debug & MIDGARD_DBG_MSGS) \
                        fprintf(stderr, "%s:%d: "fmt, \
                                __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
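
/* For reference (a hypothetical invocation, not taken from this file), the
 * flags above are driven by the environment at runtime, e.g.:
 *
 *    MIDGARD_MESA_DEBUG=msgs,shaders ./app
 *
 * with the comma-separated names parsed by the u_debug machinery that
 * DEBUG_GET_ONCE_FLAGS_OPTION generates */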

static bool
midgard_is_branch_unit(unsigned unit)
{
        return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
}

static void
midgard_block_add_successor(midgard_block *block, midgard_block *successor)
{
        block->successors[block->nr_successors++] = successor;
        assert(block->nr_successors <= ARRAY_SIZE(block->successors));
}

/* Helpers to generate midgard_instruction's using macro magic, since every
 * driver seems to do it that way */

#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));

#define M_LOAD_STORE(name, rname, uname) \
        static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
                midgard_instruction i = { \
                        .type = TAG_LOAD_STORE_4, \
                        .ssa_args = { \
                                .rname = ssa, \
                                .uname = -1, \
                                .src1 = -1 \
                        }, \
                        .load_store = { \
                                .op = midgard_op_##name, \
                                .mask = 0xF, \
                                .swizzle = SWIZZLE_XYZW, \
                                .address = address \
                        } \
                }; \
                \
                return i; \
        }

#define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
#define M_STORE(name) M_LOAD_STORE(name, src0, dest)
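
/* As a sketch of what the macro magic expands to, M_LOAD(ld_attr_32) below
 * yields roughly the following helper (loads write .dest, stores read .src0):
 *
 *    static midgard_instruction m_ld_attr_32(unsigned ssa, unsigned address) {
 *            midgard_instruction i = {
 *                    .type = TAG_LOAD_STORE_4,
 *                    .ssa_args = { .dest = ssa, .src0 = -1, .src1 = -1 },
 *                    .load_store = {
 *                            .op = midgard_op_ld_attr_32,
 *                            .mask = 0xF,
 *                            .swizzle = SWIZZLE_XYZW,
 *                            .address = address
 *                    }
 *            };
 *
 *            return i;
 *    }
 */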

/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
 * the corresponding Midgard source */

static midgard_vector_alu_src
vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
{
        if (!src) return blank_alu_src;

        /* Figure out how many components there are so we can adjust the
         * swizzle.  Specifically we want to broadcast the last channel so
         * things like ball2/3 work
         */

        if (broadcast_count) {
                uint8_t last_component = src->swizzle[broadcast_count - 1];

                for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
                        src->swizzle[c] = last_component;
                }
        }

        midgard_vector_alu_src alu_src = {
                .rep_low = 0,
                .rep_high = 0,
                .half = 0, /* TODO */
                .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
        };

        if (is_int) {
                /* TODO: sign-extend/zero-extend */
                alu_src.mod = midgard_int_normal;

                /* These should have been lowered away */
                assert(!(src->abs || src->negate));
        } else {
                alu_src.mod = (src->abs << 0) | (src->negate << 1);
        }

        return alu_src;
}
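
/* Worked example of the broadcast above: for a ball2-type op, a source
 * swizzled .xyzw with broadcast_count = 2 has its trailing channels rewritten
 * to the last valid one, giving .xyyy, so the vec4 Midgard op reduces over
 * exactly the two meaningful components */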

/* load/store instructions have both 32-bit and 16-bit variants, depending on
 * whether we are using vectors composed of highp or mediump. At the moment, we
 * don't support half-floats -- this requires changes in other parts of the
 * compiler -- therefore the 16-bit versions are commented out. */

//M_LOAD(ld_attr_16);
M_LOAD(ld_attr_32);
//M_LOAD(ld_vary_16);
M_LOAD(ld_vary_32);
//M_LOAD(ld_uniform_16);
M_LOAD(ld_uniform_32);
M_LOAD(ld_color_buffer_8);
//M_STORE(st_vary_16);
M_STORE(st_vary_32);
M_STORE(st_cubemap_coords);

static midgard_instruction
v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
{
        midgard_branch_cond branch = {
                .op = op,
                .dest_tag = tag,
                .offset = offset,
                .cond = cond
        };

        uint16_t compact;
        memcpy(&compact, &branch, sizeof(branch));

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .unit = ALU_ENAB_BR_COMPACT,
                .prepacked_branch = true,
                .compact_branch = true,
                .br_compact = compact
        };

        if (op == midgard_jmp_writeout_op_writeout)
                ins.writeout = true;

        return ins;
}

static midgard_instruction
v_branch(bool conditional, bool invert)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .unit = ALU_ENAB_BRANCH,
                .compact_branch = true,
                .branch = {
                        .conditional = conditional,
                        .invert_conditional = invert
                }
        };

        return ins;
}

static midgard_branch_extended
midgard_create_branch_extended(midgard_condition cond,
                               midgard_jmp_writeout_op op,
                               unsigned dest_tag,
                               signed quadword_offset)
{
        /* For unclear reasons, the condition code is repeated 8 times */
        uint16_t duplicated_cond =
                (cond << 14) |
                (cond << 12) |
                (cond << 10) |
                (cond << 8) |
                (cond << 6) |
                (cond << 4) |
                (cond << 2) |
                (cond << 0);

        midgard_branch_extended branch = {
                .op = op,
                .dest_tag = dest_tag,
                .offset = quadword_offset,
                .cond = duplicated_cond
        };

        return branch;
}
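
/* Worked example of the duplication: a (hypothetical) condition code of 0b10
 * packed into all eight 2-bit slots gives
 *
 *    duplicated_cond = 0b1010101010101010 = 0xAAAA
 */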

static void
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
{
        ins->has_constants = true;
        memcpy(&ins->constants, constants, 16);
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

/* Lower fdot2 to a vector multiplication followed by channel addition */
static void
midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
{
        if (alu->op != nir_op_fdot2)
                return;

        b->cursor = nir_before_instr(&alu->instr);

        nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
        nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);

        nir_ssa_def *product = nir_fmul(b, src0, src1);

        nir_ssa_def *sum = nir_fadd(b,
                                    nir_channel(b, product, 0),
                                    nir_channel(b, product, 1));

        /* Replace the fdot2 with this sum */
        nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
}

static int
midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_viewport_scale:
                return PAN_SYSVAL_VIEWPORT_SCALE;
        case nir_intrinsic_load_viewport_offset:
                return PAN_SYSVAL_VIEWPORT_OFFSET;
        default:
                return -1;
        }
}

static unsigned
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
        if (dst->is_ssa)
                return dst->ssa.index;
        else {
                assert(!dst->reg.indirect);
                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
        }
}

static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
                            unsigned *dest)
{
        nir_intrinsic_instr *intr;
        nir_dest *dst = NULL;
        nir_tex_instr *tex;
        int sysval = -1;

        switch (instr->type) {
        case nir_instr_type_intrinsic:
                intr = nir_instr_as_intrinsic(instr);
                sysval = midgard_nir_sysval_for_intrinsic(intr);
                dst = &intr->dest;
                break;
        case nir_instr_type_tex:
                tex = nir_instr_as_tex(instr);
                if (tex->op != nir_texop_txs)
                        break;

                sysval = PAN_SYSVAL(TEXTURE_SIZE,
                                    PAN_TXS_SYSVAL_ID(tex->texture_index,
                                                      nir_tex_instr_dest_size(tex) -
                                                      (tex->is_array ? 1 : 0),
                                                      tex->is_array));
                dst = &tex->dest;
                break;
        default:
                break;
        }

        if (dest && dst)
                *dest = nir_dest_index(ctx, dst);

        return sysval;
}

static void
midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
{
        int sysval;

        sysval = sysval_for_instr(ctx, instr, NULL);
        if (sysval < 0)
                return;

        /* We have a sysval load; check if it's already been assigned */

        if (_mesa_hash_table_u64_search(ctx->sysval_to_id, sysval))
                return;

        /* It hasn't -- so assign it now! */

        unsigned id = ctx->sysval_count++;
        _mesa_hash_table_u64_insert(ctx->sysval_to_id, sysval, (void *) ((uintptr_t) id + 1));
        ctx->sysvals[id] = sysval;
}
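
/* Note the id + 1 in the insertion above: ids are stored off-by-one so that a
 * valid id of 0 is distinguishable from _mesa_hash_table_u64_search()
 * returning NULL; emit_sysval_read() undoes this by subtracting one */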

static void
midgard_nir_assign_sysvals(compiler_context *ctx, nir_shader *shader)
{
        ctx->sysval_count = 0;

        nir_foreach_function(function, shader) {
                if (!function->impl) continue;

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                midgard_nir_assign_sysval_body(ctx, instr);
                        }
                }
        }
}

static bool
midgard_nir_lower_fdot2(nir_shader *shader)
{
        bool progress = false;

        nir_foreach_function(function, shader) {
                if (!function->impl) continue;

                nir_builder _b;
                nir_builder *b = &_b;
                nir_builder_init(b, function->impl);

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_alu) continue;

                                nir_alu_instr *alu = nir_instr_as_alu(instr);
                                midgard_nir_lower_fdot2_body(b, alu);

                                progress |= true;
                        }
                }

                nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
        }

        return progress;
}

static void
optimise_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp =
                (nir->options->lower_flrp16 ? 16 : 0) |
                (nir->options->lower_flrp32 ? 32 : 0) |
                (nir->options->lower_flrp64 ? 64 : 0);

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
        NIR_PASS(progress, nir, nir_lower_idiv);

        nir_lower_tex_options lower_tex_1st_pass_options = {
                .lower_rect = true,
                .lower_txp = ~0
        };

        nir_lower_tex_options lower_tex_2nd_pass_options = {
                .lower_txs_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);

                NIR_PASS(progress, nir, nir_opt_vectorize);
        } while (progress);

        /* Must be run at the end to prevent creation of fsin/fcos ops */
        NIR_PASS(progress, nir, midgard_nir_scale_trig);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);
                NIR_PASS(progress, nir, nir_copy_prop);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);

        /* We implement booleans as 32-bit 0/~0 */
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);

        /* Now that booleans are lowered, we can run our late opts */
        NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);

        /* Lower mods for float ops only. Integer ops don't support modifiers
         * (saturate doesn't make sense on integers, neg/abs require dedicated
         * instructions) */

        NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
        NIR_PASS(progress, nir, nir_copy_prop);
        NIR_PASS(progress, nir, nir_opt_dce);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);

        /* We are a vector architecture; write combine where possible */
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_lower_vec_to_movs);

        NIR_PASS(progress, nir, nir_opt_dce);
}

/* Front-half of aliasing the SSA slots, merely by inserting the flag in the
 * appropriate hash table. Intentional off-by-one to avoid confusing NULL with
 * r0. See the comments in compiler_context */

static void
alias_ssa(compiler_context *ctx, int dest, int src)
{
        _mesa_hash_table_u64_insert(ctx->ssa_to_alias, dest + 1, (void *) ((uintptr_t) src + 1));
        _mesa_set_add(ctx->leftover_ssa_to_alias, (void *) (uintptr_t) (dest + 1));
}

/* ...or undo it, after which the original index will be used (dummy move
 * should be emitted alongside this) */

static void
unalias_ssa(compiler_context *ctx, int dest)
{
        _mesa_hash_table_u64_remove(ctx->ssa_to_alias, dest + 1);
        /* TODO: Remove from leftover or no? */
}
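
/* Illustrating the off-by-one with a hypothetical call: alias_ssa(ctx, 5,
 * SSA_FIXED_REGISTER(23)) records 6 -> SSA_FIXED_REGISTER(23) + 1 in the
 * hash table and adds 6 to the leftover set, so SSA index 0 never collides
 * with a NULL lookup */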

/* Do not actually emit a load; instead, cache the constant for inlining */

static void
emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
{
        nir_ssa_def def = instr->def;

        float *v = rzalloc_array(NULL, float, 4);
        nir_const_load_to_arr(v, instr, f32);
        _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}

static unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return src->ssa->index;
        else {
                assert(!src->reg.indirect);
                return ctx->func->impl->ssa_alloc + src->reg.reg->index;
        }
}

static unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}

static bool
nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
{
        unsigned comp = src->swizzle[0];

        for (unsigned c = 1; c < nr_components; ++c) {
                if (src->swizzle[c] != comp)
                        return true;
        }

        return false;
}
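
/* e.g. a source swizzled .xxxx reads as scalar (returns false), while .xyxx
 * mixes components and returns true, which later forces the vector csel_v
 * path */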

/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
 * output of a conditional test) into that register */

static void
emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
{
        int condition = nir_src_index(ctx, src);

        /* Source to swizzle the desired component into w */

        const midgard_vector_alu_src alu_src = {
                .swizzle = SWIZZLE(component, component, component, component),
        };

        /* There is no boolean move instruction. Instead, we simulate a move by
         * ANDing the condition with itself to get it into r31.w */

        midgard_instruction ins = {
                .type = TAG_ALU_4,

                /* We need to set the conditional as close as possible */
                .precede_break = true,
                .unit = for_branch ? UNIT_SMUL : UNIT_SADD,

                .ssa_args = {
                        .src0 = condition,
                        .src1 = condition,
                        .dest = SSA_FIXED_REGISTER(31),
                },

                .alu = {
                        .op = midgard_alu_op_iand,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = (0x3 << 6), /* w */
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(alu_src)
                },
        };

        emit_mir_instruction(ctx, ins);
}
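
/* A note on the (0x3 << 6) mask above: the 8-bit ALU mask carries two bits
 * per 32-bit component, so bits 6-7 select .w -- consistent with the
 * expand_writemask() calls used elsewhere in this file */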

/* Or, for mixed conditions (with csel_v), here's a vector version using all of
 * r31 instead */

static void
emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
{
        int condition = nir_src_index(ctx, &src->src);

        /* Source to swizzle the desired components in (per the NIR swizzle,
         * rather than forced into w as in the scalar case) */

        const midgard_vector_alu_src alu_src = {
                .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
        };

        /* There is no boolean move instruction. Instead, we simulate a move by
         * ANDing the condition with itself to get it into r31 */

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .precede_break = true,
                .ssa_args = {
                        .src0 = condition,
                        .src1 = condition,
                        .dest = SSA_FIXED_REGISTER(31),
                },
                .alu = {
                        .op = midgard_alu_op_iand,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = expand_writemask(mask_of(nr_comp)),
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(alu_src)
                },
        };

        emit_mir_instruction(ctx, ins);
}

/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
 * pinning to eliminate this move in all known cases */

static void
emit_indirect_offset(compiler_context *ctx, nir_src *src)
{
        int offset = nir_src_index(ctx, src);

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = SSA_UNUSED_1,
                        .src1 = offset,
                        .dest = SSA_FIXED_REGISTER(REGISTER_OFFSET),
                },
                .alu = {
                        .op = midgard_alu_op_imov,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = (0x3 << 6), /* w */
                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
                },
        };

        emit_mir_instruction(ctx, ins);
}

#define ALU_CASE(nir, _op) \
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                break;

#define ALU_CASE_BCAST(nir, _op, count) \
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                broadcast_swizzle = count; \
                break;

static bool
nir_is_fzero_constant(nir_src src)
{
        if (!nir_src_is_const(src))
                return false;

        for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
                if (nir_src_comp_as_float(src, c) != 0.0)
                        return false;
        }

        return true;
}

static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
        bool is_ssa = instr->dest.dest.is_ssa;

        unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
        unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
        unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;

        /* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
         * supported. A few do not and are commented for now. Also, there are a
         * number of NIR ops which Midgard does not support and need to be
         * lowered, also TODO. This switch block emits the opcode and calling
         * convention of the Midgard instruction; actual packing is done
         * further down */

        unsigned op;

        /* Number of components valid to check for the instruction (the rest
         * will be forced to the last), or 0 to use as-is. Relevant as
         * ball-type instructions have a channel count in NIR but are all vec4
         * in Midgard */

        unsigned broadcast_swizzle = 0;

        switch (instr->op) {
                ALU_CASE(fadd, fadd);
                ALU_CASE(fmul, fmul);
                ALU_CASE(fmin, fmin);
                ALU_CASE(fmax, fmax);
                ALU_CASE(imin, imin);
                ALU_CASE(imax, imax);
                ALU_CASE(umin, umin);
                ALU_CASE(umax, umax);
                ALU_CASE(ffloor, ffloor);
                ALU_CASE(fround_even, froundeven);
                ALU_CASE(ftrunc, ftrunc);
                ALU_CASE(fceil, fceil);
                ALU_CASE(fdot3, fdot3);
                ALU_CASE(fdot4, fdot4);
                ALU_CASE(iadd, iadd);
                ALU_CASE(isub, isub);
                ALU_CASE(imul, imul);

                /* Zero shoved as second-arg */
                ALU_CASE(iabs, iabsdiff);

                ALU_CASE(mov, imov);

                ALU_CASE(feq32, feq);
                ALU_CASE(fne32, fne);
                ALU_CASE(flt32, flt);
                ALU_CASE(ieq32, ieq);
                ALU_CASE(ine32, ine);
                ALU_CASE(ilt32, ilt);
                ALU_CASE(ult32, ult);

                /* We don't have a native b2f32 instruction. Instead, like many
                 * GPUs, we exploit booleans as 0/~0 for false/true, and
                 * correspondingly AND by 1.0 to do the type conversion. For
                 * the moment, prime us to emit:
                 *
                 * iand [whatever], #0
                 *
                 * At the end of emit_alu (as MIR), we'll fix-up the constant
                 */

                ALU_CASE(b2f32, iand);
                ALU_CASE(b2i32, iand);

                /* Likewise, we don't have a dedicated f2b32 instruction, but
                 * we can do a "not equal to 0.0" test. */

                ALU_CASE(f2b32, fne);
                ALU_CASE(i2b32, ine);

                ALU_CASE(frcp, frcp);
                ALU_CASE(frsq, frsqrt);
                ALU_CASE(fsqrt, fsqrt);
                ALU_CASE(fexp2, fexp2);
                ALU_CASE(flog2, flog2);

                ALU_CASE(f2i32, f2i_rtz);
                ALU_CASE(f2u32, f2u_rtz);
                ALU_CASE(i2f32, i2f_rtz);
                ALU_CASE(u2f32, u2f_rtz);

                ALU_CASE(fsin, fsin);
                ALU_CASE(fcos, fcos);

                /* Second op implicit #0 */
                ALU_CASE(inot, inor);
                ALU_CASE(iand, iand);
                ALU_CASE(ior, ior);
                ALU_CASE(ixor, ixor);
                ALU_CASE(ishl, ishl);
                ALU_CASE(ishr, iasr);
                ALU_CASE(ushr, ilsr);

                ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
                ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
                ALU_CASE(b32all_fequal4, fball_eq);

                ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
                ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
                ALU_CASE(b32any_fnequal4, fbany_neq);

                ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
                ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
                ALU_CASE(b32all_iequal4, iball_eq);

                ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
                ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
                ALU_CASE(b32any_inequal4, ibany_neq);

                /* Source mods will be shoved in later */
                ALU_CASE(fabs, fmov);
                ALU_CASE(fneg, fmov);
                ALU_CASE(fsat, fmov);

        /* For greater-or-equal, we lower to less-or-equal and flip the
         * arguments */

        case nir_op_fge:
        case nir_op_fge32:
        case nir_op_ige32:
        case nir_op_uge32: {
                op =
                        instr->op == nir_op_fge   ? midgard_alu_op_fle :
                        instr->op == nir_op_fge32 ? midgard_alu_op_fle :
                        instr->op == nir_op_ige32 ? midgard_alu_op_ile :
                        instr->op == nir_op_uge32 ? midgard_alu_op_ule :
                        0;

                /* Swap via temporary */
                nir_alu_src temp = instr->src[1];
                instr->src[1] = instr->src[0];
                instr->src[0] = temp;

                break;
        }

        case nir_op_b32csel: {
                /* Midgard features both fcsel and icsel, depending on
                 * the type of the arguments/output. However, as long
                 * as we're careful we can _always_ use icsel and
                 * _never_ need fcsel, since the latter does additional
                 * floating-point-specific processing whereas the
                 * former just moves bits on the wire. It's not obvious
                 * why these are separate opcodes, save for the ability
                 * to do things like sat/pos/abs/neg for free */

                bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
                op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;

                /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
                nr_inputs = 2;

                /* Emit the condition into r31 */

                if (mixed)
                        emit_condition_mixed(ctx, &instr->src[0], nr_components);
                else
                        emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);

                /* The condition is the first argument; move the other
                 * arguments up one to be a binary instruction for
                 * Midgard */

                memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
                break;
        }

        default:
                DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
                assert(0);
                return;
        }

        /* Midgard can perform certain modifiers on output of an ALU op */
        unsigned outmod;

        if (midgard_is_integer_out_op(op)) {
                outmod = midgard_outmod_int_wrap;
        } else {
                bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
                outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
        }

        /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */

        if (instr->op == nir_op_fmax) {
                if (nir_is_fzero_constant(instr->src[0].src)) {
                        op = midgard_alu_op_fmov;
                        nr_inputs = 1;
                        outmod = midgard_outmod_pos;
                        instr->src[0] = instr->src[1];
                } else if (nir_is_fzero_constant(instr->src[1].src)) {
                        op = midgard_alu_op_fmov;
                        nr_inputs = 1;
                        outmod = midgard_outmod_pos;
                }
        }

        /* Fetch unit, quirks, etc information */
        unsigned opcode_props = alu_opcode_props[op].props;
        bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;

        /* src0 will always exist afaik, but src1 will not for 1-argument
         * instructions. The latter can only be fetched if the instruction
         * needs it, or else we may segfault. */

        unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;

        /* Rather than use the instruction generation helpers, we do it
         * ourselves here to avoid the mess */

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
                        .src1 = quirk_flipped_r24 ? src0 : src1,
                        .dest = dest,
                }
        };

        nir_alu_src *nirmods[2] = { NULL };

        if (nr_inputs == 2) {
                nirmods[0] = &instr->src[0];
                nirmods[1] = &instr->src[1];
        } else if (nr_inputs == 1) {
                nirmods[quirk_flipped_r24] = &instr->src[0];
        } else {
                assert(0);
        }

        /* These were lowered to a move, so apply the corresponding mod */

        if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
                nir_alu_src *s = nirmods[quirk_flipped_r24];

                if (instr->op == nir_op_fneg)
                        s->negate = !s->negate;

                if (instr->op == nir_op_fabs)
                        s->abs = !s->abs;
        }

        bool is_int = midgard_is_integer_op(op);

        midgard_vector_alu alu = {
                .op = op,
                .reg_mode = midgard_reg_mode_32,
                .dest_override = midgard_dest_override_none,
                .outmod = outmod,

                /* Writemask only valid for non-SSA NIR */
                .mask = expand_writemask(mask_of(nr_components)),

                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
        };

        /* Apply writemask if non-SSA, keeping in mind that we can't write to
         * components that don't exist */

        if (!is_ssa)
                alu.mask &= expand_writemask(instr->dest.write_mask);

        ins.alu = alu;

        /* Late fixup for emulated instructions */

        if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
                /* Presently, our second argument is an inline #0 constant.
                 * Switch over to an embedded 1.0 constant (that can't fit
                 * inline, since we're 32-bit, not 16-bit like the inline
                 * constants) */

                ins.ssa_args.inline_constant = false;
                ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                ins.has_constants = true;

                if (instr->op == nir_op_b2f32) {
                        ins.constants[0] = 1.0f;
                } else {
                        /* Type pun it into place */
                        uint32_t one = 0x1;
                        memcpy(&ins.constants[0], &one, sizeof(uint32_t));
                }

                ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
        } else if (nr_inputs == 1 && !quirk_flipped_r24) {
                /* Lots of instructions need a 0 plonked in */
                ins.ssa_args.inline_constant = false;
                ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                ins.has_constants = true;
                ins.constants[0] = 0.0f;
                ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
        } else if (instr->op == nir_op_inot) {
                /* ~b = ~(b & b), so duplicate the source */
                ins.ssa_args.src1 = ins.ssa_args.src0;
                ins.alu.src2 = ins.alu.src1;
        }

        if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
                /* To avoid duplicating the lookup tables (probably), true LUT
                 * instructions can only operate as if they were scalars. Lower
                 * them here by changing the component. */

                uint8_t original_swizzle[4];
                memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));

                for (int i = 0; i < nr_components; ++i) {
                        /* Mask the associated component, dropping the
                         * instruction if needed */

                        ins.alu.mask = (0x3) << (2 * i);
                        ins.alu.mask &= alu.mask;

                        if (!ins.alu.mask)
                                continue;

                        for (int j = 0; j < 4; ++j)
                                nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */

                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
                        emit_mir_instruction(ctx, ins);
                }
        } else {
                emit_mir_instruction(ctx, ins);
        }
}

#undef ALU_CASE

/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
 * optimized) versions of UBO #0 */

static void
emit_ubo_read(
        compiler_context *ctx,
        unsigned dest,
        unsigned offset,
        nir_src *indirect_offset,
        unsigned index)
{
        /* TODO: half-floats */

        if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
                /* Fast path: For the first 16 uniforms, direct accesses are
                 * 0-cycle, since they're just a register fetch in the usual
                 * case. So, we alias the registers while we're still in
                 * SSA-space */

                int reg_slot = 23 - offset;
                alias_ssa(ctx, dest, SSA_FIXED_REGISTER(reg_slot));
        } else {
                /* Otherwise, read from the 'special' UBO to access
                 * higher-indexed uniforms, at a performance cost. More
                 * generally, we're emitting a UBO read instruction. */

                midgard_instruction ins = m_ld_uniform_32(dest, offset);

                /* TODO: Don't split */
                ins.load_store.varying_parameters = (offset & 7) << 7;
                ins.load_store.address = offset >> 3;

                if (indirect_offset) {
                        emit_indirect_offset(ctx, indirect_offset);
                        ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
                } else {
                        ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
                }

                /* TODO respect index */

                emit_mir_instruction(ctx, ins);
        }
}
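
/* Worked example of the split above: a uniform at offset 19 is emitted with
 * address = 19 >> 3 = 2, with the remaining low bits riding in
 * varying_parameters as (19 & 7) << 7 */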

static void
emit_varying_read(
        compiler_context *ctx,
        unsigned dest, unsigned offset,
        unsigned nr_comp, unsigned component,
        nir_src *indirect_offset, nir_alu_type type)
{
        /* XXX: Half-floats? */
        /* TODO: swizzle, mask */

        midgard_instruction ins = m_ld_vary_32(dest, offset);
        ins.load_store.mask = mask_of(nr_comp);
        ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);

        midgard_varying_parameter p = {
                .is_varying = 1,
                .interpolation = midgard_interp_default,
                .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
        };

        unsigned u;
        memcpy(&u, &p, sizeof(p));
        ins.load_store.varying_parameters = u;

        if (indirect_offset) {
                /* We need to add in the dynamic index, moved to r27.w */
                emit_indirect_offset(ctx, indirect_offset);
                ins.load_store.unknown = 0x79e; /* xxx: what is this? */
        } else {
                /* Just a direct load */
                ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
        }

        /* Use the type appropriate load */
        switch (type) {
        case nir_type_uint:
        case nir_type_bool:
                ins.load_store.op = midgard_op_ld_vary_32u;
                break;
        case nir_type_int:
                ins.load_store.op = midgard_op_ld_vary_32i;
                break;
        case nir_type_float:
                ins.load_store.op = midgard_op_ld_vary_32;
                break;
        default:
                unreachable("Attempted to load unknown type");
                break;
        }

        emit_mir_instruction(ctx, ins);
}
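
/* As an example of the type-directed selection above: a varying declared
 * `flat in int` reaches here with type == nir_type_int and is read with
 * midgard_op_ld_vary_32i rather than the float midgard_op_ld_vary_32, since
 * interpolating the raw integer bits as floats would corrupt them */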

static void
emit_sysval_read(compiler_context *ctx, nir_instr *instr)
{
        unsigned dest;
        /* Figure out which uniform this is */
        int sysval = sysval_for_instr(ctx, instr, &dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */
        emit_ubo_read(ctx, dest, uniform, NULL, 0);
}

/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
 * using scalar ops functional on earlier Midgard generations. Newer Midgard
 * generations have faster vectorized reads. This operation is for blend
 * shaders in particular; reading the tilebuffer from the fragment shader
 * remains an open problem. */

static void
emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
{
        midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
        ins.load_store.swizzle = 0; /* xxxx */

        /* Read each component sequentially */

        for (unsigned c = 0; c < 4; ++c) {
                ins.load_store.mask = (1 << c);
                ins.load_store.unknown = c;
                emit_mir_instruction(ctx, ins);
        }

        /* vadd.u2f hr2, zext(hr2), #0 */

        midgard_vector_alu_src alu_src = blank_alu_src;
        alu_src.mod = midgard_int_zero_extend;
        alu_src.half = true;

        midgard_instruction u2f = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = reg,
                        .src1 = SSA_UNUSED_0,
                        .dest = reg,
                        .inline_constant = true
                },
                .alu = {
                        .op = midgard_alu_op_u2f_rtz,
                        .reg_mode = midgard_reg_mode_16,
                        .dest_override = midgard_dest_override_none,
                        .mask = 0xF,
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
                }
        };

        emit_mir_instruction(ctx, u2f);

        /* vmul.fmul.sat r1, hr2, #0.00392151 */

        alu_src.mod = 0;

        midgard_instruction fmul = {
                .type = TAG_ALU_4,
                .inline_constant = _mesa_float_to_half(1.0 / 255.0),
                .ssa_args = {
                        .src0 = reg,
                        .dest = reg,
                        .src1 = SSA_UNUSED_0,
                        .inline_constant = true
                },
                .alu = {
                        .op = midgard_alu_op_fmul,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_sat,
                        .mask = 0xFF,
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
                }
        };

        emit_mir_instruction(ctx, fmul);
}

static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
        unsigned offset = 0, reg;

        switch (instr->intrinsic) {
        case nir_intrinsic_discard_if:
                emit_condition(ctx, &instr->src[0], true, COMPONENT_X);

        /* fallthrough */

        case nir_intrinsic_discard: {
                bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
                struct midgard_instruction discard = v_branch(conditional, false);
                discard.branch.target_type = TARGET_DISCARD;
                emit_mir_instruction(ctx, discard);

                ctx->can_discard = true;
                break;
        }

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_input: {
                bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
                bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;

                /* Get the base type of the intrinsic */
                nir_alu_type t = nir_intrinsic_type(instr);
                t = nir_alu_type_get_base_type(t);

                if (!is_ubo) {
                        offset = nir_intrinsic_base(instr);
                }

                unsigned nr_comp = nir_intrinsic_dest_components(instr);

                nir_src *src_offset = nir_get_io_offset_src(instr);

                bool direct = nir_src_is_const(*src_offset);

                if (direct)
                        offset += nir_src_as_uint(*src_offset);

                /* We may need to apply a fractional offset */
                int component = instr->intrinsic == nir_intrinsic_load_input ?
                                nir_intrinsic_component(instr) : 0;
                reg = nir_dest_index(ctx, &instr->dest);

                if (is_uniform && !ctx->is_blend) {
                        emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
                } else if (is_ubo) {
                        nir_src index = instr->src[0];

                        /* We don't yet support indirect UBOs. For indirect
                         * block numbers (if that's possible), we don't know
                         * enough about the hardware yet. For indirect sources,
                         * we know what we need but we need to add some NIR
                         * support for lowering correctly with respect to
                         * 128-bit reads */

                        assert(nir_src_is_const(index));
                        assert(nir_src_is_const(*src_offset));

                        /* TODO: Alignment */
                        assert((offset & 0xF) == 0);

                        uint32_t uindex = nir_src_as_uint(index) + 1;
                        emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
                } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
                        emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t);
                } else if (ctx->is_blend) {
                        /* For blend shaders, load the input color, which is
                         * preloaded to r0 */

                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                        emit_mir_instruction(ctx, move);
                } else if (ctx->stage == MESA_SHADER_VERTEX) {
                        midgard_instruction ins = m_ld_attr_32(reg, offset);
                        ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
                        ins.load_store.mask = mask_of(nr_comp);

                        /* Use the type appropriate load */
                        switch (t) {
                        case nir_type_uint:
                        case nir_type_bool:
                                ins.load_store.op = midgard_op_ld_attr_32u;
                                break;
                        case nir_type_int:
                                ins.load_store.op = midgard_op_ld_attr_32i;
                                break;
                        case nir_type_float:
                                ins.load_store.op = midgard_op_ld_attr_32;
                                break;
                        default:
                                unreachable("Attempted to load unknown type");
                                break;
                        }

                        emit_mir_instruction(ctx, ins);
                } else {
                        DBG("Unknown load\n");
                        assert(0);
                }

                break;
        }

        case nir_intrinsic_load_output:
                assert(nir_src_is_const(instr->src[0]));
                reg = nir_dest_index(ctx, &instr->dest);

                if (ctx->is_blend) {
                        /* TODO: MRT */
                        emit_fb_read_blend_scalar(ctx, reg);
                } else {
                        DBG("Unknown output load\n");
                        assert(0);
                }

                break;

        case nir_intrinsic_load_blend_const_color_rgba: {
                assert(ctx->is_blend);
                reg = nir_dest_index(ctx, &instr->dest);

                /* Blend constants are embedded directly in the shader and
                 * patched in, so we use some magic routing */

                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
                ins.has_constants = true;
                ins.has_blend_constant = true;
                emit_mir_instruction(ctx, ins);
                break;
        }

        case nir_intrinsic_store_output:
                assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");

                offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);

                reg = nir_src_index(ctx, &instr->src[0]);

                if (ctx->stage == MESA_SHADER_FRAGMENT) {
                        /* gl_FragColor is not emitted with load/store
                         * instructions. Instead, it gets plonked into
                         * r0 at the end of the shader and we do the
                         * framebuffer writeout dance. TODO: Defer
                         * writes */

                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                        emit_mir_instruction(ctx, move);

                        /* Save the index we're writing to for later reference
                         * in the epilogue */

                        ctx->fragment_output = reg;
                } else if (ctx->stage == MESA_SHADER_VERTEX) {
                        /* Varyings are written into one of two special
                         * varying registers, r26 or r27. The register itself is
                         * selected as the register in the st_vary instruction,
                         * minus the base of 26. E.g. write into r27 and then
                         * call st_vary(1) */

                        midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
                        emit_mir_instruction(ctx, ins);

                        /* We should have been vectorized, though we don't
                         * currently check that st_vary is emitted only once
                         * per slot (this is relevant, since there's not a mask
                         * parameter available on the store [set to 0 by the
                         * blob]). We do respect the component by adjusting the
                         * swizzle. */

                        unsigned component = nir_intrinsic_component(instr);

                        midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
                        st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
                        st.load_store.swizzle = SWIZZLE_XYZW << (2 * component);
                        emit_mir_instruction(ctx, st);
                } else {
                        DBG("Unknown store\n");
                        assert(0);
                }

                break;

        case nir_intrinsic_load_alpha_ref_float:
                assert(instr->dest.is_ssa);

                float ref_value = ctx->alpha_ref;

                float *v = ralloc_array(NULL, float, 4);
                memcpy(v, &ref_value, sizeof(float));
                _mesa_hash_table_u64_insert(ctx->ssa_constants, instr->dest.ssa.index + 1, v);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
                emit_sysval_read(ctx, &instr->instr);
                break;

        default:
                printf("Unhandled intrinsic\n");
                assert(0);
                break;
        }
}

static unsigned
midgard_tex_format(enum glsl_sampler_dim dim)
{
        switch (dim) {
        case GLSL_SAMPLER_DIM_1D:
        case GLSL_SAMPLER_DIM_BUF:
                return MALI_TEX_1D;

        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_EXTERNAL:
                return MALI_TEX_2D;

        case GLSL_SAMPLER_DIM_3D:
                return MALI_TEX_3D;

        case GLSL_SAMPLER_DIM_CUBE:
                return MALI_TEX_CUBE;

        default:
                DBG("Unknown sampler dim type\n");
                assert(0);
                return 0;
        }
}

/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
 * was successful */

static bool
pan_attach_constant_bias(
        compiler_context *ctx,
        nir_src lod,
        midgard_texture_word *word)
{
        /* To attach as constant, it has to *be* constant */

        if (!nir_src_is_const(lod))
                return false;

        float f = nir_src_as_float(lod);

        /* Break into fixed-point */
        signed lod_int = f;
        float lod_frac = f - lod_int;

        /* Carry over negative fractions */
        if (lod_frac < 0.0) {
                lod_int--;
                lod_frac += 1.0;
        }

        /* Encode */
        word->bias = float_to_ubyte(lod_frac);
        word->bias_int = lod_int;

        return true;
}
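
/* Worked example of the encoding above: an explicit bias of -0.5 truncates to
 * lod_int = 0 with lod_frac = -0.5; the negative-fraction carry rewrites that
 * to lod_int = -1, lod_frac = 0.5, so bias_int = -1 and bias =
 * float_to_ubyte(0.5) (roughly 128, depending on rounding) */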

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t)
{
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

static void
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                  unsigned midgard_texop)
{
        /* TODO */
        //assert (!instr->sampler);
        //assert (!instr->texture_array_size);

        /* Allocate registers via a round robin scheme to alternate between the two registers */
        int reg = ctx->texture_op_count & 1;
        int in_reg = reg, out_reg = reg;

        /* Make room for the reg */

        if (ctx->texture_index[reg] > -1)
                unalias_ssa(ctx, ctx->texture_index[reg]);

        int texture_index = instr->texture_index;
        int sampler_index = texture_index;

        /* No helper to build texture words -- we do it all here */
        midgard_instruction ins = {
                .type = TAG_TEXTURE_4,
                .texture = {
                        .op = midgard_texop,
                        .format = midgard_tex_format(instr->sampler_dim),
                        .texture_handle = texture_index,
                        .sampler_handle = sampler_index,

                        /* TODO: Regalloc it in */
                        .swizzle = SWIZZLE_XYZW,
                        .mask = 0xF,

                        /* TODO: half */
                        .in_reg_full = 1,
                        .out_full = 1,

                        .sampler_type = midgard_sampler_type(instr->dest_type),
                }
        };

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
                int index = nir_src_index(ctx, &instr->src[i].src);
                int nr_comp = nir_src_num_components(instr->src[i].src);
                midgard_vector_alu_src alu_src = blank_alu_src;

                switch (instr->src[i].src_type) {
                case nir_tex_src_coord: {
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                /* texelFetch is undefined on samplerCube */
                                assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);

                                /* For cubemaps, we need to load coords into
                                 * special r27, and then use a special ld/st op
                                 * to select the face and copy the xy into the
                                 * texture register */

                                alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);

                                midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
                                emit_mir_instruction(ctx, move);

                                midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                st.load_store.unknown = 0x24; /* XXX: What is this? */
                                st.load_store.mask = 0x3; /* xy */
                                st.load_store.swizzle = alu_src.swizzle;
                                emit_mir_instruction(ctx, st);

                                ins.texture.in_reg_swizzle = swizzle_of(2);
                        } else {
                                ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);

                                midgard_instruction mov = v_mov(index, alu_src, reg);
                                mov.alu.mask = expand_writemask(mask_of(nr_comp));
                                emit_mir_instruction(ctx, mov);

                                if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
                                        /* Texel fetch opcodes care about the
                                         * values of z and w, so we actually
                                         * need to spill into a second register
                                         * for a texel fetch with register bias
                                         * (for non-2D). TODO: Implement that
                                         */

                                        assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);

                                        midgard_instruction zero = v_mov(index, alu_src, reg);
                                        zero.ssa_args.inline_constant = true;
                                        zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                                        zero.has_constants = true;
                                        zero.alu.mask = ~mov.alu.mask;
                                        emit_mir_instruction(ctx, zero);

                                        ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
                                } else {
                                        /* Non-texel fetch doesn't need that
                                         * nonsense. However we do use the Z
                                         * for array indexing */
                                        bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
                                        ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
                                }
                        }

                        break;
                }

                case nir_tex_src_bias:
                case nir_tex_src_lod: {
                        /* Try as a constant if we can */

                        bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
                        if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
                                break;

                        /* Otherwise we use a register. To keep RA simple, we
                         * put the bias/LOD into the w component of the input
                         * source, which is otherwise in xy */

                        alu_src.swizzle = SWIZZLE_XXXX;

                        midgard_instruction mov = v_mov(index, alu_src, reg);
                        mov.alu.mask = expand_writemask(1 << COMPONENT_W);
                        emit_mir_instruction(ctx, mov);

                        ins.texture.lod_register = true;

                        midgard_tex_register_select sel = {
                                .select = in_reg,
                                .full = 1,

                                /* w */
                                .component_lo = 1,
                                .component_hi = 1
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins.texture.bias = packed;

                        break;
                }

                default:
                        unreachable("Unknown texture source type\n");
                }
        }

        /* Set registers to read and write from the same place */
        ins.texture.in_reg_select = in_reg;
        ins.texture.out_reg_select = out_reg;

        emit_mir_instruction(ctx, ins);

        /* Simultaneously alias the destination and emit a move for it. The
         * move will be eliminated if possible */

        int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
        ctx->texture_index[reg] = o_index;

        midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
        emit_mir_instruction(ctx, ins2);

        /* Used for .cont and .last hinting */
        ctx->texture_op_count++;
}

static void
emit_tex(compiler_context *ctx, nir_tex_instr *instr)
{
        /* Fixup op, since only textureLod is permitted in VS but NIR can give
         * generic tex in some cases (which confuses the hardware) */

        bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;

        if (is_vertex && instr->op == nir_texop_tex)
                instr->op = nir_texop_txl;

        switch (instr->op) {
        case nir_texop_tex:
        case nir_texop_txb:
                emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
                break;
        case nir_texop_txl:
                emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
                break;
        case nir_texop_txf:
                emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
                break;
        case nir_texop_txs:
                emit_sysval_read(ctx, &instr->instr);
                break;
        default:
                unreachable("Unhandled texture op");
        }
}

static void
emit_jump(compiler_context *ctx, nir_jump_instr *instr)
{
        switch (instr->type) {
        case nir_jump_break: {
                /* Emit a branch out of the loop */
                struct midgard_instruction br = v_branch(false, false);
                br.branch.target_type = TARGET_BREAK;
                br.branch.target_break = ctx->current_loop_depth;
                emit_mir_instruction(ctx, br);

                DBG("break..\n");
                break;
        }

        default:
                DBG("Unknown jump type %d\n", instr->type);
                break;
        }
}

static void
emit_instr(compiler_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                DBG("Unhandled instruction type\n");
                break;
        }
}

/* ALU instructions can inline or embed constants, which decreases register
 * pressure and saves space. */

#define CONDITIONAL_ATTACH(src) { \
        void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src + 1); \
        \
        if (entry) { \
                attach_constants(ctx, alu, entry, alu->ssa_args.src + 1); \
                alu->ssa_args.src = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
        } \
}

static void
inline_alu_constants(compiler_context *ctx)
{
        mir_foreach_instr(ctx, alu) {
                /* Other instructions cannot inline constants */
                if (alu->type != TAG_ALU_4) continue;

                /* If there is already a constant here, we can do nothing */
                if (alu->has_constants) continue;

                /* It makes no sense to inline constants on a branch */
                if (alu->compact_branch || alu->prepacked_branch) continue;

                CONDITIONAL_ATTACH(src0);

                if (!alu->has_constants) {
                        CONDITIONAL_ATTACH(src1)
                } else if (!alu->inline_constant) {
                        /* Corner case: _two_ vec4 constants, for instance with a
                         * csel. For this case, we can only use a constant
                         * register for one, we'll have to emit a move for the
                         * other. Note, if both arguments are constants, then
                         * necessarily neither argument depends on the value of
                         * any particular register. As the destination register
                         * will be wiped, that means we can spill the constant
                         * to the destination register.
                         */

                        void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src1 + 1);
                        unsigned scratch = alu->ssa_args.dest;

                        if (entry) {
                                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
                                attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);

                                /* Force a break XXX Defer r31 writes */
                                ins.unit = UNIT_VLUT;

                                /* Set the source */
                                alu->ssa_args.src1 = scratch;

                                /* Inject us -before- the last instruction which set r31 */
                                mir_insert_instruction_before(mir_prev_op(alu), ins);
                        }
                }
        }
}
1824
1825 /* Midgard supports two types of constants, embedded constants (128-bit) and
1826 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
1827 * constants can be demoted to inline constants, for space savings and
1828 * sometimes a performance boost */
1829
1830 static void
1831 embedded_to_inline_constant(compiler_context *ctx)
1832 {
1833 mir_foreach_instr(ctx, ins) {
1834 if (!ins->has_constants) continue;
1835
1836 if (ins->ssa_args.inline_constant) continue;
1837
1838 /* Blend constants must not be inlined by definition */
1839 if (ins->has_blend_constant) continue;
1840
1841 /* src1 cannot be an inline constant due to encoding
1842 * restrictions. So, if possible we try to flip the arguments
1843 * in that case */
1844
1845 int op = ins->alu.op;
1846
1847 if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1848 switch (op) {
1849 /* These ops require an operational change to flip
1850 * their arguments TODO */
1851 case midgard_alu_op_flt:
1852 case midgard_alu_op_fle:
1853 case midgard_alu_op_ilt:
1854 case midgard_alu_op_ile:
1855 case midgard_alu_op_fcsel:
1856 case midgard_alu_op_icsel:
1857 DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
1858 default:
1859 break;
1860 }
1861
1862 if (alu_opcode_props[op].props & OP_COMMUTES) {
1863 /* Flip the SSA numbers */
1864 ins->ssa_args.src0 = ins->ssa_args.src1;
1865 ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1866
1867 /* And flip the modifiers */
1868
1869 unsigned src_temp;
1870
1871 src_temp = ins->alu.src2;
1872 ins->alu.src2 = ins->alu.src1;
1873 ins->alu.src1 = src_temp;
1874 }
1875 }
1876
1877 if (ins->ssa_args.src1 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1878 /* Extract the source information */
1879
1880 midgard_vector_alu_src *src;
1881 int q = ins->alu.src2;
1882 midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
1883 src = m;
1884
1885 /* Component is from the swizzle, e.g. r26.w -> w component. TODO: What if x is masked out? */
1886 int component = src->swizzle & 3;
1887
1888 /* Scale constant appropriately, if we can legally */
1889 uint16_t scaled_constant = 0;
1890
1891 if (midgard_is_integer_op(op)) {
1892 unsigned int *iconstants = (unsigned int *) ins->constants;
1893 scaled_constant = (uint16_t) iconstants[component];
1894
1895 /* Constant overflow after resize */
1896 if (scaled_constant != iconstants[component])
1897 continue;
1898 } else {
1899 float original = (float) ins->constants[component];
1900 scaled_constant = _mesa_float_to_half(original);
1901
1902 /* Check for loss of precision. If this is
1903 * mediump, we don't care, but for a highp
1904 * shader, we need to pay attention. NIR
1905 * doesn't yet tell us which mode we're in!
1906 * Practically this prevents most constants
1907 * from being inlined, sadly. */
1908
1909 float fp32 = _mesa_half_to_float(scaled_constant);
1910
1911 if (fp32 != original)
1912 continue;
1913 }
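/* Examples (hypothetical values): the integer 70000 truncates to 4464
 * in 16 bits and bails above, while 1024.0f survives the FP16
 * round-trip and remains a candidate for inlining. */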
1914
1915 /* We don't know how to handle these with a constant */
1916
1917 if (src->mod || src->half || src->rep_low || src->rep_high) {
1918 DBG("Bailing inline constant...\n");
1919 continue;
1920 }
1921
1922 /* Make sure that the constant is not itself a
1923 * vector by checking if all accessed values
1924 * (by the swizzle) are the same. */
1925
1926 uint32_t *cons = (uint32_t *) ins->constants;
1927 uint32_t value = cons[component];
1928
1929 bool is_vector = false;
1930 unsigned mask = effective_writemask(&ins->alu);
1931
1932 for (int c = 1; c < 4; ++c) {
1933 /* We only care if this component is actually used */
1934 if (!(mask & (1 << c)))
1935 continue;
1936
1937 uint32_t test = cons[(src->swizzle >> (2 * c)) & 3];
1938
1939 if (test != value) {
1940 is_vector = true;
1941 break;
1942 }
1943 }
1944
1945 if (is_vector)
1946 continue;
1947
1948 /* Get rid of the embedded constant */
1949 ins->has_constants = false;
1950 ins->ssa_args.src1 = SSA_UNUSED_0;
1951 ins->ssa_args.inline_constant = true;
1952 ins->inline_constant = scaled_constant;
1953 }
1954 }
1955 }
1956
1957 /* Map normal SSA sources to other SSA sources / fixed registers (like
1958 * uniforms) */
1959
1960 static void
1961 map_ssa_to_alias(compiler_context *ctx, int *ref)
1962 {
1963         /* The sign bit is used quite deliberately to mark unused sources */
1964 if (*ref < 0)
1965 return;
1966
1967 unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
1968
1969 if (alias) {
1970                 /* Remove entry in leftovers to avoid a redundant fmov */
1971
1972 struct set_entry *leftover = _mesa_set_search(ctx->leftover_ssa_to_alias, ((void *) (uintptr_t) (*ref + 1)));
1973
1974 if (leftover)
1975 _mesa_set_remove(ctx->leftover_ssa_to_alias, leftover);
1976
1977 /* Assign the alias map */
1978 *ref = alias - 1;
1979 return;
1980 }
1981 }
1982
1983 /* Basic dead code elimination on the MIR itself, which cleans up e.g. the
1984 * texture pipeline */
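/* E.g. (hypothetical MIR): an "fmov t5, t3" whose destination t5 is never
 * read afterwards is culled here, since t5 is pure SSA rather than a
 * fixed register. */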
1985
1986 static bool
1987 midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
1988 {
1989 bool progress = false;
1990
1991 mir_foreach_instr_in_block_safe(block, ins) {
1992 if (ins->type != TAG_ALU_4) continue;
1993 if (ins->compact_branch) continue;
1994
1995 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
1996 if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
1997
1998 mir_remove_instruction(ins);
1999 progress = true;
2000 }
2001
2002 return progress;
2003 }
2004
2005 /* Dead code elimination for branches at the end of a block - only one branch
2006 * per block is legal semantically */
2007
2008 static void
2009 midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2010 {
2011 bool branched = false;
2012
2013 mir_foreach_instr_in_block_safe(block, ins) {
2014 if (!midgard_is_branch_unit(ins->unit)) continue;
2015
2016 /* We ignore prepacked branches since the fragment epilogue is
2017 * just generally special */
2018 if (ins->prepacked_branch) continue;
2019
2020 /* Discards are similarly special and may not correspond to the
2021 * end of a block */
2022
2023 if (ins->branch.target_type == TARGET_DISCARD) continue;
2024
2025 if (branched) {
2026 /* We already branched, so this is dead */
2027 mir_remove_instruction(ins);
2028 }
2029
2030 branched = true;
2031 }
2032 }
2033
2034 static bool
2035 mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
2036 {
2037 /* abs or neg */
2038 if (!is_int && src.mod) return true;
2039
2040 /* swizzle */
2041 for (unsigned c = 0; c < 4; ++c) {
2042 if (!(mask & (1 << c))) continue;
2043 if (((src.swizzle >> (2*c)) & 3) != c) return true;
2044 }
2045
2046 return false;
2047 }
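/* Worked example (hypothetical operands): with mask 0x3 (.xy written),
 * the swizzle 0x04 (.xyxx) is trivial, since x and y select themselves
 * and the unwritten lanes are skipped, whereas swizzle 0x01 (.yxxx)
 * would be caught above. */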
2048
2049 static bool
2050 mir_nontrivial_source2_mod(midgard_instruction *ins)
2051 {
2052 unsigned mask = squeeze_writemask(ins->alu.mask);
2053 bool is_int = midgard_is_integer_op(ins->alu.op);
2054
2055 midgard_vector_alu_src src2 =
2056 vector_alu_from_unsigned(ins->alu.src2);
2057
2058 return mir_nontrivial_mod(src2, is_int, mask);
2059 }
2060
2061 static bool
2062 mir_nontrivial_outmod(midgard_instruction *ins)
2063 {
2064 bool is_int = midgard_is_integer_op(ins->alu.op);
2065 unsigned mod = ins->alu.outmod;
2066
2067 if (is_int)
2068 return mod != midgard_outmod_int_wrap;
2069 else
2070 return mod != midgard_outmod_none;
2071 }
2072
2073 static bool
2074 midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
2075 {
2076 bool progress = false;
2077
2078 mir_foreach_instr_in_block_safe(block, ins) {
2079 if (ins->type != TAG_ALU_4) continue;
2080 if (!OP_IS_MOVE(ins->alu.op)) continue;
2081
2082 unsigned from = ins->ssa_args.src1;
2083 unsigned to = ins->ssa_args.dest;
2084
2085 /* We only work on pure SSA */
2086
2087 if (to >= SSA_FIXED_MINIMUM) continue;
2088 if (from >= SSA_FIXED_MINIMUM) continue;
2089 if (to >= ctx->func->impl->ssa_alloc) continue;
2090 if (from >= ctx->func->impl->ssa_alloc) continue;
2091
2092 /* Constant propagation is not handled here, either */
2093 if (ins->ssa_args.inline_constant) continue;
2094 if (ins->has_constants) continue;
2095
2096 if (mir_nontrivial_source2_mod(ins)) continue;
2097 if (mir_nontrivial_outmod(ins)) continue;
2098
2099 /* We're clear -- rewrite */
2100 mir_rewrite_index_src(ctx, to, from);
2101 mir_remove_instruction(ins);
2102                 progress = true;
2103 }
2104
2105 return progress;
2106 }
2107
2108 /* fmov.pos is an idiom for fpos. Propagate the .pos up to the source, so that
2109  * the move can then be propagated away entirely */
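/* Sketch of the rewrite (hypothetical MIR):
 *
 *     fadd      t0, a, b           fadd.pos  t0, a, b
 *     fmov.pos  t1, t0      -->    fmov      t1, t0
 *
 * after which copy propagation can remove the fmov outright. */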
2110
2111 static bool
2112 mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
2113 {
2114 /* Nothing to do */
2115 if (comp == midgard_outmod_none)
2116 return true;
2117
2118 if (*outmod == midgard_outmod_none) {
2119 *outmod = comp;
2120 return true;
2121 }
2122
2123 /* TODO: Compose rules */
2124 return false;
2125 }
2126
2127 static bool
2128 midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
2129 {
2130 bool progress = false;
2131
2132 mir_foreach_instr_in_block_safe(block, ins) {
2133 if (ins->type != TAG_ALU_4) continue;
2134 if (ins->alu.op != midgard_alu_op_fmov) continue;
2135 if (ins->alu.outmod != midgard_outmod_pos) continue;
2136
2137 /* TODO: Registers? */
2138 unsigned src = ins->ssa_args.src1;
2139 if (src >= ctx->func->impl->ssa_alloc) continue;
2140 assert(!mir_has_multiple_writes(ctx, src));
2141
2142 /* There might be a source modifier, too */
2143 if (mir_nontrivial_source2_mod(ins)) continue;
2144
2145 /* Backpropagate the modifier */
2146 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
2147 if (v->type != TAG_ALU_4) continue;
2148 if (v->ssa_args.dest != src) continue;
2149
2150 /* Can we even take a float outmod? */
2151 if (midgard_is_integer_out_op(v->alu.op)) continue;
2152
2153                         midgard_outmod_float temp = v->alu.outmod;
2154
2155                         /* Throw in the towel if the modifiers cannot compose */
2156                         if (!mir_compose_float_outmod(&temp, ins->alu.outmod)) break;
2157
2158                         /* Otherwise, transfer the modifier */
2159                         v->alu.outmod = temp;
2160                         ins->alu.outmod = midgard_outmod_none;
2161                         progress = true;
2162
2163 break;
2164 }
2165 }
2166
2167 return progress;
2168 }
2169
2170 static bool
2171 midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
2172 {
2173 bool progress = false;
2174
2175 mir_foreach_instr_in_block_safe(block, ins) {
2176 if (ins->type != TAG_ALU_4) continue;
2177 if (!OP_IS_MOVE(ins->alu.op)) continue;
2178
2179 unsigned from = ins->ssa_args.src1;
2180 unsigned to = ins->ssa_args.dest;
2181
2182 /* Make sure it's simple enough for us to handle */
2183
2184 if (from >= SSA_FIXED_MINIMUM) continue;
2185 if (from >= ctx->func->impl->ssa_alloc) continue;
2186 if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
2187 if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
2188
2189 bool eliminated = false;
2190
2191 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
2192                         /* The texture registers are not SSA so be careful.
2193                          * Conservatively, just stop at the first non-ALU op
2194                          * (even if it may not write) on the way to where we are */
2195
2196 if (v->type != TAG_ALU_4)
2197 break;
2198
2199 if (v->ssa_args.dest == from) {
2200 /* We don't want to track partial writes ... */
2201 if (v->alu.mask == 0xF) {
2202 v->ssa_args.dest = to;
2203 eliminated = true;
2204 }
2205
2206 break;
2207 }
2208 }
2209
2210 if (eliminated)
2211 mir_remove_instruction(ins);
2212
2213 progress |= eliminated;
2214 }
2215
2216 return progress;
2217 }
2218
2219 /* The following passes reorder MIR instructions to enable better scheduling */
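/* E.g. (hypothetical MIR), pairing hoists an orphaned load upward:
 *
 *     ld_vary_32  r0, ...          ld_vary_32  r1, ...
 *     fmul        r2, r0, r0  -->  ld_vary_32  r0, ...
 *     ld_vary_32  r1, ...          fmul        r2, r0, r0
 *
 * so the two loads can share a load/store bundle in the scheduler. */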
2220
2221 static void
2222 midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
2223 {
2224 mir_foreach_instr_in_block_safe(block, ins) {
2225 if (ins->type != TAG_LOAD_STORE_4) continue;
2226
2227 /* We've found a load/store op. Check if next is also load/store. */
2228 midgard_instruction *next_op = mir_next_op(ins);
2229 if (&next_op->link != &block->instructions) {
2230 if (next_op->type == TAG_LOAD_STORE_4) {
2231 /* If so, we're done since we're a pair */
2232 ins = mir_next_op(ins);
2233 continue;
2234 }
2235
2236 /* Maximum search distance to pair, to avoid register pressure disasters */
2237 int search_distance = 8;
2238
2239 /* Otherwise, we have an orphaned load/store -- search for another load */
2240 mir_foreach_instr_in_block_from(block, c, mir_next_op(ins)) {
2241 /* Terminate search if necessary */
2242 if (!(search_distance--)) break;
2243
2244 if (c->type != TAG_LOAD_STORE_4) continue;
2245
2246 /* Stores cannot be reordered, since they have
2247 * dependencies. For the same reason, indirect
2248 * loads cannot be reordered as their index is
2249 * loaded in r27.w */
2250
2251 if (OP_IS_STORE(c->load_store.op)) continue;
2252
2253 /* It appears the 0x800 bit is set whenever a
2254 * load is direct, unset when it is indirect.
2255 * Skip indirect loads. */
2256
2257 if (!(c->load_store.unknown & 0x800)) continue;
2258
2259 /* We found one! Move it up to pair and remove it from the old location */
2260
2261 mir_insert_instruction_before(ins, *c);
2262 mir_remove_instruction(c);
2263
2264 break;
2265 }
2266 }
2267 }
2268 }
2269
2270 /* If there are leftovers after the aliasing pass below, emit actual fmov
2271  * instructions for the slow-but-correct path */
2272
2273 static void
2274 emit_leftover_move(compiler_context *ctx)
2275 {
2276 set_foreach(ctx->leftover_ssa_to_alias, leftover) {
2277 int base = ((uintptr_t) leftover->key) - 1;
2278 int mapped = base;
2279
2280 map_ssa_to_alias(ctx, &mapped);
2281 EMIT(mov, mapped, blank_alu_src, base);
2282 }
2283 }
2284
2285 static void
2286 actualise_ssa_to_alias(compiler_context *ctx)
2287 {
2288 mir_foreach_instr(ctx, ins) {
2289 map_ssa_to_alias(ctx, &ins->ssa_args.src0);
2290 map_ssa_to_alias(ctx, &ins->ssa_args.src1);
2291 }
2292
2293 emit_leftover_move(ctx);
2294 }
2295
2296 static void
2297 emit_fragment_epilogue(compiler_context *ctx)
2298 {
2299 /* Special case: writing out constants requires us to include the move
2300 * explicitly now, so shove it into r0 */
2301
2302 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
2303
2304 if (constant_value) {
2305 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
2306 attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
2307 emit_mir_instruction(ctx, ins);
2308 }
2309
2310 /* Perform the actual fragment writeout. We have two writeout/branch
2311 * instructions, forming a loop until writeout is successful as per the
2312 * docs. TODO: gl_FragDepth */
2313
2314 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2315 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2316 }
2317
2318 /* For the blend epilogue, we need to convert the blended fragment vec4 (stored
2319 * in r0) to a RGBA8888 value by scaling and type converting. We then output it
2320 * with the int8 analogue to the fragment epilogue */
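/* Numerically (a sketch): each FP32 channel v becomes roughly
 *
 *     u8 = f2u_rte(max(v, 0.0f) * 255.0f)
 *
 * where the .pos output modifier supplies the max-with-zero and
 * round-to-even breaks ties, e.g. 127.5 -> 128. */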
2321
2322 static void
2323 emit_blend_epilogue(compiler_context *ctx)
2324 {
2325 /* vmul.fmul.none.fulllow hr48, r0, #255 */
2326
2327 midgard_instruction scale = {
2328 .type = TAG_ALU_4,
2329 .unit = UNIT_VMUL,
2330 .inline_constant = _mesa_float_to_half(255.0),
2331 .ssa_args = {
2332 .src0 = SSA_FIXED_REGISTER(0),
2333 .src1 = SSA_UNUSED_0,
2334 .dest = SSA_FIXED_REGISTER(24),
2335 .inline_constant = true
2336 },
2337 .alu = {
2338 .op = midgard_alu_op_fmul,
2339 .reg_mode = midgard_reg_mode_32,
2340 .dest_override = midgard_dest_override_lower,
2341 .mask = 0xFF,
2342 .src1 = vector_alu_srco_unsigned(blank_alu_src),
2343 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2344 }
2345 };
2346
2347 emit_mir_instruction(ctx, scale);
2348
2349 /* vadd.f2u_rte.pos.low hr0, hr48, #0 */
2350
2351 midgard_vector_alu_src alu_src = blank_alu_src;
2352 alu_src.half = true;
2353
2354 midgard_instruction f2u_rte = {
2355 .type = TAG_ALU_4,
2356 .ssa_args = {
2357 .src0 = SSA_FIXED_REGISTER(24),
2358 .src1 = SSA_UNUSED_0,
2359 .dest = SSA_FIXED_REGISTER(0),
2360 .inline_constant = true
2361 },
2362 .alu = {
2363 .op = midgard_alu_op_f2u_rte,
2364 .reg_mode = midgard_reg_mode_16,
2365 .dest_override = midgard_dest_override_lower,
2366 .outmod = midgard_outmod_pos,
2367 .mask = 0xF,
2368 .src1 = vector_alu_srco_unsigned(alu_src),
2369 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2370 }
2371 };
2372
2373 emit_mir_instruction(ctx, f2u_rte);
2374
2375 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2376 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2377 }
2378
2379 static midgard_block *
2380 emit_block(compiler_context *ctx, nir_block *block)
2381 {
2382         midgard_block *this_block = calloc(1, sizeof(midgard_block));
2383 list_addtail(&this_block->link, &ctx->blocks);
2384
2385 this_block->is_scheduled = false;
2386 ++ctx->block_count;
2387
2388 ctx->texture_index[0] = -1;
2389 ctx->texture_index[1] = -1;
2390
2391 /* Add us as a successor to the block we are following */
2392 if (ctx->current_block)
2393 midgard_block_add_successor(ctx->current_block, this_block);
2394
2395 /* Set up current block */
2396 list_inithead(&this_block->instructions);
2397 ctx->current_block = this_block;
2398
2399 nir_foreach_instr(instr, block) {
2400 emit_instr(ctx, instr);
2401 ++ctx->instruction_count;
2402 }
2403
2404 inline_alu_constants(ctx);
2405 embedded_to_inline_constant(ctx);
2406
2407         /* Perform the heavy lifting for aliasing */
2408 actualise_ssa_to_alias(ctx);
2409
2410 midgard_pair_load_store(ctx, this_block);
2411
2412 /* Append fragment shader epilogue (value writeout) */
2413 if (ctx->stage == MESA_SHADER_FRAGMENT) {
2414 if (block == nir_impl_last_block(ctx->func->impl)) {
2415 if (ctx->is_blend)
2416 emit_blend_epilogue(ctx);
2417 else
2418 emit_fragment_epilogue(ctx);
2419 }
2420 }
2421
2422 if (block == nir_start_block(ctx->func->impl))
2423 ctx->initial_block = this_block;
2424
2425 if (block == nir_impl_last_block(ctx->func->impl))
2426 ctx->final_block = this_block;
2427
2428 /* Allow the next control flow to access us retroactively, for
2429 * branching etc */
2430 ctx->current_block = this_block;
2431
2432 /* Document the fallthrough chain */
2433 ctx->previous_source_block = this_block;
2434
2435 return this_block;
2436 }
2437
2438 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2439
2440 static void
2441 emit_if(struct compiler_context *ctx, nir_if *nif)
2442 {
2443 /* Conditional branches expect the condition in r31.w; emit a move for
2444 * that in the _previous_ block (which is the current block). */
2445 emit_condition(ctx, &nif->condition, true, COMPONENT_X);
2446
2447 /* Speculatively emit the branch, but we can't fill it in until later */
2448 EMIT(branch, true, true);
2449 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2450
2451 /* Emit the two subblocks */
2452 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2453
2454 /* Emit a jump from the end of the then block to the end of the else */
2455 EMIT(branch, false, false);
2456 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2457
2458 /* Emit second block, and check if it's empty */
2459
2460 int else_idx = ctx->block_count;
2461 int count_in = ctx->instruction_count;
2462 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2463 int after_else_idx = ctx->block_count;
2464
2465 /* Now that we have the subblocks emitted, fix up the branches */
2466
2467 assert(then_block);
2468 assert(else_block);
2469
2470 if (ctx->instruction_count == count_in) {
2471 /* The else block is empty, so don't emit an exit jump */
2472 mir_remove_instruction(then_exit);
2473 then_branch->branch.target_block = after_else_idx;
2474 } else {
2475 then_branch->branch.target_block = else_idx;
2476 then_exit->branch.target_block = after_else_idx;
2477 }
2478 }
2479
2480 static void
2481 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2482 {
2483 /* Remember where we are */
2484 midgard_block *start_block = ctx->current_block;
2485
2486 /* Allocate a loop number, growing the current inner loop depth */
2487 int loop_idx = ++ctx->current_loop_depth;
2488
2489 /* Get index from before the body so we can loop back later */
2490 int start_idx = ctx->block_count;
2491
2492 /* Emit the body itself */
2493 emit_cf_list(ctx, &nloop->body);
2494
2495         /* Branch back to the top of the loop */
2496 struct midgard_instruction br_back = v_branch(false, false);
2497 br_back.branch.target_block = start_idx;
2498 emit_mir_instruction(ctx, br_back);
2499
2500 /* Mark down that branch in the graph. Note that we're really branching
2501 * to the block *after* we started in. TODO: Why doesn't the branch
2502 * itself have an off-by-one then...? */
2503 midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
2504
2505         /* Find the index of the block about to follow us (note: we don't add
2506          * one; blocks are 0-indexed, so block_count is already that index) */
2507 int break_block_idx = ctx->block_count;
2508
2509 /* Fix up the break statements we emitted to point to the right place,
2510 * now that we can allocate a block number for them */
2511
2512 list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
2513 mir_foreach_instr_in_block(block, ins) {
2514 if (ins->type != TAG_ALU_4) continue;
2515 if (!ins->compact_branch) continue;
2516 if (ins->prepacked_branch) continue;
2517
2518 /* We found a branch -- check the type to see if we need to do anything */
2519 if (ins->branch.target_type != TARGET_BREAK) continue;
2520
2521 /* It's a break! Check if it's our break */
2522 if (ins->branch.target_break != loop_idx) continue;
2523
2524 /* Okay, cool, we're breaking out of this loop.
2525 * Rewrite from a break to a goto */
2526
2527 ins->branch.target_type = TARGET_GOTO;
2528 ins->branch.target_block = break_block_idx;
2529 }
2530 }
2531
2532 /* Now that we've finished emitting the loop, free up the depth again
2533 * so we play nice with recursion amid nested loops */
2534 --ctx->current_loop_depth;
2535 }
2536
2537 static midgard_block *
2538 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2539 {
2540 midgard_block *start_block = NULL;
2541
2542 foreach_list_typed(nir_cf_node, node, node, list) {
2543 switch (node->type) {
2544 case nir_cf_node_block: {
2545 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2546
2547 if (!start_block)
2548 start_block = block;
2549
2550 break;
2551 }
2552
2553 case nir_cf_node_if:
2554 emit_if(ctx, nir_cf_node_as_if(node));
2555 break;
2556
2557 case nir_cf_node_loop:
2558 emit_loop(ctx, nir_cf_node_as_loop(node));
2559 break;
2560
2561 case nir_cf_node_function:
2562 assert(0);
2563 break;
2564 }
2565 }
2566
2567 return start_block;
2568 }
2569
2570 /* Due to lookahead, we need to report the first tag executed in the command
2571 * stream and in branch targets. An initial block might be empty, so iterate
2572 * until we find one that 'works' */
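/* E.g. (hypothetical): if block 0 carries no bundles but block 1 opens
 * with a texture bundle, the reported first tag is that texture tag
 * rather than 0, which would be invalid. */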
2573
2574 static unsigned
2575 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2576 {
2577 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2578
2579 unsigned first_tag = 0;
2580
2581 do {
2582 midgard_bundle *initial_bundle = util_dynarray_element(&initial_block->bundles, midgard_bundle, 0);
2583
2584 if (initial_bundle) {
2585 first_tag = initial_bundle->tag;
2586 break;
2587 }
2588
2589 /* Initial block is empty, try the next block */
2590 initial_block = list_first_entry(&(initial_block->link), midgard_block, link);
2591     } while (initial_block != NULL);
2592
2593 assert(first_tag);
2594 return first_tag;
2595 }
2596
2597 int
2598 midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend)
2599 {
2600 struct util_dynarray *compiled = &program->compiled;
2601
2602 midgard_debug = debug_get_option_midgard_debug();
2603
2604 compiler_context ictx = {
2605 .nir = nir,
2606 .stage = nir->info.stage,
2607
2608 .is_blend = is_blend,
2609 .blend_constant_offset = -1,
2610
2611 .alpha_ref = program->alpha_ref
2612 };
2613
2614 compiler_context *ctx = &ictx;
2615
2616 /* TODO: Decide this at runtime */
2617 ctx->uniform_cutoff = 8;
2618
2619     /* Initialize hash tables at a global (not per-block) level */
2620
2621 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2622 ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
2623 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2624 ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
2625 ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
2626
2627 /* Record the varying mapping for the command stream's bookkeeping */
2628
2629 struct exec_list *varyings =
2630 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
2631
2632 unsigned max_varying = 0;
2633 nir_foreach_variable(var, varyings) {
2634 unsigned loc = var->data.driver_location;
2635 unsigned sz = glsl_type_size(var->type, FALSE);
2636
2637 for (int c = 0; c < sz; ++c) {
2638 program->varyings[loc + c] = var->data.location + c;
2639 max_varying = MAX2(max_varying, loc + c);
2640 }
2641 }
2642
2643 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2644 * (so we don't accidentally duplicate the epilogue since mesa/st has
2645 * messed with our I/O quite a bit already) */
2646
2647 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2648
2649 if (ctx->stage == MESA_SHADER_VERTEX)
2650 NIR_PASS_V(nir, nir_lower_viewport_transform);
2651
2652 NIR_PASS_V(nir, nir_lower_var_copies);
2653 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2654 NIR_PASS_V(nir, nir_split_var_copies);
2655 NIR_PASS_V(nir, nir_lower_var_copies);
2656 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2657 NIR_PASS_V(nir, nir_lower_var_copies);
2658 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2659
2660 NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
2661
2662 /* Optimisation passes */
2663
2664 optimise_nir(nir);
2665
2666 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2667 nir_print_shader(nir, stdout);
2668 }
2669
2670 /* Assign sysvals and counts, now that we're sure
2671 * (post-optimisation) */
2672
2673 midgard_nir_assign_sysvals(ctx, nir);
2674
2675 program->uniform_count = nir->num_uniforms;
2676 program->sysval_count = ctx->sysval_count;
2677 memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
2678
2679 program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
2680 program->varying_count = max_varying + 1; /* Fencepost off-by-one */
2681
2682 nir_foreach_function(func, nir) {
2683 if (!func->impl)
2684 continue;
2685
2686 list_inithead(&ctx->blocks);
2687 ctx->block_count = 0;
2688 ctx->func = func;
2689
2690 emit_cf_list(ctx, &func->impl->body);
2691 emit_block(ctx, func->impl->end_block);
2692
2693 break; /* TODO: Multi-function shaders */
2694 }
2695
2696 util_dynarray_init(compiled, NULL);
2697
2698 /* MIR-level optimizations */
2699
2700 bool progress = false;
2701
2702 do {
2703 progress = false;
2704
2705 mir_foreach_block(ctx, block) {
2706 progress |= midgard_opt_pos_propagate(ctx, block);
2707 progress |= midgard_opt_copy_prop(ctx, block);
2708 progress |= midgard_opt_copy_prop_tex(ctx, block);
2709 progress |= midgard_opt_dead_code_eliminate(ctx, block);
2710 }
2711 } while (progress);
2712
2713 /* Nested control-flow can result in dead branches at the end of the
2714 * block. This messes with our analysis and is just dead code, so cull
2715 * them */
2716 mir_foreach_block(ctx, block) {
2717 midgard_opt_cull_dead_branch(ctx, block);
2718 }
2719
2720 /* Schedule! */
2721 schedule_program(ctx);
2722
2723 /* Now that all the bundles are scheduled and we can calculate block
2724 * sizes, emit actual branch instructions rather than placeholders */
2725
2726 int br_block_idx = 0;
2727
2728 mir_foreach_block(ctx, block) {
2729 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2730 for (int c = 0; c < bundle->instruction_count; ++c) {
2731 midgard_instruction *ins = bundle->instructions[c];
2732
2733 if (!midgard_is_branch_unit(ins->unit)) continue;
2734
2735 if (ins->prepacked_branch) continue;
2736
2737 /* Parse some basic branch info */
2738 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2739 bool is_conditional = ins->branch.conditional;
2740 bool is_inverted = ins->branch.invert_conditional;
2741 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2742
2743 /* Determine the block we're jumping to */
2744 int target_number = ins->branch.target_block;
2745
2746 /* Report the destination tag */
2747 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
2748
2749 /* Count up the number of quadwords we're
2750 * jumping over = number of quadwords until
2751 * (br_block_idx, target_number) */
2752
2753 int quadword_offset = 0;
2754
2755 if (is_discard) {
2756 /* Jump to the end of the shader. We
2757 * need to include not only the
2758 * following blocks, but also the
2759 * contents of our current block (since
2760 * discard can come in the middle of
2761 * the block) */
2762
2763 midgard_block *blk = mir_get_block(ctx, br_block_idx + 1);
2764
2765 for (midgard_bundle *bun = bundle + 1; bun < (midgard_bundle *)((char*) block->bundles.data + block->bundles.size); ++bun) {
2766 quadword_offset += quadword_size(bun->tag);
2767 }
2768
2769 mir_foreach_block_from(ctx, blk, b) {
2770 quadword_offset += b->quadword_count;
2771 }
2772
2773 } else if (target_number > br_block_idx) {
2774 /* Jump forward */
2775
2776 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2777 midgard_block *blk = mir_get_block(ctx, idx);
2778 assert(blk);
2779
2780 quadword_offset += blk->quadword_count;
2781 }
2782 } else {
2783 /* Jump backwards */
2784
2785 for (int idx = br_block_idx; idx >= target_number; --idx) {
2786 midgard_block *blk = mir_get_block(ctx, idx);
2787 assert(blk);
2788
2789 quadword_offset -= blk->quadword_count;
2790 }
2791 }
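/* E.g. (hypothetical block layout): jumping forward from block 2 to
 * block 5 adds the quadword counts of blocks 3 and 4, while jumping
 * backwards to block 1 subtracts those of blocks 2 and 1, landing at
 * the start of block 1. */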
2792
2793 /* Unconditional extended branches (far jumps)
2794 * have issues, so we always use a conditional
2795 * branch, setting the condition to always for
2796 * unconditional. For compact unconditional
2797 * branches, cond isn't used so it doesn't
2798 * matter what we pick. */
2799
2800 midgard_condition cond =
2801 !is_conditional ? midgard_condition_always :
2802 is_inverted ? midgard_condition_false :
2803 midgard_condition_true;
2804
2805 midgard_jmp_writeout_op op =
2806 is_discard ? midgard_jmp_writeout_op_discard :
2807 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2808 midgard_jmp_writeout_op_branch_cond;
2809
2810 if (!is_compact) {
2811 midgard_branch_extended branch =
2812 midgard_create_branch_extended(
2813 cond, op,
2814 dest_tag,
2815 quadword_offset);
2816
2817 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2818 } else if (is_conditional || is_discard) {
2819 midgard_branch_cond branch = {
2820 .op = op,
2821 .dest_tag = dest_tag,
2822 .offset = quadword_offset,
2823 .cond = cond
2824 };
2825
2826 assert(branch.offset == quadword_offset);
2827
2828 memcpy(&ins->br_compact, &branch, sizeof(branch));
2829 } else {
2830 assert(op == midgard_jmp_writeout_op_branch_uncond);
2831
2832 midgard_branch_uncond branch = {
2833 .op = op,
2834 .dest_tag = dest_tag,
2835 .offset = quadword_offset,
2836 .unknown = 1
2837 };
2838
2839 assert(branch.offset == quadword_offset);
2840
2841 memcpy(&ins->br_compact, &branch, sizeof(branch));
2842 }
2843 }
2844 }
2845
2846 ++br_block_idx;
2847 }
2848
2849 /* Emit flat binary from the instruction arrays. Iterate each block in
2850 * sequence. Save instruction boundaries such that lookahead tags can
2851 * be assigned easily */
2852
2853 /* Cache _all_ bundles in source order for lookahead across failed branches */
2854
2855 int bundle_count = 0;
2856 mir_foreach_block(ctx, block) {
2857 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2858 }
2859 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2860 int bundle_idx = 0;
2861 mir_foreach_block(ctx, block) {
2862 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2863 source_order_bundles[bundle_idx++] = bundle;
2864 }
2865 }
2866
2867 int current_bundle = 0;
2868
2869     /* Midgard prefetches instruction types, so during emission we
2870      * need to look ahead. Unless this is the last instruction, in
2871      * which case we return 1. Or if this is the second to last and
2872      * the last is an ALU, then it's also 1... */
2873
2874 mir_foreach_block(ctx, block) {
2875 mir_foreach_bundle_in_block(block, bundle) {
2876 int lookahead = 1;
2877
2878 if (current_bundle + 1 < bundle_count) {
2879 uint8_t next = source_order_bundles[current_bundle + 1]->tag;
2880
2881 if (!(current_bundle + 2 < bundle_count) && IS_ALU(next)) {
2882 lookahead = 1;
2883 } else {
2884 lookahead = next;
2885 }
2886 }
2887
2888 emit_binary_bundle(ctx, bundle, compiled, lookahead);
2889 ++current_bundle;
2890 }
2891
2892 /* TODO: Free deeper */
2893 //util_dynarray_fini(&block->instructions);
2894 }
2895
2896 free(source_order_bundles);
2897
2898 /* Report the very first tag executed */
2899 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
2900
2901 /* Deal with off-by-one related to the fencepost problem */
2902 program->work_register_count = ctx->work_registers + 1;
2903
2904 program->can_discard = ctx->can_discard;
2905 program->uniform_cutoff = ctx->uniform_cutoff;
2906
2907 program->blend_patch_offset = ctx->blend_constant_offset;
2908
2909 if (midgard_debug & MIDGARD_DBG_SHADERS)
2910 disassemble_midgard(program->compiled.data, program->compiled.size);
2911
2912 return 0;
2913 }