panfrost/midgard: Implement integer sampler
[mesa.git] src/gallium/drivers/panfrost/midgard/midgard_compile.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <err.h>

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"

#include "midgard.h"
#include "midgard_nir.h"
#include "midgard_compile.h"
#include "midgard_ops.h"
#include "helpers.h"
#include "compiler.h"

#include "disassemble.h"

static const struct debug_named_value debug_options[] = {
        {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
        {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
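/* Parsed once from the MIDGARD_MESA_DEBUG environment variable, e.g.
 * MIDGARD_MESA_DEBUG=msgs,shaders enables both flags above */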

int midgard_debug = 0;

#define DBG(fmt, ...) \
        do { if (midgard_debug & MIDGARD_DBG_MSGS) \
                fprintf(stderr, "%s:%d: "fmt, \
                        __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
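/* Usage sketch: DBG("unhandled op %d\n", op) prefixes the message with the
 * enclosing function and line, and only prints when MIDGARD_DBG_MSGS is set */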

static bool
midgard_is_branch_unit(unsigned unit)
{
        return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
}

static void
midgard_block_add_successor(midgard_block *block, midgard_block *successor)
{
        block->successors[block->nr_successors++] = successor;
        assert(block->nr_successors <= ARRAY_SIZE(block->successors));
}

/* Helpers to generate midgard_instruction's using macro magic, since every
 * driver seems to do it that way */

#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));

#define M_LOAD_STORE(name, rname, uname) \
        static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
                midgard_instruction i = { \
                        .type = TAG_LOAD_STORE_4, \
                        .ssa_args = { \
                                .rname = ssa, \
                                .uname = -1, \
                                .src1 = -1 \
                        }, \
                        .load_store = { \
                                .op = midgard_op_##name, \
                                .mask = 0xF, \
                                .swizzle = SWIZZLE_XYZW, \
                                .address = address \
                        } \
                }; \
                \
                return i; \
        }
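/* M_LOAD instantiates a load whose SSA index names the destination, while
 * M_STORE flips the fields so the SSA index names the value being stored */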

#define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
#define M_STORE(name) M_LOAD_STORE(name, src0, dest)

/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
 * the corresponding Midgard source */

static midgard_vector_alu_src
vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count)
{
        if (!src) return blank_alu_src;

        /* Figure out how many components there are so we can adjust the
         * swizzle. Specifically we want to broadcast the last channel so
         * things like ball2/3 work
         */

        if (broadcast_count) {
                uint8_t last_component = src->swizzle[broadcast_count - 1];

                for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
                        src->swizzle[c] = last_component;
                }
        }

        midgard_vector_alu_src alu_src = {
                .rep_low = 0,
                .rep_high = 0,
                .half = 0, /* TODO */
                .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
        };

        if (is_int) {
                /* TODO: sign-extend/zero-extend */
                alu_src.mod = midgard_int_normal;

                /* These should have been lowered away */
                assert(!(src->abs || src->negate));
        } else {
                alu_src.mod = (src->abs << 0) | (src->negate << 1);
        }

        return alu_src;
}

/* load/store instructions have both 32-bit and 16-bit variants, depending on
 * whether we are using vectors composed of highp or mediump. At the moment, we
 * don't support half-floats -- this requires changes in other parts of the
 * compiler -- therefore the 16-bit versions are commented out. */

//M_LOAD(ld_attr_16);
M_LOAD(ld_attr_32);
//M_LOAD(ld_vary_16);
M_LOAD(ld_vary_32);
//M_LOAD(ld_uniform_16);
M_LOAD(ld_uniform_32);
M_LOAD(ld_color_buffer_8);
//M_STORE(st_vary_16);
M_STORE(st_vary_32);
M_STORE(st_cubemap_coords);

static midgard_instruction
v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
{
        midgard_branch_cond branch = {
                .op = op,
                .dest_tag = tag,
                .offset = offset,
                .cond = cond
        };

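        /* Pack the structured branch descriptor into its 16-bit compact
         * encoding */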
        uint16_t compact;
        memcpy(&compact, &branch, sizeof(branch));

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .unit = ALU_ENAB_BR_COMPACT,
                .prepacked_branch = true,
                .compact_branch = true,
                .br_compact = compact
        };

        if (op == midgard_jmp_writeout_op_writeout)
                ins.writeout = true;

        return ins;
}

static midgard_instruction
v_branch(bool conditional, bool invert)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .unit = ALU_ENAB_BRANCH,
                .compact_branch = true,
                .branch = {
                        .conditional = conditional,
                        .invert_conditional = invert
                }
        };

        return ins;
}

static midgard_branch_extended
midgard_create_branch_extended(midgard_condition cond,
                               midgard_jmp_writeout_op op,
                               unsigned dest_tag,
                               signed quadword_offset)
{
        /* For unclear reasons, the condition code is repeated 8 times */
        uint16_t duplicated_cond =
                (cond << 14) |
                (cond << 12) |
                (cond << 10) |
                (cond << 8) |
                (cond << 6) |
                (cond << 4) |
                (cond << 2) |
                (cond << 0);

        midgard_branch_extended branch = {
                .op = op,
                .dest_tag = dest_tag,
                .offset = quadword_offset,
                .cond = duplicated_cond
        };

        return branch;
}

static void
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
{
        ins->has_constants = true;
        memcpy(&ins->constants, constants, 16);
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

/* Lower fdot2 to a vector multiplication followed by channel addition */
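/* e.g. fdot2(a, b) lowers to (a.x * b.x) + (a.y * b.y) */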
static void
midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
{
        if (alu->op != nir_op_fdot2)
                return;

        b->cursor = nir_before_instr(&alu->instr);

        nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
        nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);

        nir_ssa_def *product = nir_fmul(b, src0, src1);

        nir_ssa_def *sum = nir_fadd(b,
                                    nir_channel(b, product, 0),
                                    nir_channel(b, product, 1));

        /* Replace the fdot2 with this sum */
        nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
}

static int
midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_viewport_scale:
                return PAN_SYSVAL_VIEWPORT_SCALE;
        case nir_intrinsic_load_viewport_offset:
                return PAN_SYSVAL_VIEWPORT_OFFSET;
        default:
                return -1;
        }
}

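/* NIR registers are indexed after the SSA space (offset by ssa_alloc), so
 * SSA definitions and registers share one flat namespace without colliding */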
static unsigned
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
        if (dst->is_ssa)
                return dst->ssa.index;
        else {
                assert(!dst->reg.indirect);
                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
        }
}

static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
                            unsigned *dest)
{
        nir_intrinsic_instr *intr;
        nir_dest *dst = NULL;
        nir_tex_instr *tex;
        int sysval = -1;

        switch (instr->type) {
        case nir_instr_type_intrinsic:
                intr = nir_instr_as_intrinsic(instr);
                sysval = midgard_nir_sysval_for_intrinsic(intr);
                dst = &intr->dest;
                break;
        case nir_instr_type_tex:
                tex = nir_instr_as_tex(instr);
                if (tex->op != nir_texop_txs)
                        break;

                sysval = PAN_SYSVAL(TEXTURE_SIZE,
                                    PAN_TXS_SYSVAL_ID(tex->texture_index,
                                                      nir_tex_instr_dest_size(tex) -
                                                      (tex->is_array ? 1 : 0),
                                                      tex->is_array));
                dst = &tex->dest;
                break;
        default:
                break;
        }

        if (dest && dst)
                *dest = nir_dest_index(ctx, dst);

        return sysval;
}

static void
midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
{
        int sysval;

        sysval = sysval_for_instr(ctx, instr, NULL);
        if (sysval < 0)
                return;

        /* We have a sysval load; check if it's already been assigned */

        if (_mesa_hash_table_u64_search(ctx->sysval_to_id, sysval))
                return;

        /* It hasn't -- so assign it now! */

        unsigned id = ctx->sysval_count++;
        _mesa_hash_table_u64_insert(ctx->sysval_to_id, sysval, (void *) ((uintptr_t) id + 1));
        ctx->sysvals[id] = sysval;
}

static void
midgard_nir_assign_sysvals(compiler_context *ctx, nir_shader *shader)
{
        ctx->sysval_count = 0;

        nir_foreach_function(function, shader) {
                if (!function->impl) continue;

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                midgard_nir_assign_sysval_body(ctx, instr);
                        }
                }
        }
}

static bool
midgard_nir_lower_fdot2(nir_shader *shader)
{
        bool progress = false;

        nir_foreach_function(function, shader) {
                if (!function->impl) continue;

                nir_builder _b;
                nir_builder *b = &_b;
                nir_builder_init(b, function->impl);

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_alu) continue;

                                nir_alu_instr *alu = nir_instr_as_alu(instr);
                                midgard_nir_lower_fdot2_body(b, alu);

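                                /* Conservatively report progress for every
                                 * ALU instruction visited, even if no fdot2
                                 * was actually lowered */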
                                progress |= true;
                        }
                }

                nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);

        }

        return progress;
}

static void
optimise_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp =
                (nir->options->lower_flrp16 ? 16 : 0) |
                (nir->options->lower_flrp32 ? 32 : 0) |
                (nir->options->lower_flrp64 ? 64 : 0);

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
        NIR_PASS(progress, nir, nir_lower_idiv);

        nir_lower_tex_options lower_tex_1st_pass_options = {
                .lower_rect = true,
                .lower_txp = ~0
        };

        nir_lower_tex_options lower_tex_2nd_pass_options = {
                .lower_txs_lod = true,
        };
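        /* txs LOD clamping runs as a second pass so that any txs instructions
         * introduced by the first pass (e.g. by lower_rect) are covered too --
         * an assumption about pass ordering, not a hardware requirement */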

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options);
        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);

                NIR_PASS(progress, nir, nir_opt_vectorize);
        } while (progress);

        /* Must be run at the end to prevent creation of fsin/fcos ops */
        NIR_PASS(progress, nir, midgard_nir_scale_trig);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);
                NIR_PASS(progress, nir, nir_copy_prop);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);

        /* We implement booleans as 32-bit 0/~0 */
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);

        /* Now that booleans are lowered, we can run our late opts */
        NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);

        /* Lower mods for float ops only. Integer ops don't support modifiers
         * (saturate doesn't make sense on integers, neg/abs require dedicated
         * instructions) */

        NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
        NIR_PASS(progress, nir, nir_copy_prop);
        NIR_PASS(progress, nir, nir_opt_dce);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);

        /* We are a vector architecture; write combine where possible */
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_lower_vec_to_movs);

        NIR_PASS(progress, nir, nir_opt_dce);
}

/* Front-half of aliasing the SSA slots, merely by inserting the flag in the
 * appropriate hash table. Intentional off-by-one to avoid confusing NULL with
 * r0. See the comments in compiler_context */

static void
alias_ssa(compiler_context *ctx, int dest, int src)
{
        _mesa_hash_table_u64_insert(ctx->ssa_to_alias, dest + 1, (void *) ((uintptr_t) src + 1));
        _mesa_set_add(ctx->leftover_ssa_to_alias, (void *) (uintptr_t) (dest + 1));
}

/* ...or undo it, after which the original index will be used (dummy move
 * should be emitted alongside this) */

static void
unalias_ssa(compiler_context *ctx, int dest)
{
        _mesa_hash_table_u64_remove(ctx->ssa_to_alias, dest + 1);
        /* TODO: Remove from leftover or no? */
}

/* Do not actually emit a load; instead, cache the constant for inlining */

static void
emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
{
        nir_ssa_def def = instr->def;

        float *v = rzalloc_array(NULL, float, 4);
        nir_const_load_to_arr(v, instr, f32);
        _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}

static unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return src->ssa->index;
        else {
                assert(!src->reg.indirect);
                return ctx->func->impl->ssa_alloc + src->reg.reg->index;
        }
}

static unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}

static bool
nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
{
        unsigned comp = src->swizzle[0];

        for (unsigned c = 1; c < nr_components; ++c) {
                if (src->swizzle[c] != comp)
                        return true;
        }

        return false;
}

/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
 * output of a conditional test) into that register */

static void
emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
{
        int condition = nir_src_index(ctx, src);

        /* Source to swizzle the desired component into w */

        const midgard_vector_alu_src alu_src = {
                .swizzle = SWIZZLE(component, component, component, component),
        };

        /* There is no boolean move instruction. Instead, we simulate a move by
         * ANDing the condition with itself to get it into r31.w */

        midgard_instruction ins = {
                .type = TAG_ALU_4,

                /* We need to set the conditional as close as possible */
                .precede_break = true,
                .unit = for_branch ? UNIT_SMUL : UNIT_SADD,

                .ssa_args = {
                        .src0 = condition,
                        .src1 = condition,
                        .dest = SSA_FIXED_REGISTER(31),
                },

                .alu = {
                        .op = midgard_alu_op_iand,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = (0x3 << 6), /* w */
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(alu_src)
                },
        };

        emit_mir_instruction(ctx, ins);
}

/* Or, for mixed conditions (with csel_v), here's a vector version using all of
 * r31 instead */

static void
emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
{
        int condition = nir_src_index(ctx, &src->src);

        /* Source to swizzle the desired component into w */

        const midgard_vector_alu_src alu_src = {
                .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
        };

        /* There is no boolean move instruction. Instead, we simulate a move by
         * ANDing the condition with itself to get it into r31.w */

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .precede_break = true,
                .ssa_args = {
                        .src0 = condition,
                        .src1 = condition,
                        .dest = SSA_FIXED_REGISTER(31),
                },
                .alu = {
                        .op = midgard_alu_op_iand,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = expand_writemask(mask_of(nr_comp)),
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(alu_src)
                },
        };

        emit_mir_instruction(ctx, ins);
}

/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
 * pinning to eliminate this move in all known cases */

static void
emit_indirect_offset(compiler_context *ctx, nir_src *src)
{
        int offset = nir_src_index(ctx, src);

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = SSA_UNUSED_1,
                        .src1 = offset,
                        .dest = SSA_FIXED_REGISTER(REGISTER_OFFSET),
                },
                .alu = {
                        .op = midgard_alu_op_imov,
                        .outmod = midgard_outmod_int_wrap,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .mask = (0x3 << 6), /* w */
                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
                },
        };

        emit_mir_instruction(ctx, ins);
}

#define ALU_CASE(nir, _op) \
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                break;

#define ALU_CASE_BCAST(nir, _op, count) \
        case nir_op_##nir: \
                op = midgard_alu_op_##_op; \
                broadcast_swizzle = count; \
                break;

static bool
nir_is_fzero_constant(nir_src src)
{
        if (!nir_src_is_const(src))
                return false;

        for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
                if (nir_src_comp_as_float(src, c) != 0.0)
                        return false;
        }

        return true;
}

static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
        bool is_ssa = instr->dest.dest.is_ssa;

        unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
        unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
        unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;

        /* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
         * supported. A few do not and are commented for now. Also, there are a
         * number of NIR ops which Midgard does not support and need to be
         * lowered, also TODO. This switch block emits the opcode and calling
         * convention of the Midgard instruction; the actual packing happens
         * later in this function */

        unsigned op;

        /* Number of components valid to check for the instruction (the rest
         * will be forced to the last), or 0 to use as-is. Relevant as
         * ball-type instructions have a channel count in NIR but are all vec4
         * in Midgard */

        unsigned broadcast_swizzle = 0;

        switch (instr->op) {
                ALU_CASE(fadd, fadd);
                ALU_CASE(fmul, fmul);
                ALU_CASE(fmin, fmin);
                ALU_CASE(fmax, fmax);
                ALU_CASE(imin, imin);
                ALU_CASE(imax, imax);
                ALU_CASE(umin, umin);
                ALU_CASE(umax, umax);
                ALU_CASE(ffloor, ffloor);
                ALU_CASE(fround_even, froundeven);
                ALU_CASE(ftrunc, ftrunc);
                ALU_CASE(fceil, fceil);
                ALU_CASE(fdot3, fdot3);
                ALU_CASE(fdot4, fdot4);
                ALU_CASE(iadd, iadd);
                ALU_CASE(isub, isub);
                ALU_CASE(imul, imul);

                /* Zero shoved as second-arg */
                ALU_CASE(iabs, iabsdiff);

                ALU_CASE(mov, imov);

                ALU_CASE(feq32, feq);
                ALU_CASE(fne32, fne);
                ALU_CASE(flt32, flt);
                ALU_CASE(ieq32, ieq);
                ALU_CASE(ine32, ine);
                ALU_CASE(ilt32, ilt);
                ALU_CASE(ult32, ult);

                /* We don't have a native b2f32 instruction. Instead, like many
                 * GPUs, we exploit booleans as 0/~0 for false/true, and
                 * correspondingly AND by 1.0 to do the type conversion. For the
                 * moment, prime us to emit:
                 *
                 * iand [whatever], #0
                 *
                 * At the end of emit_alu (as MIR), we'll fix-up the constant
                 */

                ALU_CASE(b2f32, iand);
                ALU_CASE(b2i32, iand);

                /* Likewise, we don't have a dedicated f2b32 instruction, but
                 * we can do a "not equal to 0.0" test. */

                ALU_CASE(f2b32, fne);
                ALU_CASE(i2b32, ine);

                ALU_CASE(frcp, frcp);
                ALU_CASE(frsq, frsqrt);
                ALU_CASE(fsqrt, fsqrt);
                ALU_CASE(fexp2, fexp2);
                ALU_CASE(flog2, flog2);

                ALU_CASE(f2i32, f2i_rtz);
                ALU_CASE(f2u32, f2u_rtz);
                ALU_CASE(i2f32, i2f_rtz);
                ALU_CASE(u2f32, u2f_rtz);

                ALU_CASE(fsin, fsin);
                ALU_CASE(fcos, fcos);

                /* Second op implicit #0 */
                ALU_CASE(inot, inor);
                ALU_CASE(iand, iand);
                ALU_CASE(ior, ior);
                ALU_CASE(ixor, ixor);
                ALU_CASE(ishl, ishl);
                ALU_CASE(ishr, iasr);
                ALU_CASE(ushr, ilsr);

                ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
                ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
                ALU_CASE(b32all_fequal4, fball_eq);

                ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
                ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
                ALU_CASE(b32any_fnequal4, fbany_neq);

                ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
                ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
                ALU_CASE(b32all_iequal4, iball_eq);

                ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
                ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
                ALU_CASE(b32any_inequal4, ibany_neq);

                /* Source mods will be shoved in later */
                ALU_CASE(fabs, fmov);
                ALU_CASE(fneg, fmov);
                ALU_CASE(fsat, fmov);

        /* For greater-or-equal, we lower to less-or-equal and flip the
         * arguments */

        case nir_op_fge:
        case nir_op_fge32:
        case nir_op_ige32:
        case nir_op_uge32: {
                op =
                        instr->op == nir_op_fge   ? midgard_alu_op_fle :
                        instr->op == nir_op_fge32 ? midgard_alu_op_fle :
                        instr->op == nir_op_ige32 ? midgard_alu_op_ile :
                        instr->op == nir_op_uge32 ? midgard_alu_op_ule :
                        0;

                /* Swap via temporary */
                nir_alu_src temp = instr->src[1];
                instr->src[1] = instr->src[0];
                instr->src[0] = temp;

                break;
        }

        case nir_op_b32csel: {
                /* Midgard features both fcsel and icsel, depending on
                 * the type of the arguments/output. However, as long
                 * as we're careful we can _always_ use icsel and
                 * _never_ need fcsel, since the latter does additional
                 * floating-point-specific processing whereas the
                 * former just moves bits on the wire. It's not obvious
                 * why these are separate opcodes, save for the ability
                 * to do things like sat/pos/abs/neg for free */

                bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
                op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;

                /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
                nr_inputs = 2;

                /* Emit the condition into r31 */

                if (mixed)
                        emit_condition_mixed(ctx, &instr->src[0], nr_components);
                else
                        emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);

                /* The condition is the first argument; move the other
                 * arguments up one to be a binary instruction for
                 * Midgard */

                memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
                break;
        }

        default:
                DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
                assert(0);
                return;
        }

        /* Midgard can perform certain modifiers on output of an ALU op */
        unsigned outmod;

        if (midgard_is_integer_out_op(op)) {
                outmod = midgard_outmod_int_wrap;
        } else {
                bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
                outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
        }

        /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */

        if (instr->op == nir_op_fmax) {
                if (nir_is_fzero_constant(instr->src[0].src)) {
                        op = midgard_alu_op_fmov;
                        nr_inputs = 1;
                        outmod = midgard_outmod_pos;
                        instr->src[0] = instr->src[1];
                } else if (nir_is_fzero_constant(instr->src[1].src)) {
                        op = midgard_alu_op_fmov;
                        nr_inputs = 1;
                        outmod = midgard_outmod_pos;
                }
        }

        /* Fetch unit, quirks, etc information */
        unsigned opcode_props = alu_opcode_props[op].props;
        bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;

        /* src0 will always exist afaik, but src1 will not for 1-argument
         * instructions. The latter can only be fetched if the instruction
         * needs it, or else we may segfault. */

        unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
        unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;

        /* Rather than use the instruction generation helpers, we do it
         * ourselves here to avoid the mess */

        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
                        .src1 = quirk_flipped_r24 ? src0 : src1,
                        .dest = dest,
                }
        };

        nir_alu_src *nirmods[2] = { NULL };

        if (nr_inputs == 2) {
                nirmods[0] = &instr->src[0];
                nirmods[1] = &instr->src[1];
        } else if (nr_inputs == 1) {
                nirmods[quirk_flipped_r24] = &instr->src[0];
        } else {
                assert(0);
        }

        /* These were lowered to a move, so apply the corresponding mod */

        if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
                nir_alu_src *s = nirmods[quirk_flipped_r24];

                if (instr->op == nir_op_fneg)
                        s->negate = !s->negate;

                if (instr->op == nir_op_fabs)
                        s->abs = !s->abs;
        }

        bool is_int = midgard_is_integer_op(op);

        midgard_vector_alu alu = {
                .op = op,
                .reg_mode = midgard_reg_mode_32,
                .dest_override = midgard_dest_override_none,
                .outmod = outmod,

                /* Writemask only valid for non-SSA NIR */
                .mask = expand_writemask(mask_of(nr_components)),

                .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle)),
                .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle)),
        };

        /* Apply writemask if non-SSA, keeping in mind that we can't write to
         * components that don't exist */

        if (!is_ssa)
                alu.mask &= expand_writemask(instr->dest.write_mask);

        ins.alu = alu;

        /* Late fixup for emulated instructions */

        if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
                /* Presently, our second argument is an inline #0 constant.
                 * Switch over to an embedded 1.0 constant (that can't fit
                 * inline, since we're 32-bit, not 16-bit like the inline
                 * constants) */

                ins.ssa_args.inline_constant = false;
                ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                ins.has_constants = true;

                if (instr->op == nir_op_b2f32) {
                        ins.constants[0] = 1.0f;
                } else {
                        /* Type pun it into place */
                        uint32_t one = 0x1;
                        memcpy(&ins.constants[0], &one, sizeof(uint32_t));
                }

                ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
        } else if (nr_inputs == 1 && !quirk_flipped_r24) {
                /* Lots of instructions need a 0 plonked in */
                ins.ssa_args.inline_constant = false;
                ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                ins.has_constants = true;
                ins.constants[0] = 0.0f;
                ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
        } else if (instr->op == nir_op_inot) {
                /* ~b = ~(b & b), so duplicate the source */
                ins.ssa_args.src1 = ins.ssa_args.src0;
                ins.alu.src2 = ins.alu.src1;
        }

        if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
                /* To avoid duplicating the lookup tables (probably), true LUT
                 * instructions can only operate as if they were scalars. Lower
                 * them here by changing the component. */

                uint8_t original_swizzle[4];
                memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));

                for (int i = 0; i < nr_components; ++i) {
                        /* Mask the associated component, dropping the
                         * instruction if needed */

                        ins.alu.mask = (0x3) << (2 * i);
                        ins.alu.mask &= alu.mask;

                        if (!ins.alu.mask)
                                continue;

                        for (int j = 0; j < 4; ++j)
                                nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */

                        ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle));
                        emit_mir_instruction(ctx, ins);
                }
        } else {
                emit_mir_instruction(ctx, ins);
        }
}

#undef ALU_CASE

/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
 * optimized) versions of UBO #0 */

static void
emit_ubo_read(
        compiler_context *ctx,
        unsigned dest,
        unsigned offset,
        nir_src *indirect_offset,
        unsigned index)
{
        /* TODO: half-floats */

        if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) {
                /* Fast path: For the first 16 uniforms, direct accesses are
                 * 0-cycle, since they're just a register fetch in the usual
                 * case. So, we alias the registers while we're still in
                 * SSA-space */
                int reg_slot = 23 - offset;
                alias_ssa(ctx, dest, SSA_FIXED_REGISTER(reg_slot));
        } else {
                /* Otherwise, read from the 'special' UBO to access
                 * higher-indexed uniforms, at a performance cost. More
                 * generally, we're emitting a UBO read instruction. */

                midgard_instruction ins = m_ld_uniform_32(dest, offset);

                /* TODO: Don't split */
                ins.load_store.varying_parameters = (offset & 7) << 7;
                ins.load_store.address = offset >> 3;

                if (indirect_offset) {
                        emit_indirect_offset(ctx, indirect_offset);
                        ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */
                } else {
                        ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */
                }

                /* TODO respect index */

                emit_mir_instruction(ctx, ins);
        }
}

static void
emit_varying_read(
        compiler_context *ctx,
        unsigned dest, unsigned offset,
        unsigned nr_comp, unsigned component,
        nir_src *indirect_offset)
{
        /* XXX: Half-floats? */
        /* TODO: swizzle, mask */

        midgard_instruction ins = m_ld_vary_32(dest, offset);
        ins.load_store.mask = mask_of(nr_comp);
        ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);

        midgard_varying_parameter p = {
                .is_varying = 1,
                .interpolation = midgard_interp_default,
                .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
        };

        unsigned u;
        memcpy(&u, &p, sizeof(p));
        ins.load_store.varying_parameters = u;

        if (indirect_offset) {
                /* We need to add in the dynamic index, moved to r27.w */
                emit_indirect_offset(ctx, indirect_offset);
                ins.load_store.unknown = 0x79e; /* xxx: what is this? */
        } else {
                /* Just a direct load */
                ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
        }

        emit_mir_instruction(ctx, ins);
}

static void
emit_sysval_read(compiler_context *ctx, nir_instr *instr)
{
        unsigned dest;
        /* Figure out which uniform this is */
        int sysval = sysval_for_instr(ctx, instr, &dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */
        emit_ubo_read(ctx, dest, uniform, NULL, 0);
}

/* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
 * using scalar ops functional on earlier Midgard generations. Newer Midgard
 * generations have faster vectorized reads. This operation is for blend
 * shaders in particular; reading the tilebuffer from the fragment shader
 * remains an open problem. */

static void
emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
{
        midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
        ins.load_store.swizzle = 0; /* xxxx */

        /* Read each component sequentially */

        for (unsigned c = 0; c < 4; ++c) {
                ins.load_store.mask = (1 << c);
                ins.load_store.unknown = c;
                emit_mir_instruction(ctx, ins);
        }

        /* vadd.u2f hr2, zext(hr2), #0 */

        midgard_vector_alu_src alu_src = blank_alu_src;
        alu_src.mod = midgard_int_zero_extend;
        alu_src.half = true;

        midgard_instruction u2f = {
                .type = TAG_ALU_4,
                .ssa_args = {
                        .src0 = reg,
                        .src1 = SSA_UNUSED_0,
                        .dest = reg,
                        .inline_constant = true
                },
                .alu = {
                        .op = midgard_alu_op_u2f_rtz,
                        .reg_mode = midgard_reg_mode_16,
                        .dest_override = midgard_dest_override_none,
                        .mask = 0xF,
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
                }
        };

        emit_mir_instruction(ctx, u2f);

        /* vmul.fmul.sat r1, hr2, #0.00392151 */

        alu_src.mod = 0;

        midgard_instruction fmul = {
                .type = TAG_ALU_4,
                .inline_constant = _mesa_float_to_half(1.0 / 255.0),
                .ssa_args = {
                        .src0 = reg,
                        .dest = reg,
                        .src1 = SSA_UNUSED_0,
                        .inline_constant = true
                },
                .alu = {
                        .op = midgard_alu_op_fmul,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_sat,
                        .mask = 0xFF,
                        .src1 = vector_alu_srco_unsigned(alu_src),
                        .src2 = vector_alu_srco_unsigned(blank_alu_src),
                }
        };

        emit_mir_instruction(ctx, fmul);
}

static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
        unsigned offset = 0, reg;

        switch (instr->intrinsic) {
        case nir_intrinsic_discard_if:
                emit_condition(ctx, &instr->src[0], true, COMPONENT_X);

        /* fallthrough */

        case nir_intrinsic_discard: {
                bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
                struct midgard_instruction discard = v_branch(conditional, false);
                discard.branch.target_type = TARGET_DISCARD;
                emit_mir_instruction(ctx, discard);

                ctx->can_discard = true;
                break;
        }

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_ubo:
        case nir_intrinsic_load_input: {
                bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
                bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;

                if (!is_ubo) {
                        offset = nir_intrinsic_base(instr);
                }

                unsigned nr_comp = nir_intrinsic_dest_components(instr);

                nir_src *src_offset = nir_get_io_offset_src(instr);

                bool direct = nir_src_is_const(*src_offset);

                if (direct)
                        offset += nir_src_as_uint(*src_offset);

                /* We may need to apply a fractional offset */
                int component = instr->intrinsic == nir_intrinsic_load_input ?
                                nir_intrinsic_component(instr) : 0;
                reg = nir_dest_index(ctx, &instr->dest);

                if (is_uniform && !ctx->is_blend) {
                        emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0);
                } else if (is_ubo) {
                        nir_src index = instr->src[0];

                        /* We don't yet support indirect UBOs. For indirect
                         * block numbers (if that's possible), we don't know
                         * enough about the hardware yet. For indirect sources,
                         * we know what we need but we need to add some NIR
                         * support for lowering correctly with respect to
                         * 128-bit reads */

                        assert(nir_src_is_const(index));
                        assert(nir_src_is_const(*src_offset));

                        /* TODO: Alignment */
                        assert((offset & 0xF) == 0);

                        uint32_t uindex = nir_src_as_uint(index) + 1;
                        emit_ubo_read(ctx, reg, offset / 16, NULL, uindex);
                } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
                        emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL);
                } else if (ctx->is_blend) {
                        /* For blend shaders, load the input color, which is
                         * preloaded to r0 */

                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                        emit_mir_instruction(ctx, move);
                } else if (ctx->stage == MESA_SHADER_VERTEX) {
                        midgard_instruction ins = m_ld_attr_32(reg, offset);
                        ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
                        ins.load_store.mask = mask_of(nr_comp);
                        emit_mir_instruction(ctx, ins);
                } else {
                        DBG("Unknown load\n");
                        assert(0);
                }

                break;
        }

        case nir_intrinsic_load_output:
                assert(nir_src_is_const(instr->src[0]));
                reg = nir_dest_index(ctx, &instr->dest);

                if (ctx->is_blend) {
                        /* TODO: MRT */
                        emit_fb_read_blend_scalar(ctx, reg);
                } else {
                        DBG("Unknown output load\n");
                        assert(0);
                }

                break;

        case nir_intrinsic_load_blend_const_color_rgba: {
                assert(ctx->is_blend);
                reg = nir_dest_index(ctx, &instr->dest);

                /* Blend constants are embedded directly in the shader and
                 * patched in, so we use some magic routing */

                midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
                ins.has_constants = true;
                ins.has_blend_constant = true;
                emit_mir_instruction(ctx, ins);
                break;
        }

        case nir_intrinsic_store_output:
                assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");

                offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);

                reg = nir_src_index(ctx, &instr->src[0]);

                if (ctx->stage == MESA_SHADER_FRAGMENT) {
                        /* gl_FragColor is not emitted with load/store
                         * instructions. Instead, it gets plonked into
                         * r0 at the end of the shader and we do the
                         * framebuffer writeout dance. TODO: Defer
                         * writes */

                        midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
                        emit_mir_instruction(ctx, move);

                        /* Save the index we're writing to for later reference
                         * in the epilogue */

                        ctx->fragment_output = reg;
                } else if (ctx->stage == MESA_SHADER_VERTEX) {
                        /* Varyings are written into one of two special
                         * varying registers, r26 or r27. The register itself is
                         * selected as the register in the st_vary instruction,
                         * minus the base of 26. E.g. write into r27 and then
                         * call st_vary(1) */

                        midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
                        emit_mir_instruction(ctx, ins);

                        /* We should have been vectorized, though we don't
                         * currently check that st_vary is emitted only once
                         * per slot (this is relevant, since there's not a mask
                         * parameter available on the store [set to 0 by the
                         * blob]). We do respect the component by adjusting the
                         * swizzle. */

                        unsigned component = nir_intrinsic_component(instr);

                        midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
                        st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
                        st.load_store.swizzle = SWIZZLE_XYZW << (2 * component);
                        emit_mir_instruction(ctx, st);
                } else {
                        DBG("Unknown store\n");
                        assert(0);
                }

                break;

        case nir_intrinsic_load_alpha_ref_float:
                assert(instr->dest.is_ssa);

                float ref_value = ctx->alpha_ref;

                float *v = ralloc_array(NULL, float, 4);
                memcpy(v, &ref_value, sizeof(float));
                _mesa_hash_table_u64_insert(ctx->ssa_constants, instr->dest.ssa.index + 1, v);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
                emit_sysval_read(ctx, &instr->instr);
                break;

        default:
                printf("Unhandled intrinsic\n");
                assert(0);
                break;
        }
}

static unsigned
midgard_tex_format(enum glsl_sampler_dim dim)
{
        switch (dim) {
        case GLSL_SAMPLER_DIM_1D:
        case GLSL_SAMPLER_DIM_BUF:
                return MALI_TEX_1D;

        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_EXTERNAL:
                return MALI_TEX_2D;

        case GLSL_SAMPLER_DIM_3D:
                return MALI_TEX_3D;

        case GLSL_SAMPLER_DIM_CUBE:
                return MALI_TEX_CUBE;

        default:
                DBG("Unknown sampler dim type\n");
                assert(0);
                return 0;
        }
}

/* Tries to attach an explicit LOD / bias as a constant. Returns whether this
 * was successful */

static bool
pan_attach_constant_bias(
        compiler_context *ctx,
        nir_src lod,
        midgard_texture_word *word)
{
        /* To attach as constant, it has to *be* constant */

        if (!nir_src_is_const(lod))
                return false;

        float f = nir_src_as_float(lod);

        /* Break into fixed-point */
        signed lod_int = f;
        float lod_frac = f - lod_int;

        /* Carry over negative fractions */
        if (lod_frac < 0.0) {
                lod_int--;
                lod_frac += 1.0;
        }
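        /* e.g. f = -0.5 becomes lod_int = -1, lod_frac = 0.5 */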

        /* Encode */
        word->bias = float_to_ubyte(lod_frac);
        word->bias_int = lod_int;

        return true;
}

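/* Map the NIR destination type to the hardware sampler type. Returning
 * MALI_SAMPLER_SIGNED/UNSIGNED here is what makes integer samplers
 * (isampler/usampler) return raw integer texels rather than floats */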
static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t)
{
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

static void
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
                  unsigned midgard_texop)
{
        /* TODO */
        //assert (!instr->sampler);
        //assert (!instr->texture_array_size);

        /* Allocate registers via a round robin scheme to alternate between
         * the two registers */
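        /* i.e. even-numbered texture ops use texture register 0, odd ones
         * register 1 (REGISTER_TEXTURE_BASE + 0/1) */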
        int reg = ctx->texture_op_count & 1;
        int in_reg = reg, out_reg = reg;

        /* Make room for the reg */

        if (ctx->texture_index[reg] > -1)
                unalias_ssa(ctx, ctx->texture_index[reg]);

        int texture_index = instr->texture_index;
        int sampler_index = texture_index;

        /* No helper to build texture words -- we do it all here */
        midgard_instruction ins = {
                .type = TAG_TEXTURE_4,
                .texture = {
                        .op = midgard_texop,
                        .format = midgard_tex_format(instr->sampler_dim),
                        .texture_handle = texture_index,
                        .sampler_handle = sampler_index,

                        /* TODO: Regalloc it in */
                        .swizzle = SWIZZLE_XYZW,
                        .mask = 0xF,

                        /* TODO: half */
                        .in_reg_full = 1,
                        .out_full = 1,

                        .sampler_type = midgard_sampler_type(instr->dest_type),
                }
        };

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
                int index = nir_src_index(ctx, &instr->src[i].src);
                int nr_comp = nir_src_num_components(instr->src[i].src);
                midgard_vector_alu_src alu_src = blank_alu_src;

                switch (instr->src[i].src_type) {
                case nir_tex_src_coord: {
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                /* texelFetch is undefined on samplerCube */
                                assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);

                                /* For cubemaps, we need to load coords into
                                 * special r27, and then use a special ld/st op
                                 * to select the face and copy the xy into the
                                 * texture register */

                                alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);

                                midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27));
                                emit_mir_instruction(ctx, move);

                                midgard_instruction st = m_st_cubemap_coords(reg, 0);
                                st.load_store.unknown = 0x24; /* XXX: What is this? */
                                st.load_store.mask = 0x3; /* xy */
                                st.load_store.swizzle = alu_src.swizzle;
                                emit_mir_instruction(ctx, st);

                                ins.texture.in_reg_swizzle = swizzle_of(2);
                        } else {
                                ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);

                                midgard_instruction mov = v_mov(index, alu_src, reg);
                                mov.alu.mask = expand_writemask(mask_of(nr_comp));
                                emit_mir_instruction(ctx, mov);

                                if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
                                        /* Texel fetch opcodes care about the
                                         * values of z and w, so we actually
                                         * need to spill into a second register
                                         * for a texel fetch with register bias
                                         * (for non-2D). TODO: Implement that
                                         */

                                        assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);

                                        midgard_instruction zero = v_mov(index, alu_src, reg);
                                        zero.ssa_args.inline_constant = true;
                                        zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
                                        zero.has_constants = true;
                                        zero.alu.mask = ~mov.alu.mask;
                                        emit_mir_instruction(ctx, zero);

                                        ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
                                } else {
                                        /* Non-texel fetch doesn't need that
                                         * nonsense. However we do use the Z
                                         * for array indexing */
                                        ins.texture.in_reg_swizzle = SWIZZLE_XYXZ;
                                }
                        }

                        break;
                }

                case nir_tex_src_bias:
                case nir_tex_src_lod: {
                        /* Try as a constant if we can */

                        bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
                        if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
                                break;

                        /* Otherwise we use a register. To keep RA simple, we
                         * put the bias/LOD into the w component of the input
                         * source, which is otherwise in xy */

                        alu_src.swizzle = SWIZZLE_XXXX;

                        midgard_instruction mov = v_mov(index, alu_src, reg);
                        mov.alu.mask = expand_writemask(1 << COMPONENT_W);
                        emit_mir_instruction(ctx, mov);

                        ins.texture.lod_register = true;

                        midgard_tex_register_select sel = {
                                .select = in_reg,
                                .full = 1,

                                /* w */
                                .component_lo = 1,
                                .component_hi = 1
                        };

                        uint8_t packed;
                        memcpy(&packed, &sel, sizeof(packed));
                        ins.texture.bias = packed;

                        break;
                };

                default:
                        unreachable("Unknown texture source type\n");
                }
        }

        /* Set registers to read and write from the same place */
        ins.texture.in_reg_select = in_reg;
        ins.texture.out_reg_select = out_reg;

        emit_mir_instruction(ctx, ins);

        /* Simultaneously alias the destination and emit a move for it. The
         * move will be eliminated if possible */

        int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
        alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
        ctx->texture_index[reg] = o_index;

        midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
        emit_mir_instruction(ctx, ins2);

        /* Used for .cont and .last hinting */
        ctx->texture_op_count++;
}

static void
emit_tex(compiler_context *ctx, nir_tex_instr *instr)
{
        /* Fixup op, since only textureLod is permitted in VS but NIR can give
         * generic tex in some cases (which confuses the hardware) */

        bool is_vertex = ctx->stage == MESA_SHADER_VERTEX;

        if (is_vertex && instr->op == nir_texop_tex)
                instr->op = nir_texop_txl;

        switch (instr->op) {
        case nir_texop_tex:
        case nir_texop_txb:
                emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
                break;
        case nir_texop_txl:
                emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
                break;
        case nir_texop_txf:
                emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
                break;
        case nir_texop_txs:
                emit_sysval_read(ctx, &instr->instr);
                break;
        default:
                unreachable("Unhandled texture op");
        }
}

static void
emit_jump(compiler_context *ctx, nir_jump_instr *instr)
{
        switch (instr->type) {
        case nir_jump_break: {
                /* Emit a branch out of the loop */
                struct midgard_instruction br = v_branch(false, false);
                br.branch.target_type = TARGET_BREAK;
                br.branch.target_break = ctx->current_loop_depth;
                emit_mir_instruction(ctx, br);

                DBG("break..\n");
                break;
        }

        default:
                DBG("Unknown jump type %d\n", instr->type);
                break;
        }
}

static void
emit_instr(compiler_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                DBG("Unhandled instruction type\n");
                break;
        }
}

/* ALU instructions can inline or embed constants, which decreases register
 * pressure and saves space. */

#define CONDITIONAL_ATTACH(src) { \
        void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src + 1); \
        \
        if (entry) { \
                attach_constants(ctx, alu, entry, alu->ssa_args.src + 1); \
                alu->ssa_args.src = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
        } \
}
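/* e.g. CONDITIONAL_ATTACH(src0) rewrites src0 to the embedded-constant
 * register when its SSA value has a cached constant */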
1736
1737 static void
1738 inline_alu_constants(compiler_context *ctx)
1739 {
1740 mir_foreach_instr(ctx, alu) {
1741 /* Other instructions cannot inline constants */
1742 if (alu->type != TAG_ALU_4) continue;
1743
1744 /* If there is already a constant here, we can do nothing */
1745 if (alu->has_constants) continue;
1746
1747 /* It makes no sense to inline constants on a branch */
1748 if (alu->compact_branch || alu->prepacked_branch) continue;
1749
1750 CONDITIONAL_ATTACH(src0);
1751
1752 if (!alu->has_constants) {
1753 CONDITIONAL_ATTACH(src1)
1754 } else if (!alu->inline_constant) {
1755 /* Corner case: _two_ vec4 constants, for instance with a
1756 * csel. For this case, we can only use a constant
1757 * register for one, we'll have to emit a move for the
1758 * other. Note, if both arguments are constants, then
1759 * necessarily neither argument depends on the value of
1760 * any particular register. As the destination register
1761 * will be wiped, that means we can spill the constant
1762 * to the destination register.
1763 */
1764
1765 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src1 + 1);
1766 unsigned scratch = alu->ssa_args.dest;
1767
1768 if (entry) {
1769 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
1770 attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
1771
1772 /* Force a break XXX Defer r31 writes */
1773 ins.unit = UNIT_VLUT;
1774
1775 /* Set the source */
1776 alu->ssa_args.src1 = scratch;
1777
1778 /* Inject us -before- the last instruction which set r31 */
1779 mir_insert_instruction_before(mir_prev_op(alu), ins);
1780 }
1781 }
1782 }
1783 }
1784
1785 /* Midgard supports two types of constants, embedded constants (128-bit) and
1786 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
1787 * constants can be demoted to inline constants, for space savings and
1788 * sometimes a performance boost */
1789
1790 static void
1791 embedded_to_inline_constant(compiler_context *ctx)
1792 {
1793 mir_foreach_instr(ctx, ins) {
1794 if (!ins->has_constants) continue;
1795
1796 if (ins->ssa_args.inline_constant) continue;
1797
1798 /* Blend constants must not be inlined by definition */
1799 if (ins->has_blend_constant) continue;
1800
1801 /* src1 cannot be an inline constant due to encoding
1802 * restrictions. So, if possible we try to flip the arguments
1803 * in that case */
1804
1805 int op = ins->alu.op;
1806
1807 if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1808 switch (op) {
1809 /* These ops require an operational change to flip
1810 * their arguments TODO */
1811 case midgard_alu_op_flt:
1812 case midgard_alu_op_fle:
1813 case midgard_alu_op_ilt:
1814 case midgard_alu_op_ile:
1815 case midgard_alu_op_fcsel:
1816 case midgard_alu_op_icsel:
1817 DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
1818 default:
1819 break;
1820 }
1821
1822 if (alu_opcode_props[op].props & OP_COMMUTES) {
1823 /* Flip the SSA numbers */
1824 ins->ssa_args.src0 = ins->ssa_args.src1;
1825 ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1826
1827 /* And flip the modifiers */
1828
1829 unsigned src_temp;
1830
1831 src_temp = ins->alu.src2;
1832 ins->alu.src2 = ins->alu.src1;
1833 ins->alu.src1 = src_temp;
1834 }
1835 }
1836
1837 if (ins->ssa_args.src1 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1838 /* Extract the source information */
1839
1840 midgard_vector_alu_src *src;
1841 int q = ins->alu.src2;
1842 midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
1843 src = m;
1844
1845 /* Component is from the swizzle, e.g. r26.w -> w component. TODO: What if x is masked out? */
1846 int component = src->swizzle & 3;
1847
1848 /* Scale constant appropriately, if we can legally */
1849 uint16_t scaled_constant = 0;
1850
1851 if (midgard_is_integer_op(op)) {
1852 unsigned int *iconstants = (unsigned int *) ins->constants;
1853 scaled_constant = (uint16_t) iconstants[component];
1854
1855 /* Constant overflow after resize */
1856 if (scaled_constant != iconstants[component])
1857 continue;
1858 } else {
1859 float original = (float) ins->constants[component];
1860 scaled_constant = _mesa_float_to_half(original);
1861
1862 /* Check for loss of precision. If this is
1863 * mediump, we don't care, but for a highp
1864 * shader, we need to pay attention. NIR
1865 * doesn't yet tell us which mode we're in!
1866 * Practically this prevents most constants
1867 * from being inlined, sadly. */
1868
1869 float fp32 = _mesa_half_to_float(scaled_constant);
1870
1871 if (fp32 != original)
1872 continue;
1873 }
1874
1875 /* We don't know how to handle these with a constant */
1876
1877 if (src->mod || src->half || src->rep_low || src->rep_high) {
1878 DBG("Bailing inline constant...\n");
1879 continue;
1880 }
1881
1882 /* Make sure that the constant is not itself a
1883 * vector by checking if all accessed values
1884 * (by the swizzle) are the same. */
1885
1886 uint32_t *cons = (uint32_t *) ins->constants;
1887 uint32_t value = cons[component];
1888
1889 bool is_vector = false;
1890 unsigned mask = effective_writemask(&ins->alu);
1891
1892 for (int c = 1; c < 4; ++c) {
1893 /* We only care if this component is actually used */
1894 if (!(mask & (1 << c)))
1895 continue;
1896
1897                                 uint32_t test = cons[(src.swizzle >> (2 * c)) & 3];
1898
1899 if (test != value) {
1900 is_vector = true;
1901 break;
1902 }
1903 }
1904
1905 if (is_vector)
1906 continue;
1907
1908 /* Get rid of the embedded constant */
1909 ins->has_constants = false;
1910 ins->ssa_args.src1 = SSA_UNUSED_0;
1911 ins->ssa_args.inline_constant = true;
1912 ins->inline_constant = scaled_constant;
1913 }
1914 }
1915 }
1916
1917 /* Map normal SSA sources to other SSA sources / fixed registers (like
1918 * uniforms) */
1919
1920 static void
1921 map_ssa_to_alias(compiler_context *ctx, int *ref)
1922 {
1923         /* Negative values deliberately mark unused sources */
1924 if (*ref < 0)
1925 return;
1926
1927 unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
1928
1929 if (alias) {
1930                 /* Remove entry in leftovers to avoid a redundant fmov */
1931
1932 struct set_entry *leftover = _mesa_set_search(ctx->leftover_ssa_to_alias, ((void *) (uintptr_t) (*ref + 1)));
1933
1934 if (leftover)
1935 _mesa_set_remove(ctx->leftover_ssa_to_alias, leftover);
1936
1937 /* Assign the alias map */
1938 *ref = alias - 1;
1939 return;
1940 }
1941 }
1942
1943 /* Basic dead code elimination on the MIR itself, which cleans up e.g. the
1944 * texture pipeline */
1945
1946 static bool
1947 midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
1948 {
1949 bool progress = false;
1950
1951 mir_foreach_instr_in_block_safe(block, ins) {
1952 if (ins->type != TAG_ALU_4) continue;
1953 if (ins->compact_branch) continue;
1954
1955 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
1956 if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
1957
1958 mir_remove_instruction(ins);
1959 progress = true;
1960 }
1961
1962 return progress;
1963 }
1964
1965 /* Dead code elimination for branches at the end of a block - only one branch
1966 * per block is legal semantically */
1967
1968 static void
1969 midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
1970 {
1971 bool branched = false;
1972
1973 mir_foreach_instr_in_block_safe(block, ins) {
1974 if (!midgard_is_branch_unit(ins->unit)) continue;
1975
1976 /* We ignore prepacked branches since the fragment epilogue is
1977 * just generally special */
1978 if (ins->prepacked_branch) continue;
1979
1980 /* Discards are similarly special and may not correspond to the
1981 * end of a block */
1982
1983 if (ins->branch.target_type == TARGET_DISCARD) continue;
1984
1985 if (branched) {
1986 /* We already branched, so this is dead */
1987 mir_remove_instruction(ins);
1988 }
1989
1990 branched = true;
1991 }
1992 }
1993
1994 static bool
1995 mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
1996 {
1997 /* abs or neg */
1998 if (!is_int && src.mod) return true;
1999
2000 /* swizzle */
2001 for (unsigned c = 0; c < 4; ++c) {
2002 if (!(mask & (1 << c))) continue;
2003 if (((src.swizzle >> (2*c)) & 3) != c) return true;
2004 }
2005
2006 return false;
2007 }
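
/* (Editor's note) For reference, the identity swizzle tested above encodes
 * as 0xE4 = 11 10 01 00b: each 2-bit selector c simply picks component c */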
2008
2009 static bool
2010 mir_nontrivial_source2_mod(midgard_instruction *ins)
2011 {
2012 unsigned mask = squeeze_writemask(ins->alu.mask);
2013 bool is_int = midgard_is_integer_op(ins->alu.op);
2014
2015 midgard_vector_alu_src src2 =
2016 vector_alu_from_unsigned(ins->alu.src2);
2017
2018 return mir_nontrivial_mod(src2, is_int, mask);
2019 }
2020
2021 static bool
2022 mir_nontrivial_outmod(midgard_instruction *ins)
2023 {
2024 bool is_int = midgard_is_integer_op(ins->alu.op);
2025 unsigned mod = ins->alu.outmod;
2026
2027 if (is_int)
2028 return mod != midgard_outmod_int_wrap;
2029 else
2030 return mod != midgard_outmod_none;
2031 }
2032
2033 static bool
2034 midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
2035 {
2036 bool progress = false;
2037
2038 mir_foreach_instr_in_block_safe(block, ins) {
2039 if (ins->type != TAG_ALU_4) continue;
2040 if (!OP_IS_MOVE(ins->alu.op)) continue;
2041
2042 unsigned from = ins->ssa_args.src1;
2043 unsigned to = ins->ssa_args.dest;
2044
2045 /* We only work on pure SSA */
2046
2047 if (to >= SSA_FIXED_MINIMUM) continue;
2048 if (from >= SSA_FIXED_MINIMUM) continue;
2049 if (to >= ctx->func->impl->ssa_alloc) continue;
2050 if (from >= ctx->func->impl->ssa_alloc) continue;
2051
2052 /* Constant propagation is not handled here, either */
2053 if (ins->ssa_args.inline_constant) continue;
2054 if (ins->has_constants) continue;
2055
2056 if (mir_nontrivial_source2_mod(ins)) continue;
2057 if (mir_nontrivial_outmod(ins)) continue;
2058
2059 /* We're clear -- rewrite */
2060 mir_rewrite_index_src(ctx, to, from);
2061 mir_remove_instruction(ins);
2062 progress |= true;
2063 }
2064
2065 return progress;
2066 }
2067
2068 /* fmov.pos is an idiom for fpos. Propagate the .pos up to the source, so then
2069 * the move can be propagated away entirely */
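
/* e.g. (editor's sketch, informal syntax):
 *
 *    fadd     r1, r2, r3        fadd.pos r1, r2, r3
 *    fmov.pos r0, r1     ->     fmov     r0, r1
 *
 * after which copyprop can remove the move altogether */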
2070
2071 static bool
2072 mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
2073 {
2074 /* Nothing to do */
2075 if (comp == midgard_outmod_none)
2076 return true;
2077
2078 if (*outmod == midgard_outmod_none) {
2079 *outmod = comp;
2080 return true;
2081 }
2082
2083 /* TODO: Compose rules */
2084 return false;
2085 }
2086
2087 static bool
2088 midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
2089 {
2090 bool progress = false;
2091
2092 mir_foreach_instr_in_block_safe(block, ins) {
2093 if (ins->type != TAG_ALU_4) continue;
2094 if (ins->alu.op != midgard_alu_op_fmov) continue;
2095 if (ins->alu.outmod != midgard_outmod_pos) continue;
2096
2097 /* TODO: Registers? */
2098 unsigned src = ins->ssa_args.src1;
2099 if (src >= ctx->func->impl->ssa_alloc) continue;
2100 assert(!mir_has_multiple_writes(ctx, src));
2101
2102 /* There might be a source modifier, too */
2103 if (mir_nontrivial_source2_mod(ins)) continue;
2104
2105 /* Backpropagate the modifier */
2106 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
2107 if (v->type != TAG_ALU_4) continue;
2108 if (v->ssa_args.dest != src) continue;
2109
2110 /* Can we even take a float outmod? */
2111 if (midgard_is_integer_out_op(v->alu.op)) continue;
2112
2113                         midgard_outmod_float temp = v->alu.outmod;
2114 
2115                         /* Throw in the towel if the modifiers
2116                          * do not compose */
2117                         if (!mir_compose_float_outmod(&temp, ins->alu.outmod))
2118                                 break;
2119 
2120                         /* Otherwise, transfer the modifier */
2121                         v->alu.outmod = temp;
2122                         ins->alu.outmod = midgard_outmod_none;
2123                         progress = true;
2124                         break;
2124 }
2125 }
2126
2127 return progress;
2128 }
2129
2130 static bool
2131 midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
2132 {
2133 bool progress = false;
2134
2135 mir_foreach_instr_in_block_safe(block, ins) {
2136 if (ins->type != TAG_ALU_4) continue;
2137 if (!OP_IS_MOVE(ins->alu.op)) continue;
2138
2139 unsigned from = ins->ssa_args.src1;
2140 unsigned to = ins->ssa_args.dest;
2141
2142 /* Make sure it's simple enough for us to handle */
2143
2144 if (from >= SSA_FIXED_MINIMUM) continue;
2145 if (from >= ctx->func->impl->ssa_alloc) continue;
2146 if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
2147 if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
2148
2149 bool eliminated = false;
2150
2151 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
2152                         /* The texture registers are not SSA, so be careful:
2153                          * conservatively stop at the first non-ALU op (e.g. a
2154                          * texture op, which may or may not write) we hit */
2155
2156 if (v->type != TAG_ALU_4)
2157 break;
2158
2159 if (v->ssa_args.dest == from) {
2160 /* We don't want to track partial writes ... */
2161 if (v->alu.mask == 0xF) {
2162 v->ssa_args.dest = to;
2163 eliminated = true;
2164 }
2165
2166 break;
2167 }
2168 }
2169
2170 if (eliminated)
2171 mir_remove_instruction(ins);
2172
2173 progress |= eliminated;
2174 }
2175
2176 return progress;
2177 }
2178
2179 /* The following passes reorder MIR instructions to enable better scheduling */
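
/* (Editor's note) Load/store ops pack two to a bundle on Midgard, so pairing
 * adjacent loads below saves bundles; orphaned loads are hoisted up to form
 * pairs when it is safe to reorder them */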
2180
2181 static void
2182 midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
2183 {
2184 mir_foreach_instr_in_block_safe(block, ins) {
2185 if (ins->type != TAG_LOAD_STORE_4) continue;
2186
2187 /* We've found a load/store op. Check if next is also load/store. */
2188 midgard_instruction *next_op = mir_next_op(ins);
2189 if (&next_op->link != &block->instructions) {
2190 if (next_op->type == TAG_LOAD_STORE_4) {
2191 /* If so, we're done since we're a pair */
2192 ins = mir_next_op(ins);
2193 continue;
2194 }
2195
2196 /* Maximum search distance to pair, to avoid register pressure disasters */
2197 int search_distance = 8;
2198
2199 /* Otherwise, we have an orphaned load/store -- search for another load */
2200 mir_foreach_instr_in_block_from(block, c, mir_next_op(ins)) {
2201 /* Terminate search if necessary */
2202 if (!(search_distance--)) break;
2203
2204 if (c->type != TAG_LOAD_STORE_4) continue;
2205
2206 /* Stores cannot be reordered, since they have
2207 * dependencies. For the same reason, indirect
2208 * loads cannot be reordered as their index is
2209 * loaded in r27.w */
2210
2211 if (OP_IS_STORE(c->load_store.op)) continue;
2212
2213 /* It appears the 0x800 bit is set whenever a
2214 * load is direct, unset when it is indirect.
2215 * Skip indirect loads. */
2216
2217 if (!(c->load_store.unknown & 0x800)) continue;
2218
2219 /* We found one! Move it up to pair and remove it from the old location */
2220
2221 mir_insert_instruction_before(ins, *c);
2222 mir_remove_instruction(c);
2223
2224 break;
2225 }
2226 }
2227 }
2228 }
2229
2230 /* If there are leftovers after the below pass, emit actual fmov
2231 * instructions for the slow-but-correct path */
2232
2233 static void
2234 emit_leftover_move(compiler_context *ctx)
2235 {
2236 set_foreach(ctx->leftover_ssa_to_alias, leftover) {
2237 int base = ((uintptr_t) leftover->key) - 1;
2238 int mapped = base;
2239
2240 map_ssa_to_alias(ctx, &mapped);
2241 EMIT(mov, mapped, blank_alu_src, base);
2242 }
2243 }
2244
2245 static void
2246 actualise_ssa_to_alias(compiler_context *ctx)
2247 {
2248 mir_foreach_instr(ctx, ins) {
2249 map_ssa_to_alias(ctx, &ins->ssa_args.src0);
2250 map_ssa_to_alias(ctx, &ins->ssa_args.src1);
2251 }
2252
2253 emit_leftover_move(ctx);
2254 }
2255
2256 static void
2257 emit_fragment_epilogue(compiler_context *ctx)
2258 {
2259 /* Special case: writing out constants requires us to include the move
2260 * explicitly now, so shove it into r0 */
2261
2262 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
2263
2264 if (constant_value) {
2265 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
2266 attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
2267 emit_mir_instruction(ctx, ins);
2268 }
2269
2270 /* Perform the actual fragment writeout. We have two writeout/branch
2271 * instructions, forming a loop until writeout is successful as per the
2272 * docs. TODO: gl_FragDepth */
2273
2274 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2275 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2276 }
2277
2278 /* For the blend epilogue, we need to convert the blended fragment vec4 (stored
2279 * in r0) to a RGBA8888 value by scaling and type converting. We then output it
2280 * with the int8 analogue to the fragment epilogue */
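
/* e.g. (editor's arithmetic) a channel value of 0.5 scales to 0.5 * 255 =
 * 127.5, which f2u_rte rounds to even, giving 128 = 0x80 */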
2281
2282 static void
2283 emit_blend_epilogue(compiler_context *ctx)
2284 {
2285 /* vmul.fmul.none.fulllow hr48, r0, #255 */
2286
2287 midgard_instruction scale = {
2288 .type = TAG_ALU_4,
2289 .unit = UNIT_VMUL,
2290 .inline_constant = _mesa_float_to_half(255.0),
2291 .ssa_args = {
2292 .src0 = SSA_FIXED_REGISTER(0),
2293 .src1 = SSA_UNUSED_0,
2294 .dest = SSA_FIXED_REGISTER(24),
2295 .inline_constant = true
2296 },
2297 .alu = {
2298 .op = midgard_alu_op_fmul,
2299 .reg_mode = midgard_reg_mode_32,
2300 .dest_override = midgard_dest_override_lower,
2301 .mask = 0xFF,
2302 .src1 = vector_alu_srco_unsigned(blank_alu_src),
2303 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2304 }
2305 };
2306
2307 emit_mir_instruction(ctx, scale);
2308
2309 /* vadd.f2u_rte.pos.low hr0, hr48, #0 */
2310
2311 midgard_vector_alu_src alu_src = blank_alu_src;
2312 alu_src.half = true;
2313
2314 midgard_instruction f2u_rte = {
2315 .type = TAG_ALU_4,
2316 .ssa_args = {
2317 .src0 = SSA_FIXED_REGISTER(24),
2318 .src1 = SSA_UNUSED_0,
2319 .dest = SSA_FIXED_REGISTER(0),
2320 .inline_constant = true
2321 },
2322 .alu = {
2323 .op = midgard_alu_op_f2u_rte,
2324 .reg_mode = midgard_reg_mode_16,
2325 .dest_override = midgard_dest_override_lower,
2326 .outmod = midgard_outmod_pos,
2327 .mask = 0xF,
2328 .src1 = vector_alu_srco_unsigned(alu_src),
2329 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2330 }
2331 };
2332
2333 emit_mir_instruction(ctx, f2u_rte);
2334
2335 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2336 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2337 }
2338
2339 static midgard_block *
2340 emit_block(compiler_context *ctx, nir_block *block)
2341 {
2342         midgard_block *this_block = calloc(1, sizeof(midgard_block));
2343 list_addtail(&this_block->link, &ctx->blocks);
2344
2345 this_block->is_scheduled = false;
2346 ++ctx->block_count;
2347
2348 ctx->texture_index[0] = -1;
2349 ctx->texture_index[1] = -1;
2350
2351 /* Add us as a successor to the block we are following */
2352 if (ctx->current_block)
2353 midgard_block_add_successor(ctx->current_block, this_block);
2354
2355 /* Set up current block */
2356 list_inithead(&this_block->instructions);
2357 ctx->current_block = this_block;
2358
2359 nir_foreach_instr(instr, block) {
2360 emit_instr(ctx, instr);
2361 ++ctx->instruction_count;
2362 }
2363
2364 inline_alu_constants(ctx);
2365 embedded_to_inline_constant(ctx);
2366
2367         /* Perform the heavy lifting for aliasing */
2368 actualise_ssa_to_alias(ctx);
2369
2370 midgard_pair_load_store(ctx, this_block);
2371
2372 /* Append fragment shader epilogue (value writeout) */
2373 if (ctx->stage == MESA_SHADER_FRAGMENT) {
2374 if (block == nir_impl_last_block(ctx->func->impl)) {
2375 if (ctx->is_blend)
2376 emit_blend_epilogue(ctx);
2377 else
2378 emit_fragment_epilogue(ctx);
2379 }
2380 }
2381
2382 if (block == nir_start_block(ctx->func->impl))
2383 ctx->initial_block = this_block;
2384
2385 if (block == nir_impl_last_block(ctx->func->impl))
2386 ctx->final_block = this_block;
2387
2388 /* Allow the next control flow to access us retroactively, for
2389 * branching etc */
2390 ctx->current_block = this_block;
2391
2392 /* Document the fallthrough chain */
2393 ctx->previous_source_block = this_block;
2394
2395 return this_block;
2396 }
2397
2398 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2399
2400 static void
2401 emit_if(struct compiler_context *ctx, nir_if *nif)
2402 {
2403 /* Conditional branches expect the condition in r31.w; emit a move for
2404 * that in the _previous_ block (which is the current block). */
2405 emit_condition(ctx, &nif->condition, true, COMPONENT_X);
2406
2407 /* Speculatively emit the branch, but we can't fill it in until later */
2408 EMIT(branch, true, true);
2409 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2410
2411 /* Emit the two subblocks */
2412 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2413
2414 /* Emit a jump from the end of the then block to the end of the else */
2415 EMIT(branch, false, false);
2416 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2417
2418 /* Emit second block, and check if it's empty */
2419
2420 int else_idx = ctx->block_count;
2421 int count_in = ctx->instruction_count;
2422 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2423 int after_else_idx = ctx->block_count;
2424
2425 /* Now that we have the subblocks emitted, fix up the branches */
2426
2427 assert(then_block);
2428 assert(else_block);
2429
2430 if (ctx->instruction_count == count_in) {
2431 /* The else block is empty, so don't emit an exit jump */
2432 mir_remove_instruction(then_exit);
2433 then_branch->branch.target_block = after_else_idx;
2434 } else {
2435 then_branch->branch.target_block = else_idx;
2436 then_exit->branch.target_block = after_else_idx;
2437 }
2438 }
2439
2440 static void
2441 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2442 {
2443 /* Remember where we are */
2444 midgard_block *start_block = ctx->current_block;
2445
2446 /* Allocate a loop number, growing the current inner loop depth */
2447 int loop_idx = ++ctx->current_loop_depth;
2448
2449 /* Get index from before the body so we can loop back later */
2450 int start_idx = ctx->block_count;
2451
2452 /* Emit the body itself */
2453 emit_cf_list(ctx, &nloop->body);
2454
2455 /* Branch back to loop back */
2456 struct midgard_instruction br_back = v_branch(false, false);
2457 br_back.branch.target_block = start_idx;
2458 emit_mir_instruction(ctx, br_back);
2459
2460 /* Mark down that branch in the graph. Note that we're really branching
2461 * to the block *after* we started in. TODO: Why doesn't the branch
2462 * itself have an off-by-one then...? */
2463 midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
2464
2465 /* Find the index of the block about to follow us (note: we don't add
2466 * one; blocks are 0-indexed so we get a fencepost problem) */
2467 int break_block_idx = ctx->block_count;
2468
2469 /* Fix up the break statements we emitted to point to the right place,
2470 * now that we can allocate a block number for them */
2471
2472 list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
2473 mir_foreach_instr_in_block(block, ins) {
2474 if (ins->type != TAG_ALU_4) continue;
2475 if (!ins->compact_branch) continue;
2476 if (ins->prepacked_branch) continue;
2477
2478 /* We found a branch -- check the type to see if we need to do anything */
2479 if (ins->branch.target_type != TARGET_BREAK) continue;
2480
2481 /* It's a break! Check if it's our break */
2482 if (ins->branch.target_break != loop_idx) continue;
2483
2484 /* Okay, cool, we're breaking out of this loop.
2485 * Rewrite from a break to a goto */
2486
2487 ins->branch.target_type = TARGET_GOTO;
2488 ins->branch.target_block = break_block_idx;
2489 }
2490 }
2491
2492 /* Now that we've finished emitting the loop, free up the depth again
2493 * so we play nice with recursion amid nested loops */
2494 --ctx->current_loop_depth;
2495 }
2496
2497 static midgard_block *
2498 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2499 {
2500 midgard_block *start_block = NULL;
2501
2502 foreach_list_typed(nir_cf_node, node, node, list) {
2503 switch (node->type) {
2504 case nir_cf_node_block: {
2505 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2506
2507 if (!start_block)
2508 start_block = block;
2509
2510 break;
2511 }
2512
2513 case nir_cf_node_if:
2514 emit_if(ctx, nir_cf_node_as_if(node));
2515 break;
2516
2517 case nir_cf_node_loop:
2518 emit_loop(ctx, nir_cf_node_as_loop(node));
2519 break;
2520
2521 case nir_cf_node_function:
2522 assert(0);
2523 break;
2524 }
2525 }
2526
2527 return start_block;
2528 }
2529
2530 /* Due to lookahead, we need to report the first tag executed in the command
2531 * stream and in branch targets. An initial block might be empty, so iterate
2532 * until we find one that 'works' */
2533
2534 static unsigned
2535 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2536 {
2537 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2538
2539 unsigned first_tag = 0;
2540
2541 do {
2542 midgard_bundle *initial_bundle = util_dynarray_element(&initial_block->bundles, midgard_bundle, 0);
2543
2544 if (initial_bundle) {
2545 first_tag = initial_bundle->tag;
2546 break;
2547 }
2548
2549 /* Initial block is empty, try the next block */
2550 initial_block = list_first_entry(&(initial_block->link), midgard_block, link);
2551         } while (initial_block != NULL);
2552
2553 assert(first_tag);
2554 return first_tag;
2555 }
2556
2557 int
2558 midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend)
2559 {
2560 struct util_dynarray *compiled = &program->compiled;
2561
2562 midgard_debug = debug_get_option_midgard_debug();
2563
2564 compiler_context ictx = {
2565 .nir = nir,
2566 .stage = nir->info.stage,
2567
2568 .is_blend = is_blend,
2569 .blend_constant_offset = -1,
2570
2571 .alpha_ref = program->alpha_ref
2572 };
2573
2574 compiler_context *ctx = &ictx;
2575
2576 /* TODO: Decide this at runtime */
2577 ctx->uniform_cutoff = 8;
2578
2579         /* Initialize hash tables at a global (not per-block) level */
2580
2581 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2582 ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
2583 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2584 ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
2585 ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
2586
2587 /* Record the varying mapping for the command stream's bookkeeping */
2588
2589 struct exec_list *varyings =
2590 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
2591
2592 unsigned max_varying = 0;
2593 nir_foreach_variable(var, varyings) {
2594 unsigned loc = var->data.driver_location;
2595 unsigned sz = glsl_type_size(var->type, FALSE);
2596
2597 for (int c = 0; c < sz; ++c) {
2598 program->varyings[loc + c] = var->data.location + c;
2599 max_varying = MAX2(max_varying, loc + c);
2600 }
2601 }
2602
2603 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2604 * (so we don't accidentally duplicate the epilogue since mesa/st has
2605 * messed with our I/O quite a bit already) */
2606
2607 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2608
2609 if (ctx->stage == MESA_SHADER_VERTEX)
2610 NIR_PASS_V(nir, nir_lower_viewport_transform);
2611
2612 NIR_PASS_V(nir, nir_lower_var_copies);
2613 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2614 NIR_PASS_V(nir, nir_split_var_copies);
2615 NIR_PASS_V(nir, nir_lower_var_copies);
2616 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2617 NIR_PASS_V(nir, nir_lower_var_copies);
2618 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2619
2620 NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
2621
2622 /* Optimisation passes */
2623
2624 optimise_nir(nir);
2625
2626 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2627 nir_print_shader(nir, stdout);
2628 }
2629
2630 /* Assign sysvals and counts, now that we're sure
2631 * (post-optimisation) */
2632
2633 midgard_nir_assign_sysvals(ctx, nir);
2634
2635 program->uniform_count = nir->num_uniforms;
2636 program->sysval_count = ctx->sysval_count;
2637 memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
2638
2639 program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
2640 program->varying_count = max_varying + 1; /* Fencepost off-by-one */
2641
2642 nir_foreach_function(func, nir) {
2643 if (!func->impl)
2644 continue;
2645
2646 list_inithead(&ctx->blocks);
2647 ctx->block_count = 0;
2648 ctx->func = func;
2649
2650 emit_cf_list(ctx, &func->impl->body);
2651 emit_block(ctx, func->impl->end_block);
2652
2653 break; /* TODO: Multi-function shaders */
2654 }
2655
2656 util_dynarray_init(compiled, NULL);
2657
2658 /* MIR-level optimizations */
2659
2660 bool progress = false;
2661
2662 do {
2663 progress = false;
2664
2665 mir_foreach_block(ctx, block) {
2666 progress |= midgard_opt_pos_propagate(ctx, block);
2667 progress |= midgard_opt_copy_prop(ctx, block);
2668 progress |= midgard_opt_copy_prop_tex(ctx, block);
2669 progress |= midgard_opt_dead_code_eliminate(ctx, block);
2670 }
2671 } while (progress);
2672
2673 /* Nested control-flow can result in dead branches at the end of the
2674 * block. This messes with our analysis and is just dead code, so cull
2675 * them */
2676 mir_foreach_block(ctx, block) {
2677 midgard_opt_cull_dead_branch(ctx, block);
2678 }
2679
2680 /* Schedule! */
2681 schedule_program(ctx);
2682
2683 /* Now that all the bundles are scheduled and we can calculate block
2684 * sizes, emit actual branch instructions rather than placeholders */
2685
2686 int br_block_idx = 0;
2687
2688 mir_foreach_block(ctx, block) {
2689 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2690 for (int c = 0; c < bundle->instruction_count; ++c) {
2691 midgard_instruction *ins = bundle->instructions[c];
2692
2693 if (!midgard_is_branch_unit(ins->unit)) continue;
2694
2695 if (ins->prepacked_branch) continue;
2696
2697 /* Parse some basic branch info */
2698 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2699 bool is_conditional = ins->branch.conditional;
2700 bool is_inverted = ins->branch.invert_conditional;
2701 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2702
2703 /* Determine the block we're jumping to */
2704 int target_number = ins->branch.target_block;
2705
2706 /* Report the destination tag */
2707 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
2708
2709 /* Count up the number of quadwords we're
2710 * jumping over = number of quadwords until
2711 * (br_block_idx, target_number) */
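
                                /* e.g. (editor's illustration) a forward jump
                                 * from block 2 to block 5 adds up the
                                 * quadwords of blocks 3 and 4 */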
2712
2713 int quadword_offset = 0;
2714
2715 if (is_discard) {
2716 /* Jump to the end of the shader. We
2717 * need to include not only the
2718 * following blocks, but also the
2719 * contents of our current block (since
2720 * discard can come in the middle of
2721 * the block) */
2722
2723 midgard_block *blk = mir_get_block(ctx, br_block_idx + 1);
2724
2725 for (midgard_bundle *bun = bundle + 1; bun < (midgard_bundle *)((char*) block->bundles.data + block->bundles.size); ++bun) {
2726 quadword_offset += quadword_size(bun->tag);
2727 }
2728
2729 mir_foreach_block_from(ctx, blk, b) {
2730 quadword_offset += b->quadword_count;
2731 }
2732
2733 } else if (target_number > br_block_idx) {
2734 /* Jump forward */
2735
2736 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2737 midgard_block *blk = mir_get_block(ctx, idx);
2738 assert(blk);
2739
2740 quadword_offset += blk->quadword_count;
2741 }
2742 } else {
2743 /* Jump backwards */
2744
2745 for (int idx = br_block_idx; idx >= target_number; --idx) {
2746 midgard_block *blk = mir_get_block(ctx, idx);
2747 assert(blk);
2748
2749 quadword_offset -= blk->quadword_count;
2750 }
2751 }
2752
2753 /* Unconditional extended branches (far jumps)
2754 * have issues, so we always use a conditional
2755 * branch, setting the condition to always for
2756 * unconditional. For compact unconditional
2757 * branches, cond isn't used so it doesn't
2758 * matter what we pick. */
2759
2760 midgard_condition cond =
2761 !is_conditional ? midgard_condition_always :
2762 is_inverted ? midgard_condition_false :
2763 midgard_condition_true;
2764
2765 midgard_jmp_writeout_op op =
2766 is_discard ? midgard_jmp_writeout_op_discard :
2767 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2768 midgard_jmp_writeout_op_branch_cond;
2769
2770 if (!is_compact) {
2771 midgard_branch_extended branch =
2772 midgard_create_branch_extended(
2773 cond, op,
2774 dest_tag,
2775 quadword_offset);
2776
2777 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2778 } else if (is_conditional || is_discard) {
2779 midgard_branch_cond branch = {
2780 .op = op,
2781 .dest_tag = dest_tag,
2782 .offset = quadword_offset,
2783 .cond = cond
2784 };
2785
2786 assert(branch.offset == quadword_offset);
2787
2788 memcpy(&ins->br_compact, &branch, sizeof(branch));
2789 } else {
2790 assert(op == midgard_jmp_writeout_op_branch_uncond);
2791
2792 midgard_branch_uncond branch = {
2793 .op = op,
2794 .dest_tag = dest_tag,
2795 .offset = quadword_offset,
2796 .unknown = 1
2797 };
2798
2799 assert(branch.offset == quadword_offset);
2800
2801 memcpy(&ins->br_compact, &branch, sizeof(branch));
2802 }
2803 }
2804 }
2805
2806 ++br_block_idx;
2807 }
2808
2809 /* Emit flat binary from the instruction arrays. Iterate each block in
2810 * sequence. Save instruction boundaries such that lookahead tags can
2811 * be assigned easily */
2812
2813 /* Cache _all_ bundles in source order for lookahead across failed branches */
2814
2815 int bundle_count = 0;
2816 mir_foreach_block(ctx, block) {
2817 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2818 }
2819 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2820 int bundle_idx = 0;
2821 mir_foreach_block(ctx, block) {
2822 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2823 source_order_bundles[bundle_idx++] = bundle;
2824 }
2825 }
2826
2827 int current_bundle = 0;
2828
2829         /* Midgard prefetches instruction types, so during emission we
2830          * need to look ahead to the next bundle's tag. If this is the
2831          * last bundle, we use 1; if it is the second-to-last and the
2832          * last is an ALU, it is also 1... */
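
        /* e.g. (editor's illustration) for bundle tags <TEXTURE, LOAD_STORE,
         * ALU>, the lookaheads come out as <tag of the LOAD_STORE, 1, 1> */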
2833
2834 mir_foreach_block(ctx, block) {
2835 mir_foreach_bundle_in_block(block, bundle) {
2836 int lookahead = 1;
2837
2838 if (current_bundle + 1 < bundle_count) {
2839 uint8_t next = source_order_bundles[current_bundle + 1]->tag;
2840
2841 if (!(current_bundle + 2 < bundle_count) && IS_ALU(next)) {
2842 lookahead = 1;
2843 } else {
2844 lookahead = next;
2845 }
2846 }
2847
2848 emit_binary_bundle(ctx, bundle, compiled, lookahead);
2849 ++current_bundle;
2850 }
2851
2852 /* TODO: Free deeper */
2853 //util_dynarray_fini(&block->instructions);
2854 }
2855
2856 free(source_order_bundles);
2857
2858 /* Report the very first tag executed */
2859 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
2860
2861 /* Deal with off-by-one related to the fencepost problem */
2862 program->work_register_count = ctx->work_registers + 1;
2863
2864 program->can_discard = ctx->can_discard;
2865 program->uniform_cutoff = ctx->uniform_cutoff;
2866
2867 program->blend_patch_offset = ctx->blend_constant_offset;
2868
2869 if (midgard_debug & MIDGARD_DBG_SHADERS)
2870 disassemble_midgard(program->compiled.data, program->compiled.size);
2871
2872 return 0;
2873 }