panfrost/midgard: Lower texture projectors
[mesa.git] / src/gallium/drivers/panfrost/midgard/midgard_compile.c
1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <sys/mman.h>
27 #include <fcntl.h>
28 #include <stdint.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <err.h>
32
33 #include "main/mtypes.h"
34 #include "compiler/glsl/glsl_to_nir.h"
35 #include "compiler/nir_types.h"
36 #include "main/imports.h"
37 #include "compiler/nir/nir_builder.h"
38 #include "util/half_float.h"
39 #include "util/u_debug.h"
40 #include "util/u_dynarray.h"
41 #include "util/list.h"
42 #include "main/mtypes.h"
43
44 #include "midgard.h"
45 #include "midgard_nir.h"
46 #include "midgard_compile.h"
47 #include "midgard_ops.h"
48 #include "helpers.h"
49 #include "compiler.h"
50
51 #include "disassemble.h"
52
53 static const struct debug_named_value debug_options[] = {
54 {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
55 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
56 DEBUG_NAMED_VALUE_END
57 };
58
59 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
60
61 int midgard_debug = 0;
62
63 #define DBG(fmt, ...) \
64 do { if (midgard_debug & MIDGARD_DBG_MSGS) \
65 fprintf(stderr, "%s:%d: "fmt, \
66 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
67
68 static bool
69 midgard_is_branch_unit(unsigned unit)
70 {
71 return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
72 }
73
74 static void
75 midgard_block_add_successor(midgard_block *block, midgard_block *successor)
76 {
77 block->successors[block->nr_successors++] = successor;
78 assert(block->nr_successors <= ARRAY_SIZE(block->successors));
79 }
80
81 /* Helpers to generate midgard_instructions using macro magic, since every
82 * driver seems to do it that way */
83
84 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
85 #define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
86 #define SWIZZLE_XXXX SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X)
87 #define SWIZZLE_WWWW SWIZZLE(COMPONENT_W, COMPONENT_W, COMPONENT_W, COMPONENT_W)
88
89 #define M_LOAD_STORE(name, rname, uname) \
90 static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
91 midgard_instruction i = { \
92 .type = TAG_LOAD_STORE_4, \
93 .ssa_args = { \
94 .rname = ssa, \
95 .uname = -1, \
96 .src1 = -1 \
97 }, \
98 .load_store = { \
99 .op = midgard_op_##name, \
100 .mask = 0xF, \
101 .swizzle = SWIZZLE_XYZW, \
102 .address = address \
103 } \
104 }; \
105 \
106 return i; \
107 }
108
109 #define M_LOAD(name) M_LOAD_STORE(name, dest, src0)
110 #define M_STORE(name) M_LOAD_STORE(name, src0, dest)
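/* For example, M_LOAD(ld_attr_32) defines a helper m_ld_attr_32(ssa, address)
 * that returns a TAG_LOAD_STORE_4 instruction with op midgard_op_ld_attr_32,
 * dest = ssa, a full 0xF mask, an identity (XYZW) swizzle and the given
 * address; M_STORE swaps the roles so the SSA index becomes src0 instead. */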
111
112 /* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
113 * the corresponding Midgard source */
114
115 static midgard_vector_alu_src
116 vector_alu_modifiers(nir_alu_src *src, bool is_int)
117 {
118 if (!src) return blank_alu_src;
119
120 midgard_vector_alu_src alu_src = {
121 .rep_low = 0,
122 .rep_high = 0,
123 .half = 0, /* TODO */
124 .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
125 };
126
127 if (is_int) {
128 /* TODO: sign-extend/zero-extend */
129 alu_src.mod = midgard_int_normal;
130
131 /* These should have been lowered away */
132 assert(!(src->abs || src->negate));
133 } else {
134 alu_src.mod = (src->abs << 0) | (src->negate << 1);
135 }
136
137 return alu_src;
138 }
139
140 /* load/store instructions have both 32-bit and 16-bit variants, depending on
141 * whether we are using vectors composed of highp or mediump. At the moment, we
142 * don't support half-floats -- this requires changes in other parts of the
143 * compiler -- therefore the 16-bit versions are commented out. */
144
145 //M_LOAD(ld_attr_16);
146 M_LOAD(ld_attr_32);
147 //M_LOAD(ld_vary_16);
148 M_LOAD(ld_vary_32);
149 //M_LOAD(ld_uniform_16);
150 M_LOAD(ld_uniform_32);
151 M_LOAD(ld_color_buffer_8);
152 //M_STORE(st_vary_16);
153 M_STORE(st_vary_32);
154 M_STORE(st_cubemap_coords);
155
156 static midgard_instruction
157 v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
158 {
159 midgard_branch_cond branch = {
160 .op = op,
161 .dest_tag = tag,
162 .offset = offset,
163 .cond = cond
164 };
165
166 uint16_t compact;
167 memcpy(&compact, &branch, sizeof(branch));
168
169 midgard_instruction ins = {
170 .type = TAG_ALU_4,
171 .unit = ALU_ENAB_BR_COMPACT,
172 .prepacked_branch = true,
173 .compact_branch = true,
174 .br_compact = compact
175 };
176
177 if (op == midgard_jmp_writeout_op_writeout)
178 ins.writeout = true;
179
180 return ins;
181 }
182
183 static midgard_instruction
184 v_branch(bool conditional, bool invert)
185 {
186 midgard_instruction ins = {
187 .type = TAG_ALU_4,
188 .unit = ALU_ENAB_BRANCH,
189 .compact_branch = true,
190 .branch = {
191 .conditional = conditional,
192 .invert_conditional = invert
193 }
194 };
195
196 return ins;
197 }
198
199 static midgard_branch_extended
200 midgard_create_branch_extended( midgard_condition cond,
201 midgard_jmp_writeout_op op,
202 unsigned dest_tag,
203 signed quadword_offset)
204 {
205 /* For unclear reasons, the condition code is repeated 8 times */
206 uint16_t duplicated_cond =
207 (cond << 14) |
208 (cond << 12) |
209 (cond << 10) |
210 (cond << 8) |
211 (cond << 6) |
212 (cond << 4) |
213 (cond << 2) |
214 (cond << 0);
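/* For example, a two-bit condition of 0b01 duplicates to 0x5555 */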
215
216 midgard_branch_extended branch = {
217 .op = op,
218 .dest_tag = dest_tag,
219 .offset = quadword_offset,
220 .cond = duplicated_cond
221 };
222
223 return branch;
224 }
225
226 static void
227 attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
228 {
229 ins->has_constants = true;
230 memcpy(&ins->constants, constants, 16);
231 }
232
233 static int
234 glsl_type_size(const struct glsl_type *type, bool bindless)
235 {
236 return glsl_count_attribute_slots(type, false);
237 }
238
239 /* Lower fdot2 to a vector multiplication followed by channel addition */
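/* e.g. fdot2(a, b) becomes (a * b).x + (a * b).y */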
240 static void
241 midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
242 {
243 if (alu->op != nir_op_fdot2)
244 return;
245
246 b->cursor = nir_before_instr(&alu->instr);
247
248 nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
249 nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
250
251 nir_ssa_def *product = nir_fmul(b, src0, src1);
252
253 nir_ssa_def *sum = nir_fadd(b,
254 nir_channel(b, product, 0),
255 nir_channel(b, product, 1));
256
257 /* Replace the fdot2 with this sum */
258 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
259 }
260
261 static int
262 midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
263 {
264 switch (instr->intrinsic) {
265 case nir_intrinsic_load_viewport_scale:
266 return PAN_SYSVAL_VIEWPORT_SCALE;
267 case nir_intrinsic_load_viewport_offset:
268 return PAN_SYSVAL_VIEWPORT_OFFSET;
269 default:
270 return -1;
271 }
272 }
273
274 static void
275 midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
276 {
277 int sysval = -1;
278
279 if (instr->type == nir_instr_type_intrinsic) {
280 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
281 sysval = midgard_nir_sysval_for_intrinsic(intr);
282 }
283
284 if (sysval < 0)
285 return;
286
287 /* We have a sysval load; check if it's already been assigned */
288
289 if (_mesa_hash_table_u64_search(ctx->sysval_to_id, sysval))
290 return;
291
292 /* It hasn't -- so assign it now! */
293
294 unsigned id = ctx->sysval_count++;
295 _mesa_hash_table_u64_insert(ctx->sysval_to_id, sysval, (void *) ((uintptr_t) id + 1));
296 ctx->sysvals[id] = sysval;
297 }
298
299 static void
300 midgard_nir_assign_sysvals(compiler_context *ctx, nir_shader *shader)
301 {
302 ctx->sysval_count = 0;
303
304 nir_foreach_function(function, shader) {
305 if (!function->impl) continue;
306
307 nir_foreach_block(block, function->impl) {
308 nir_foreach_instr_safe(instr, block) {
309 midgard_nir_assign_sysval_body(ctx, instr);
310 }
311 }
312 }
313 }
314
315 static bool
316 midgard_nir_lower_fdot2(nir_shader *shader)
317 {
318 bool progress = false;
319
320 nir_foreach_function(function, shader) {
321 if (!function->impl) continue;
322
323 nir_builder _b;
324 nir_builder *b = &_b;
325 nir_builder_init(b, function->impl);
326
327 nir_foreach_block(block, function->impl) {
328 nir_foreach_instr_safe(instr, block) {
329 if (instr->type != nir_instr_type_alu) continue;
330
331 nir_alu_instr *alu = nir_instr_as_alu(instr);
332 midgard_nir_lower_fdot2_body(b, alu);
333
334 progress |= true;
335 }
336 }
337
338 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
339
340 }
341
342 return progress;
343 }
344
345 static void
346 optimise_nir(nir_shader *nir)
347 {
348 bool progress;
349 unsigned lower_flrp =
350 (nir->options->lower_flrp16 ? 16 : 0) |
351 (nir->options->lower_flrp32 ? 32 : 0) |
352 (nir->options->lower_flrp64 ? 64 : 0);
353
354 NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
355 NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
356 NIR_PASS(progress, nir, nir_lower_idiv);
357
358 nir_lower_tex_options lower_tex_options = {
359 .lower_rect = true,
360 .lower_txp = ~0
361 };
362
363 NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
364
365 do {
366 progress = false;
367
368 NIR_PASS(progress, nir, nir_lower_var_copies);
369 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
370
371 NIR_PASS(progress, nir, nir_copy_prop);
372 NIR_PASS(progress, nir, nir_opt_dce);
373 NIR_PASS(progress, nir, nir_opt_dead_cf);
374 NIR_PASS(progress, nir, nir_opt_cse);
375 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
376 NIR_PASS(progress, nir, nir_opt_algebraic);
377 NIR_PASS(progress, nir, nir_opt_constant_folding);
378
379 if (lower_flrp != 0) {
380 bool lower_flrp_progress = false;
381 NIR_PASS(lower_flrp_progress,
382 nir,
383 nir_lower_flrp,
384 lower_flrp,
385 false /* always_precise */,
386 nir->options->lower_ffma);
387 if (lower_flrp_progress) {
388 NIR_PASS(progress, nir,
389 nir_opt_constant_folding);
390 progress = true;
391 }
392
393 /* Nothing should rematerialize any flrps, so we only
394 * need to do this lowering once.
395 */
396 lower_flrp = 0;
397 }
398
399 NIR_PASS(progress, nir, nir_opt_undef);
400 NIR_PASS(progress, nir, nir_opt_loop_unroll,
401 nir_var_shader_in |
402 nir_var_shader_out |
403 nir_var_function_temp);
404
405 /* TODO: Enable vectorize when merged upstream */
406 // NIR_PASS(progress, nir, nir_opt_vectorize);
407 } while (progress);
408
409 /* Must be run at the end to prevent creation of fsin/fcos ops */
410 NIR_PASS(progress, nir, midgard_nir_scale_trig);
411
412 do {
413 progress = false;
414
415 NIR_PASS(progress, nir, nir_opt_dce);
416 NIR_PASS(progress, nir, nir_opt_algebraic);
417 NIR_PASS(progress, nir, nir_opt_constant_folding);
418 NIR_PASS(progress, nir, nir_copy_prop);
419 } while (progress);
420
421 NIR_PASS(progress, nir, nir_opt_algebraic_late);
422
423 /* We implement booleans as 32-bit 0/~0 */
424 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
425
426 /* Now that booleans are lowered, we can run our late opts */
427 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
428
429 /* Lower mods for float ops only. Integer ops don't support modifiers
430 * (saturate doesn't make sense on integers, neg/abs require dedicated
431 * instructions) */
432
433 NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
434 NIR_PASS(progress, nir, nir_copy_prop);
435 NIR_PASS(progress, nir, nir_opt_dce);
436
437 /* Take us out of SSA */
438 NIR_PASS(progress, nir, nir_lower_locals_to_regs);
439 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
440
441 /* We are a vector architecture; write combine where possible */
442 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
443 NIR_PASS(progress, nir, nir_lower_vec_to_movs);
444
445 NIR_PASS(progress, nir, nir_opt_dce);
446 }
447
448 /* Front-half of aliasing the SSA slots, merely by inserting the flag in the
449 * appropriate hash table. Intentional off-by-one to avoid confusing NULL with
450 * r0. See the comments in compiler_context */
451
452 static void
453 alias_ssa(compiler_context *ctx, int dest, int src)
454 {
455 _mesa_hash_table_u64_insert(ctx->ssa_to_alias, dest + 1, (void *) ((uintptr_t) src + 1));
456 _mesa_set_add(ctx->leftover_ssa_to_alias, (void *) (uintptr_t) (dest + 1));
457 }
458
459 /* ...or undo it, after which the original index will be used (dummy move should be emitted alongside this) */
460
461 static void
462 unalias_ssa(compiler_context *ctx, int dest)
463 {
464 _mesa_hash_table_u64_remove(ctx->ssa_to_alias, dest + 1);
465 /* TODO: Remove from leftover or no? */
466 }
467
468 /* Do not actually emit a load; instead, cache the constant for inlining */
469
470 static void
471 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
472 {
473 nir_ssa_def def = instr->def;
474
475 float *v = rzalloc_array(NULL, float, 4);
476 nir_const_load_to_arr(v, instr, f32);
477 _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
478 }
479
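/* SSA defs and NIR registers share a single flat index namespace: SSA values
 * keep their own index, while register-backed values are offset past
 * ssa_alloc so the two can never collide. */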
480 static unsigned
481 nir_src_index(compiler_context *ctx, nir_src *src)
482 {
483 if (src->is_ssa)
484 return src->ssa->index;
485 else {
486 assert(!src->reg.indirect);
487 return ctx->func->impl->ssa_alloc + src->reg.reg->index;
488 }
489 }
490
491 static unsigned
492 nir_dest_index(compiler_context *ctx, nir_dest *dst)
493 {
494 if (dst->is_ssa)
495 return dst->ssa.index;
496 else {
497 assert(!dst->reg.indirect);
498 return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
499 }
500 }
501
502 static unsigned
503 nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
504 {
505 return nir_src_index(ctx, &src->src);
506 }
507
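/* A swizzle is non-scalar if it reads more than one distinct component over
 * the instruction's width, e.g. .xyzw or .xyxx; .xxxx counts as scalar. */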
508 static bool
509 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
510 {
511 unsigned comp = src->swizzle[0];
512
513 for (unsigned c = 1; c < nr_components; ++c) {
514 if (src->swizzle[c] != comp)
515 return true;
516 }
517
518 return false;
519 }
520
521 /* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the
522 * output of a conditional test) into that register */
523
524 static void
525 emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
526 {
527 int condition = nir_src_index(ctx, src);
528
529 /* Source to swizzle the desired component into w */
530
531 const midgard_vector_alu_src alu_src = {
532 .swizzle = SWIZZLE(component, component, component, component),
533 };
534
535 /* There is no boolean move instruction. Instead, we simulate a move by
536 * ANDing the condition with itself to get it into r31.w */
537
538 midgard_instruction ins = {
539 .type = TAG_ALU_4,
540
541 /* We need to set the conditional as close as possible */
542 .precede_break = true,
543 .unit = for_branch ? UNIT_SMUL : UNIT_SADD,
544
545 .ssa_args = {
546 .src0 = condition,
547 .src1 = condition,
548 .dest = SSA_FIXED_REGISTER(31),
549 },
550
551 .alu = {
552 .op = midgard_alu_op_iand,
553 .outmod = midgard_outmod_int_wrap,
554 .reg_mode = midgard_reg_mode_32,
555 .dest_override = midgard_dest_override_none,
556 .mask = (0x3 << 6), /* w */
557 .src1 = vector_alu_srco_unsigned(alu_src),
558 .src2 = vector_alu_srco_unsigned(alu_src)
559 },
560 };
561
562 emit_mir_instruction(ctx, ins);
563 }
564
565 /* Or, for mixed conditions (with csel_v), here's a vector version using all of
566 * r31 instead */
567
568 static void
569 emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp)
570 {
571 int condition = nir_src_index(ctx, &src->src);
572
573 /* Source with the condition's own swizzle applied */
574
575 const midgard_vector_alu_src alu_src = {
576 .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle),
577 };
578
579 /* There is no boolean move instruction. Instead, we simulate a move by
580 * ANDing the condition with itself to get it into r31 */
581
582 midgard_instruction ins = {
583 .type = TAG_ALU_4,
584 .precede_break = true,
585 .ssa_args = {
586 .src0 = condition,
587 .src1 = condition,
588 .dest = SSA_FIXED_REGISTER(31),
589 },
590 .alu = {
591 .op = midgard_alu_op_iand,
592 .outmod = midgard_outmod_int_wrap,
593 .reg_mode = midgard_reg_mode_32,
594 .dest_override = midgard_dest_override_none,
595 .mask = expand_writemask((1 << nr_comp) - 1),
596 .src1 = vector_alu_srco_unsigned(alu_src),
597 .src2 = vector_alu_srco_unsigned(alu_src)
598 },
599 };
600
601 emit_mir_instruction(ctx, ins);
602 }
603
604
605
606 /* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
607 * pinning to eliminate this move in all known cases */
608
609 static void
610 emit_indirect_offset(compiler_context *ctx, nir_src *src)
611 {
612 int offset = nir_src_index(ctx, src);
613
614 midgard_instruction ins = {
615 .type = TAG_ALU_4,
616 .ssa_args = {
617 .src0 = SSA_UNUSED_1,
618 .src1 = offset,
619 .dest = SSA_FIXED_REGISTER(REGISTER_OFFSET),
620 },
621 .alu = {
622 .op = midgard_alu_op_imov,
623 .outmod = midgard_outmod_int_wrap,
624 .reg_mode = midgard_reg_mode_32,
625 .dest_override = midgard_dest_override_none,
626 .mask = (0x3 << 6), /* w */
627 .src1 = vector_alu_srco_unsigned(zero_alu_src),
628 .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
629 },
630 };
631
632 emit_mir_instruction(ctx, ins);
633 }
634
635 #define ALU_CASE(nir, _op) \
636 case nir_op_##nir: \
637 op = midgard_alu_op_##_op; \
638 break;
639 static bool
640 nir_is_fzero_constant(nir_src src)
641 {
642 if (!nir_src_is_const(src))
643 return false;
644
645 for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
646 if (nir_src_comp_as_float(src, c) != 0.0)
647 return false;
648 }
649
650 return true;
651 }
652
653 static void
654 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
655 {
656 bool is_ssa = instr->dest.dest.is_ssa;
657
658 unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
659 unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components;
660 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
661
662 /* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
663 * supported. A few do not and are commented for now. Also, there are a
664 * number of NIR ops which Midgard does not support and need to be
665 * lowered, also TODO. This switch block emits the opcode and calling
666 * convention of the Midgard instruction; the actual packing is done
667 * later in emit_alu */
668
669 unsigned op;
670
671 switch (instr->op) {
672 ALU_CASE(fadd, fadd);
673 ALU_CASE(fmul, fmul);
674 ALU_CASE(fmin, fmin);
675 ALU_CASE(fmax, fmax);
676 ALU_CASE(imin, imin);
677 ALU_CASE(imax, imax);
678 ALU_CASE(umin, umin);
679 ALU_CASE(umax, umax);
680 ALU_CASE(ffloor, ffloor);
681 ALU_CASE(fround_even, froundeven);
682 ALU_CASE(ftrunc, ftrunc);
683 ALU_CASE(fceil, fceil);
684 ALU_CASE(fdot3, fdot3);
685 ALU_CASE(fdot4, fdot4);
686 ALU_CASE(iadd, iadd);
687 ALU_CASE(isub, isub);
688 ALU_CASE(imul, imul);
689
690 /* Zero shoved as second-arg */
691 ALU_CASE(iabs, iabsdiff);
692
693 ALU_CASE(mov, imov);
694
695 ALU_CASE(feq32, feq);
696 ALU_CASE(fne32, fne);
697 ALU_CASE(flt32, flt);
698 ALU_CASE(ieq32, ieq);
699 ALU_CASE(ine32, ine);
700 ALU_CASE(ilt32, ilt);
701 ALU_CASE(ult32, ult);
702
703 /* We don't have a native b2f32 instruction. Instead, like many
704 * GPUs, we exploit booleans as 0/~0 for false/true, and
705 * correspondingly AND
706 * by 1.0 to do the type conversion. For the moment, prime us
707 * to emit:
708 *
709 * iand [whatever], #0
710 *
711 * At the end of emit_alu (as MIR), we'll fix-up the constant
712 */
713
714 ALU_CASE(b2f32, iand);
715 ALU_CASE(b2i32, iand);
716
717 /* Likewise, we don't have a dedicated f2b32 instruction, but
718 * we can do a "not equal to 0.0" test. */
719
720 ALU_CASE(f2b32, fne);
721 ALU_CASE(i2b32, ine);
722
723 ALU_CASE(frcp, frcp);
724 ALU_CASE(frsq, frsqrt);
725 ALU_CASE(fsqrt, fsqrt);
726 ALU_CASE(fexp2, fexp2);
727 ALU_CASE(flog2, flog2);
728
729 ALU_CASE(f2i32, f2i);
730 ALU_CASE(f2u32, f2u);
731 ALU_CASE(i2f32, i2f);
732 ALU_CASE(u2f32, u2f);
733
734 ALU_CASE(fsin, fsin);
735 ALU_CASE(fcos, fcos);
736
737 /* Second op implicit #0 */
738 ALU_CASE(inot, inor);
739 ALU_CASE(iand, iand);
740 ALU_CASE(ior, ior);
741 ALU_CASE(ixor, ixor);
742 ALU_CASE(ishl, ishl);
743 ALU_CASE(ishr, iasr);
744 ALU_CASE(ushr, ilsr);
745
746 ALU_CASE(b32all_fequal2, fball_eq);
747 ALU_CASE(b32all_fequal3, fball_eq);
748 ALU_CASE(b32all_fequal4, fball_eq);
749
750 ALU_CASE(b32any_fnequal2, fbany_neq);
751 ALU_CASE(b32any_fnequal3, fbany_neq);
752 ALU_CASE(b32any_fnequal4, fbany_neq);
753
754 ALU_CASE(b32all_iequal2, iball_eq);
755 ALU_CASE(b32all_iequal3, iball_eq);
756 ALU_CASE(b32all_iequal4, iball_eq);
757
758 ALU_CASE(b32any_inequal2, ibany_neq);
759 ALU_CASE(b32any_inequal3, ibany_neq);
760 ALU_CASE(b32any_inequal4, ibany_neq);
761
762 /* Source mods will be shoved in later */
763 ALU_CASE(fabs, fmov);
764 ALU_CASE(fneg, fmov);
765 ALU_CASE(fsat, fmov);
766
767 /* For greater-or-equal, we lower to less-or-equal and flip the
768 * arguments */
769
770 case nir_op_fge:
771 case nir_op_fge32:
772 case nir_op_ige32:
773 case nir_op_uge32: {
774 op =
775 instr->op == nir_op_fge ? midgard_alu_op_fle :
776 instr->op == nir_op_fge32 ? midgard_alu_op_fle :
777 instr->op == nir_op_ige32 ? midgard_alu_op_ile :
778 instr->op == nir_op_uge32 ? midgard_alu_op_ule :
779 0;
780
781 /* Swap via temporary */
782 nir_alu_src temp = instr->src[1];
783 instr->src[1] = instr->src[0];
784 instr->src[0] = temp;
785
786 break;
787 }
788
789 case nir_op_b32csel: {
790 /* Midgard features both fcsel and icsel, depending on
791 * the type of the arguments/output. However, as long
792 * as we're careful we can _always_ use icsel and
793 * _never_ need fcsel, since the latter does additional
794 * floating-point-specific processing whereas the
795 * former just moves bits on the wire. It's not obvious
796 * why these are separate opcodes, save for the ability
797 * to do things like sat/pos/abs/neg for free */
798
799 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
800 op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
801
802 /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
803 nr_inputs = 2;
804
805 /* Emit the condition into r31 */
806
807 if (mixed)
808 emit_condition_mixed(ctx, &instr->src[0], nr_components);
809 else
810 emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]);
811
812 /* The condition is the first argument; move the other
813 * arguments up one to be a binary instruction for
814 * Midgard */
815
816 memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
817 break;
818 }
819
820 default:
821 DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
822 assert(0);
823 return;
824 }
825
826 /* Midgard can perform certain modifiers on output of an ALU op */
827 unsigned outmod;
828
829 if (midgard_is_integer_out_op(op)) {
830 outmod = midgard_outmod_int_wrap;
831 } else {
832 bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
833 outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
834 }
835
836 /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
837
838 if (instr->op == nir_op_fmax) {
839 if (nir_is_fzero_constant(instr->src[0].src)) {
840 op = midgard_alu_op_fmov;
841 nr_inputs = 1;
842 outmod = midgard_outmod_pos;
843 instr->src[0] = instr->src[1];
844 } else if (nir_is_fzero_constant(instr->src[1].src)) {
845 op = midgard_alu_op_fmov;
846 nr_inputs = 1;
847 outmod = midgard_outmod_pos;
848 }
849 }
850
851 /* Fetch unit, quirks, etc information */
852 unsigned opcode_props = alu_opcode_props[op].props;
853 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
854
855 /* src0 will always exist afaik, but src1 will not for 1-argument
856 * instructions. The latter can only be fetched if the instruction
857 * needs it, or else we may segfault. */
858
859 unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
860 unsigned src1 = nr_inputs == 2 ? nir_alu_src_index(ctx, &instr->src[1]) : SSA_UNUSED_0;
861
862 /* Rather than use the instruction generation helpers, we do it
863 * ourselves here to avoid the mess */
864
865 midgard_instruction ins = {
866 .type = TAG_ALU_4,
867 .ssa_args = {
868 .src0 = quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
869 .src1 = quirk_flipped_r24 ? src0 : src1,
870 .dest = dest,
871 }
872 };
873
874 nir_alu_src *nirmods[2] = { NULL };
875
876 if (nr_inputs == 2) {
877 nirmods[0] = &instr->src[0];
878 nirmods[1] = &instr->src[1];
879 } else if (nr_inputs == 1) {
880 nirmods[quirk_flipped_r24] = &instr->src[0];
881 } else {
882 assert(0);
883 }
884
885 /* These were lowered to a move, so apply the corresponding mod */
886
887 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
888 nir_alu_src *s = nirmods[quirk_flipped_r24];
889
890 if (instr->op == nir_op_fneg)
891 s->negate = !s->negate;
892
893 if (instr->op == nir_op_fabs)
894 s->abs = !s->abs;
895 }
896
897 bool is_int = midgard_is_integer_op(op);
898
899 midgard_vector_alu alu = {
900 .op = op,
901 .reg_mode = midgard_reg_mode_32,
902 .dest_override = midgard_dest_override_none,
903 .outmod = outmod,
904
905 /* Writemask only valid for non-SSA NIR */
906 .mask = expand_writemask((1 << nr_components) - 1),
907
908 .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
909 .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
910 };
911
912 /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
913
914 if (!is_ssa)
915 alu.mask &= expand_writemask(instr->dest.write_mask);
916
917 ins.alu = alu;
918
919 /* Late fixup for emulated instructions */
920
921 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
922 /* Presently, our second argument is an inline #0 constant.
923 * Switch over to an embedded 1.0 constant (that can't fit
924 * inline, since we're 32-bit, not 16-bit like the inline
925 * constants) */
926
927 ins.ssa_args.inline_constant = false;
928 ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
929 ins.has_constants = true;
930
931 if (instr->op == nir_op_b2f32) {
932 ins.constants[0] = 1.0f;
933 } else {
934 /* Type pun it into place */
935 uint32_t one = 0x1;
936 memcpy(&ins.constants[0], &one, sizeof(uint32_t));
937 }
938
939 ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
940 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
941 /* Lots of instructions need a 0 plonked in */
942 ins.ssa_args.inline_constant = false;
943 ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
944 ins.has_constants = true;
945 ins.constants[0] = 0.0f;
946 ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
947 } else if (instr->op == nir_op_inot) {
948 /* ~b = ~(b | b), so duplicate the source */
949 ins.ssa_args.src1 = ins.ssa_args.src0;
950 ins.alu.src2 = ins.alu.src1;
951 }
952
953 if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
954 /* To avoid duplicating the lookup tables (probably), true LUT
955 * instructions can only operate as if they were scalars. Lower
956 * them here by changing the component. */
957
958 uint8_t original_swizzle[4];
959 memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle));
960
961 for (int i = 0; i < nr_components; ++i) {
962 ins.alu.mask = (0x3) << (2 * i); /* Mask the associated component */
963
964 for (int j = 0; j < 4; ++j)
965 nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
966
967 ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
968 emit_mir_instruction(ctx, ins);
969 }
970 } else {
971 emit_mir_instruction(ctx, ins);
972 }
973 }
974
975 #undef ALU_CASE
976
977 static void
978 emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
979 {
980 /* TODO: half-floats */
981
982 if (!indirect_offset && offset < ctx->uniform_cutoff) {
983 /* Fast path: For the first 16 uniforms, direct accesses are
984 * 0-cycle, since they're just a register fetch in the usual
985 * case. So, we alias the registers while we're still in
986 * SSA-space */
987
988 int reg_slot = 23 - offset;
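/* i.e. uniform N is aliased (below) to work register r(23 - N), so the
 * first 16 uniforms map onto r23 down through r8 */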
989 alias_ssa(ctx, dest, SSA_FIXED_REGISTER(reg_slot));
990 } else {
991 /* Otherwise, read from the 'special' UBO to access
992 * higher-indexed uniforms, at a performance cost. More
993 * generally, we're emitting a UBO read instruction. */
994
995 midgard_instruction ins = m_ld_uniform_32(dest, offset);
996
997 /* TODO: Don't split */
998 ins.load_store.varying_parameters = (offset & 7) << 7;
999 ins.load_store.address = offset >> 3;
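/* e.g. uniform slot 19 splits into address = 2 with the low bits (3)
 * packed into varying_parameters */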
1000
1001 if (indirect_offset) {
1002 emit_indirect_offset(ctx, indirect_offset);
1003 ins.load_store.unknown = 0x8700; /* xxx: what is this? */
1004 } else {
1005 ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
1006 }
1007
1008 emit_mir_instruction(ctx, ins);
1009 }
1010 }
1011
1012 static void
1013 emit_varying_read(
1014 compiler_context *ctx,
1015 unsigned dest, unsigned offset,
1016 unsigned nr_comp, unsigned component,
1017 nir_src *indirect_offset)
1018 {
1019 /* XXX: Half-floats? */
1020 /* TODO: swizzle, mask */
1021
1022 midgard_instruction ins = m_ld_vary_32(dest, offset);
1023 ins.load_store.mask = (1 << nr_comp) - 1;
1024 ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component);
1025
1026 midgard_varying_parameter p = {
1027 .is_varying = 1,
1028 .interpolation = midgard_interp_default,
1029 .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0
1030 };
1031
1032 unsigned u;
1033 memcpy(&u, &p, sizeof(p));
1034 ins.load_store.varying_parameters = u;
1035
1036 if (indirect_offset) {
1037 /* We need to add in the dynamic index, moved to r27.w */
1038 emit_indirect_offset(ctx, indirect_offset);
1039 ins.load_store.unknown = 0x79e; /* xxx: what is this? */
1040 } else {
1041 /* Just a direct load */
1042 ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
1043 }
1044
1045 emit_mir_instruction(ctx, ins);
1046 }
1047
1048 static void
1049 emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
1050 {
1051 /* First, pull out the destination */
1052 unsigned dest = nir_dest_index(ctx, &instr->dest);
1053
1054 /* Now, figure out which uniform this is */
1055 int sysval = midgard_nir_sysval_for_intrinsic(instr);
1056 void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
1057
1058 /* Sysvals are prefix uniforms */
1059 unsigned uniform = ((uintptr_t) val) - 1;
1060
1061 /* Emit the read itself -- this is never indirect */
1062 emit_uniform_read(ctx, dest, uniform, NULL);
1063 }
1064
1065 /* Reads RGBA8888 value from the tilebuffer and converts to a RGBA32F register,
1066 * using scalar ops functional on earlier Midgard generations. Newer Midgard
1067 * generations have faster vectorized reads. This operation is for blend
1068 * shaders in particular; reading the tilebuffer from the fragment shader
1069 * remains an open problem. */
1070
1071 static void
1072 emit_fb_read_blend_scalar(compiler_context *ctx, unsigned reg)
1073 {
1074 midgard_instruction ins = m_ld_color_buffer_8(reg, 0);
1075 ins.load_store.swizzle = 0; /* xxxx */
1076
1077 /* Read each component sequentially */
1078
1079 for (unsigned c = 0; c < 4; ++c) {
1080 ins.load_store.mask = (1 << c);
1081 ins.load_store.unknown = c;
1082 emit_mir_instruction(ctx, ins);
1083 }
1084
1085 /* vadd.u2f hr2, zext(hr2), #0 */
1086
1087 midgard_vector_alu_src alu_src = blank_alu_src;
1088 alu_src.mod = midgard_int_zero_extend;
1089 alu_src.half = true;
1090
1091 midgard_instruction u2f = {
1092 .type = TAG_ALU_4,
1093 .ssa_args = {
1094 .src0 = reg,
1095 .src1 = SSA_UNUSED_0,
1096 .dest = reg,
1097 .inline_constant = true
1098 },
1099 .alu = {
1100 .op = midgard_alu_op_u2f,
1101 .reg_mode = midgard_reg_mode_16,
1102 .dest_override = midgard_dest_override_none,
1103 .mask = 0xF,
1104 .src1 = vector_alu_srco_unsigned(alu_src),
1105 .src2 = vector_alu_srco_unsigned(blank_alu_src),
1106 }
1107 };
1108
1109 emit_mir_instruction(ctx, u2f);
1110
1111 /* vmul.fmul.sat r1, hr2, #0.00392151 */
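/* (1/255 as a half-float -- rescales the 0-255 byte channels read from the
 * tilebuffer into the 0.0-1.0 range) */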
1112
1113 alu_src.mod = 0;
1114
1115 midgard_instruction fmul = {
1116 .type = TAG_ALU_4,
1117 .inline_constant = _mesa_float_to_half(1.0 / 255.0),
1118 .ssa_args = {
1119 .src0 = reg,
1120 .dest = reg,
1121 .src1 = SSA_UNUSED_0,
1122 .inline_constant = true
1123 },
1124 .alu = {
1125 .op = midgard_alu_op_fmul,
1126 .reg_mode = midgard_reg_mode_32,
1127 .dest_override = midgard_dest_override_none,
1128 .outmod = midgard_outmod_sat,
1129 .mask = 0xFF,
1130 .src1 = vector_alu_srco_unsigned(alu_src),
1131 .src2 = vector_alu_srco_unsigned(blank_alu_src),
1132 }
1133 };
1134
1135 emit_mir_instruction(ctx, fmul);
1136 }
1137
1138 static void
1139 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1140 {
1141 unsigned offset, reg;
1142
1143 switch (instr->intrinsic) {
1144 case nir_intrinsic_discard_if:
1145 emit_condition(ctx, &instr->src[0], true, COMPONENT_X);
1146
1147 /* fallthrough */
1148
1149 case nir_intrinsic_discard: {
1150 bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1151 struct midgard_instruction discard = v_branch(conditional, false);
1152 discard.branch.target_type = TARGET_DISCARD;
1153 emit_mir_instruction(ctx, discard);
1154
1155 ctx->can_discard = true;
1156 break;
1157 }
1158
1159 case nir_intrinsic_load_uniform:
1160 case nir_intrinsic_load_input:
1161 offset = nir_intrinsic_base(instr);
1162
1163 unsigned nr_comp = nir_intrinsic_dest_components(instr);
1164 bool direct = nir_src_is_const(instr->src[0]);
1165
1166 if (direct) {
1167 offset += nir_src_as_uint(instr->src[0]);
1168 }
1169
1170 /* We may need to apply a fractional offset */
1171 int component = instr->intrinsic == nir_intrinsic_load_input ?
1172 nir_intrinsic_component(instr) : 0;
1173 reg = nir_dest_index(ctx, &instr->dest);
1174
1175 if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
1176 emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
1177 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
1178 emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL);
1179 } else if (ctx->is_blend) {
1180 /* For blend shaders, load the input color, which is
1181 * preloaded to r0 */
1182
1183 midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
1184 emit_mir_instruction(ctx, move);
1185 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1186 midgard_instruction ins = m_ld_attr_32(reg, offset);
1187 ins.load_store.unknown = 0x1E1E; /* XXX: What is this? */
1188 ins.load_store.mask = (1 << nr_comp) - 1;
1189 emit_mir_instruction(ctx, ins);
1190 } else {
1191 DBG("Unknown load\n");
1192 assert(0);
1193 }
1194
1195 break;
1196
1197 case nir_intrinsic_load_output:
1198 assert(nir_src_is_const(instr->src[0]));
1199 reg = nir_dest_index(ctx, &instr->dest);
1200
1201 if (ctx->is_blend) {
1202 /* TODO: MRT */
1203 emit_fb_read_blend_scalar(ctx, reg);
1204 } else {
1205 DBG("Unknown output load\n");
1206 assert(0);
1207 }
1208
1209 break;
1210
1211 case nir_intrinsic_load_blend_const_color_rgba: {
1212 assert(ctx->is_blend);
1213 reg = nir_dest_index(ctx, &instr->dest);
1214
1215 /* Blend constants are embedded directly in the shader and
1216 * patched in, so we use some magic routing */
1217
1218 midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
1219 ins.has_constants = true;
1220 ins.has_blend_constant = true;
1221 emit_mir_instruction(ctx, ins);
1222 break;
1223 }
1224
1225 case nir_intrinsic_store_output:
1226 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1227
1228 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1229
1230 reg = nir_src_index(ctx, &instr->src[0]);
1231
1232 if (ctx->stage == MESA_SHADER_FRAGMENT) {
1233 /* gl_FragColor is not emitted with load/store
1234 * instructions. Instead, it gets plonked into
1235 * r0 at the end of the shader and we do the
1236 * framebuffer writeout dance. TODO: Defer
1237 * writes */
1238
1239 midgard_instruction move = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(0));
1240 emit_mir_instruction(ctx, move);
1241
1242 /* Save the index we're writing to for later reference
1243 * in the epilogue */
1244
1245 ctx->fragment_output = reg;
1246 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1247 /* Varyings are written into one of two special
1248 * varying register, r26 or r27. The register itself is
1249 * selected as the register in the st_vary instruction,
1250 * minus the base of 26. E.g. write into r27 and then
1251 * call st_vary(1) */
1252
1253 midgard_instruction ins = v_fmov(reg, blank_alu_src, SSA_FIXED_REGISTER(26));
1254 emit_mir_instruction(ctx, ins);
1255
1256 /* We should have been vectorized. That also lets us
1257 * ignore the mask, because the mask component on
1258 * st_vary is (as far as I can tell) ignored [the blob
1259 * sets it to zero] */
1260 assert(nir_intrinsic_component(instr) == 0);
1261
1262 midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset);
1263 st.load_store.unknown = 0x1E9E; /* XXX: What is this? */
1264 emit_mir_instruction(ctx, st);
1265 } else {
1266 DBG("Unknown store\n");
1267 assert(0);
1268 }
1269
1270 break;
1271
1272 case nir_intrinsic_load_alpha_ref_float:
1273 assert(instr->dest.is_ssa);
1274
1275 float ref_value = ctx->alpha_ref;
1276
1277 float *v = ralloc_array(NULL, float, 4);
1278 memcpy(v, &ref_value, sizeof(float));
1279 _mesa_hash_table_u64_insert(ctx->ssa_constants, instr->dest.ssa.index + 1, v);
1280 break;
1281
1282 case nir_intrinsic_load_viewport_scale:
1283 case nir_intrinsic_load_viewport_offset:
1284 emit_sysval_read(ctx, instr);
1285 break;
1286
1287 default:
1288 printf("Unhandled intrinsic\n");
1289 assert(0);
1290 break;
1291 }
1292 }
1293
1294 static unsigned
1295 midgard_tex_format(enum glsl_sampler_dim dim)
1296 {
1297 switch (dim) {
1298 case GLSL_SAMPLER_DIM_2D:
1299 case GLSL_SAMPLER_DIM_EXTERNAL:
1300 return TEXTURE_2D;
1301
1302 case GLSL_SAMPLER_DIM_3D:
1303 return TEXTURE_3D;
1304
1305 case GLSL_SAMPLER_DIM_CUBE:
1306 return TEXTURE_CUBE;
1307
1308 default:
1309 DBG("Unknown sampler dim type\n");
1310 assert(0);
1311 return 0;
1312 }
1313 }
1314
1315 static unsigned
1316 midgard_tex_op(nir_texop op)
1317 {
1318 switch (op) {
1319 case nir_texop_tex:
1320 case nir_texop_txb:
1321 return TEXTURE_OP_NORMAL;
1322 case nir_texop_txl:
1323 return TEXTURE_OP_LOD;
1324 default:
1325 unreachable("Unhandled texture op");
1326 }
1327 }
1328
1329 static void
1330 emit_tex(compiler_context *ctx, nir_tex_instr *instr)
1331 {
1332 /* TODO */
1333 //assert (!instr->sampler);
1334 //assert (!instr->texture_array_size);
1335
1336 /* Allocate registers via a round robin scheme to alternate between the two registers */
1337 int reg = ctx->texture_op_count & 1;
1338 int in_reg = reg, out_reg = reg;
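/* reg selects between the two texture registers, REGISTER_TEXTURE_BASE + 0
 * and + 1, used below for both the input coordinates and the result */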
1339
1340 /* Make room for the reg */
1341
1342 if (ctx->texture_index[reg] > -1)
1343 unalias_ssa(ctx, ctx->texture_index[reg]);
1344
1345 int texture_index = instr->texture_index;
1346 int sampler_index = texture_index;
1347
1348 for (unsigned i = 0; i < instr->num_srcs; ++i) {
1349 int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
1350 int index = nir_src_index(ctx, &instr->src[i].src);
1351 midgard_vector_alu_src alu_src = blank_alu_src;
1352
1353 switch (instr->src[i].src_type) {
1354 case nir_tex_src_coord: {
1355 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1356 /* For cubemaps, we need to load coords into
1357 * special r27, and then use a special ld/st op
1358 * to select the face and copy the xy into the
1359 * texture register */
1360
1361 alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
1362
1363 midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
1364 emit_mir_instruction(ctx, move);
1365
1366 midgard_instruction st = m_st_cubemap_coords(reg, 0);
1367 st.load_store.unknown = 0x24; /* XXX: What is this? */
1368 st.load_store.mask = 0x3; /* xy */
1369 st.load_store.swizzle = alu_src.swizzle;
1370 emit_mir_instruction(ctx, st);
1371
1372 } else {
1373 alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X);
1374
1375 midgard_instruction ins = v_fmov(index, alu_src, reg);
1376 ins.alu.mask = expand_writemask(0x3); /* xy */
1377 emit_mir_instruction(ctx, ins);
1378 }
1379
1380 break;
1381 }
1382
1383 case nir_tex_src_bias:
1384 case nir_tex_src_lod: {
1385 /* To keep RA simple, we put the bias/LOD into the w
1386 * component of the input source, which is otherwise in xy */
1387
1388 alu_src.swizzle = SWIZZLE_XXXX;
1389
1390 midgard_instruction ins = v_fmov(index, alu_src, reg);
1391 ins.alu.mask = expand_writemask(1 << COMPONENT_W);
1392 emit_mir_instruction(ctx, ins);
1393 break;
1394 };
1395
1396 default: {
1397 DBG("Unknown source type\n");
1398 //assert(0);
1399 break;
1400 }
1401 }
1402 }
1403
1404 /* No helper to build texture words -- we do it all here */
1405 midgard_instruction ins = {
1406 .type = TAG_TEXTURE_4,
1407 .texture = {
1408 .op = midgard_tex_op(instr->op),
1409 .format = midgard_tex_format(instr->sampler_dim),
1410 .texture_handle = texture_index,
1411 .sampler_handle = sampler_index,
1412
1413 /* TODO: Regalloc it in */
1414 .swizzle = SWIZZLE_XYZW,
1415 .mask = 0xF,
1416
1417 /* TODO: half */
1418 .in_reg_full = 1,
1419 .in_reg_swizzle = SWIZZLE_XYZW,
1420 .out_full = 1,
1421
1422 /* Always 1 */
1423 .unknown7 = 1,
1424 }
1425 };
1426
1427 /* Set registers to read and write from the same place */
1428 ins.texture.in_reg_select = in_reg;
1429 ins.texture.out_reg_select = out_reg;
1430
1431 /* Set up bias/LOD if necessary. Only register mode is supported right now.
1432 * TODO: Immediate mode for performance gains */
1433
1434 if (instr->op == nir_texop_txb || instr->op == nir_texop_txl) {
1435 ins.texture.lod_register = true;
1436
1437 midgard_tex_register_select sel = {
1438 .select = in_reg,
1439 .full = 1,
1440
1441 /* w */
1442 .component_lo = 1,
1443 .component_hi = 1
1444 };
1445
1446 uint8_t packed;
1447 memcpy(&packed, &sel, sizeof(packed));
1448 ins.texture.bias = packed;
1449 }
1450
1451 emit_mir_instruction(ctx, ins);
1452
1453 /* Simultaneously alias the destination and emit a move for it. The move will be eliminated if possible */
1454
1455 int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
1456 alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg));
1457 ctx->texture_index[reg] = o_index;
1458
1459 midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
1460 emit_mir_instruction(ctx, ins2);
1461
1462 /* Used for .cont and .last hinting */
1463 ctx->texture_op_count++;
1464 }
1465
1466 static void
1467 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
1468 {
1469 switch (instr->type) {
1470 case nir_jump_break: {
1471 /* Emit a branch out of the loop */
1472 struct midgard_instruction br = v_branch(false, false);
1473 br.branch.target_type = TARGET_BREAK;
1474 br.branch.target_break = ctx->current_loop_depth;
1475 emit_mir_instruction(ctx, br);
1476
1477 DBG("break..\n");
1478 break;
1479 }
1480
1481 default:
1482 DBG("Unknown jump type %d\n", instr->type);
1483 break;
1484 }
1485 }
1486
1487 static void
1488 emit_instr(compiler_context *ctx, struct nir_instr *instr)
1489 {
1490 switch (instr->type) {
1491 case nir_instr_type_load_const:
1492 emit_load_const(ctx, nir_instr_as_load_const(instr));
1493 break;
1494
1495 case nir_instr_type_intrinsic:
1496 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
1497 break;
1498
1499 case nir_instr_type_alu:
1500 emit_alu(ctx, nir_instr_as_alu(instr));
1501 break;
1502
1503 case nir_instr_type_tex:
1504 emit_tex(ctx, nir_instr_as_tex(instr));
1505 break;
1506
1507 case nir_instr_type_jump:
1508 emit_jump(ctx, nir_instr_as_jump(instr));
1509 break;
1510
1511 case nir_instr_type_ssa_undef:
1512 /* Spurious */
1513 break;
1514
1515 default:
1516 DBG("Unhandled instruction type\n");
1517 break;
1518 }
1519 }
1520
1521
1522 /* ALU instructions can inline or embed constants, which decreases register
1523 * pressure and saves space. */
1524
1525 #define CONDITIONAL_ATTACH(src) { \
1526 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src + 1); \
1527 \
1528 if (entry) { \
1529 attach_constants(ctx, alu, entry, alu->ssa_args.src + 1); \
1530 alu->ssa_args.src = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
1531 } \
1532 }
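/* i.e. if the given source refers to a constant cached by emit_load_const,
 * embed that constant in the instruction and redirect the source to the
 * constant register */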
1533
1534 static void
1535 inline_alu_constants(compiler_context *ctx)
1536 {
1537 mir_foreach_instr(ctx, alu) {
1538 /* Other instructions cannot inline constants */
1539 if (alu->type != TAG_ALU_4) continue;
1540
1541 /* If there is already a constant here, we can do nothing */
1542 if (alu->has_constants) continue;
1543
1544 /* It makes no sense to inline constants on a branch */
1545 if (alu->compact_branch || alu->prepacked_branch) continue;
1546
1547 CONDITIONAL_ATTACH(src0);
1548
1549 if (!alu->has_constants) {
1550 CONDITIONAL_ATTACH(src1)
1551 } else if (!alu->inline_constant) {
1552 /* Corner case: _two_ vec4 constants, for instance with a
1553 * csel. For this case, we can only use a constant
1554 * register for one, we'll have to emit a move for the
1555 * other. Note, if both arguments are constants, then
1556 * necessarily neither argument depends on the value of
1557 * any particular register. As the destination register
1558 * will be wiped, that means we can spill the constant
1559 * to the destination register.
1560 */
1561
1562 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->ssa_args.src1 + 1);
1563 unsigned scratch = alu->ssa_args.dest;
1564
1565 if (entry) {
1566 midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
1567 attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1);
1568
1569 /* Force a break XXX Defer r31 writes */
1570 ins.unit = UNIT_VLUT;
1571
1572 /* Set the source */
1573 alu->ssa_args.src1 = scratch;
1574
1575 /* Inject us -before- the last instruction which set r31 */
1576 mir_insert_instruction_before(mir_prev_op(alu), ins);
1577 }
1578 }
1579 }
1580 }
1581
1582 /* Midgard supports two types of constants, embedded constants (128-bit) and
1583 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
1584 * constants can be demoted to inline constants, for space savings and
1585 * sometimes a performance boost */
1586
1587 static void
1588 embedded_to_inline_constant(compiler_context *ctx)
1589 {
1590 mir_foreach_instr(ctx, ins) {
1591 if (!ins->has_constants) continue;
1592
1593 if (ins->ssa_args.inline_constant) continue;
1594
1595 /* Blend constants must not be inlined by definition */
1596 if (ins->has_blend_constant) continue;
1597
1598 /* src1 cannot be an inline constant due to encoding
1599 * restrictions. So, if possible we try to flip the arguments
1600 * in that case */
1601
1602 int op = ins->alu.op;
1603
1604 if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1605 switch (op) {
1606 /* These ops require an operational change to flip
1607 * their arguments TODO */
1608 case midgard_alu_op_flt:
1609 case midgard_alu_op_fle:
1610 case midgard_alu_op_ilt:
1611 case midgard_alu_op_ile:
1612 case midgard_alu_op_fcsel:
1613 case midgard_alu_op_icsel:
1614 DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
1615 default:
1616 break;
1617 }
1618
1619 if (alu_opcode_props[op].props & OP_COMMUTES) {
1620 /* Flip the SSA numbers */
1621 ins->ssa_args.src0 = ins->ssa_args.src1;
1622 ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1623
1624 /* And flip the modifiers */
1625
1626 unsigned src_temp;
1627
1628 src_temp = ins->alu.src2;
1629 ins->alu.src2 = ins->alu.src1;
1630 ins->alu.src1 = src_temp;
1631 }
1632 }
1633
1634 if (ins->ssa_args.src1 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
1635 /* Extract the source information */
1636
1637 midgard_vector_alu_src *src;
1638 int q = ins->alu.src2;
1639 midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
1640 src = m;
1641
1642 /* Component is from the swizzle, e.g. r26.w -> w component. TODO: What if x is masked out? */
1643 int component = src->swizzle & 3;
1644
1645 /* Scale constant appropriately, if we can legally */
1646 uint16_t scaled_constant = 0;
1647
1648 if (midgard_is_integer_op(op)) {
1649 unsigned int *iconstants = (unsigned int *) ins->constants;
1650 scaled_constant = (uint16_t) iconstants[component];
1651
1652 /* Constant overflow after resize */
1653 if (scaled_constant != iconstants[component])
1654 continue;
1655 } else {
1656 float original = (float) ins->constants[component];
1657 scaled_constant = _mesa_float_to_half(original);
1658
1659 /* Check for loss of precision. If this is
1660 * mediump, we don't care, but for a highp
1661 * shader, we need to pay attention. NIR
1662 * doesn't yet tell us which mode we're in!
1663 * Practically this prevents most constants
1664 * from being inlined, sadly. */
1665
1666 float fp32 = _mesa_half_to_float(scaled_constant);
1667
1668 if (fp32 != original)
1669 continue;
1670 }
1671
1672 /* We don't know how to handle these with a constant */
1673
1674 if (src->mod || src->half || src->rep_low || src->rep_high) {
1675 DBG("Bailing inline constant...\n");
1676 continue;
1677 }
1678
1679 /* Make sure that the constant is not itself a
1680 * vector by checking if all accessed values
1681 * (by the swizzle) are the same. */
1682
1683 uint32_t *cons = (uint32_t *) ins->constants;
1684 uint32_t value = cons[component];
1685
1686 bool is_vector = false;
1687 unsigned mask = effective_writemask(&ins->alu);
1688
1689 for (int c = 1; c < 4; ++c) {
1690 /* We only care if this component is actually used */
1691 if (!(mask & (1 << c)))
1692 continue;
1693
1694 uint32_t test = cons[(src->swizzle >> (2 * c)) & 3];
1695
1696 if (test != value) {
1697 is_vector = true;
1698 break;
1699 }
1700 }
1701
1702 if (is_vector)
1703 continue;
1704
1705 /* Get rid of the embedded constant */
1706 ins->has_constants = false;
1707 ins->ssa_args.src1 = SSA_UNUSED_0;
1708 ins->ssa_args.inline_constant = true;
1709 ins->inline_constant = scaled_constant;
1710 }
1711 }
1712 }
1713
1714 /* Map normal SSA sources to other SSA sources / fixed registers (like
1715 * uniforms) */
1716
1717 static void
1718 map_ssa_to_alias(compiler_context *ctx, int *ref)
1719 {
1720 /* Sign is used quite deliberately for unused */
1721 if (*ref < 0)
1722 return;
1723
1724 unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1);
1725
1726 if (alias) {
1727 /* Remove entry in leftovers to avoid a redundant fmov */
1728
1729 struct set_entry *leftover = _mesa_set_search(ctx->leftover_ssa_to_alias, ((void *) (uintptr_t) (*ref + 1)));
1730
1731 if (leftover)
1732 _mesa_set_remove(ctx->leftover_ssa_to_alias, leftover);
1733
1734 /* Assign the alias map */
1735 *ref = alias - 1;
1736 return;
1737 }
1738 }
1739
1740 /* Basic dead code elimination on the MIR itself, which cleans up e.g. the
1741 * texture pipeline */
1742
1743 static bool
1744 midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
1745 {
1746 bool progress = false;
1747
1748 mir_foreach_instr_in_block_safe(block, ins) {
1749 if (ins->type != TAG_ALU_4) continue;
1750 if (ins->compact_branch) continue;
1751
1752 if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
1753 if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
1754
1755 mir_remove_instruction(ins);
1756 progress = true;
1757 }
1758
1759 return progress;
1760 }
1761
1762 /* Dead code elimination for branches at the end of a block - only one branch
1763 * per block is legal semantically */
1764
1765 static void
1766 midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
1767 {
1768 bool branched = false;
1769
1770 mir_foreach_instr_in_block_safe(block, ins) {
1771 if (!midgard_is_branch_unit(ins->unit)) continue;
1772
1773 /* We ignore prepacked branches since the fragment epilogue is
1774 * just generally special */
1775 if (ins->prepacked_branch) continue;
1776
1777 /* Discards are similarly special and may not correspond to the
1778 * end of a block */
1779
1780 if (ins->branch.target_type == TARGET_DISCARD) continue;
1781
1782 if (branched) {
1783 /* We already branched, so this is dead */
1784 mir_remove_instruction(ins);
1785 }
1786
1787 branched = true;
1788 }
1789 }
1790
1791 static bool
1792 mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
1793 {
1794 /* abs or neg */
1795 if (!is_int && src.mod) return true;
1796
1797 /* swizzle */
1798 for (unsigned c = 0; c < 4; ++c) {
1799 if (!(mask & (1 << c))) continue;
1800 if (((src.swizzle >> (2*c)) & 3) != c) return true;
1801 }
1802
1803 return false;
1804 }
1805
1806 static bool
1807 mir_nontrivial_source2_mod(midgard_instruction *ins)
1808 {
1809 unsigned mask = squeeze_writemask(ins->alu.mask);
1810 bool is_int = midgard_is_integer_op(ins->alu.op);
1811
1812 midgard_vector_alu_src src2 =
1813 vector_alu_from_unsigned(ins->alu.src2);
1814
1815 return mir_nontrivial_mod(src2, is_int, mask);
1816 }
1817
1818 static bool
1819 mir_nontrivial_outmod(midgard_instruction *ins)
1820 {
1821 bool is_int = midgard_is_integer_op(ins->alu.op);
1822 unsigned mod = ins->alu.outmod;
1823
1824 if (is_int)
1825 return mod != midgard_outmod_int_wrap;
1826 else
1827 return mod != midgard_outmod_none;
1828 }
1829
1830 static bool
1831 midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
1832 {
1833 bool progress = false;
1834
1835 mir_foreach_instr_in_block_safe(block, ins) {
1836 if (ins->type != TAG_ALU_4) continue;
1837 if (!OP_IS_MOVE(ins->alu.op)) continue;
1838
1839 unsigned from = ins->ssa_args.src1;
1840 unsigned to = ins->ssa_args.dest;
1841
1842 /* We only work on pure SSA */
1843
1844 if (to >= SSA_FIXED_MINIMUM) continue;
1845 if (from >= SSA_FIXED_MINIMUM) continue;
1846 if (to >= ctx->func->impl->ssa_alloc) continue;
1847 if (from >= ctx->func->impl->ssa_alloc) continue;
1848
1849 /* Constant propagation is not handled here, either */
1850 if (ins->ssa_args.inline_constant) continue;
1851 if (ins->has_constants) continue;
1852
1853 if (mir_nontrivial_source2_mod(ins)) continue;
1854 if (mir_nontrivial_outmod(ins)) continue;
1855
1856 /* We're clear -- rewrite */
1857 mir_rewrite_index_src(ctx, to, from);
1858 mir_remove_instruction(ins);
1859 progress |= true;
1860 }
1861
1862 return progress;
1863 }
1864
1865 /* fmov.pos is an idiom for fpos. Propagate the .pos up to the source, so that
1866 * the move can be propagated away entirely */
1867
1868 static bool
1869 mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
1870 {
1871 /* Nothing to do */
1872 if (comp == midgard_outmod_none)
1873 return true;
1874
1875 if (*outmod == midgard_outmod_none) {
1876 *outmod = comp;
1877 return true;
1878 }
1879
1880 /* TODO: Compose rules */
1881 return false;
1882 }
1883
1884 static bool
1885 midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
1886 {
1887 bool progress = false;
1888
1889 mir_foreach_instr_in_block_safe(block, ins) {
1890 if (ins->type != TAG_ALU_4) continue;
1891 if (ins->alu.op != midgard_alu_op_fmov) continue;
1892 if (ins->alu.outmod != midgard_outmod_pos) continue;
1893
1894 /* TODO: Registers? */
1895 unsigned src = ins->ssa_args.src1;
1896 if (src >= ctx->func->impl->ssa_alloc) continue;
1897 assert(!mir_has_multiple_writes(ctx, src));
1898
1899 /* There might be a source modifier, too */
1900 if (mir_nontrivial_source2_mod(ins)) continue;
1901
1902 /* Backpropagate the modifier */
1903 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
1904 if (v->type != TAG_ALU_4) continue;
1905 if (v->ssa_args.dest != src) continue;
1906
1907 /* Can we even take a float outmod? */
1908 if (midgard_is_integer_out_op(v->alu.op)) continue;
1909
1910 midgard_outmod_float temp = v->alu.outmod;
1911 progress |= mir_compose_float_outmod(&temp, ins->alu.outmod);
1912
1913 /* Throw in the towel... */
1914 if (!progress) break;
1915
1916 /* Otherwise, transfer the modifier */
1917 v->alu.outmod = temp;
1918 ins->alu.outmod = midgard_outmod_none;
1919
1920 break;
1921 }
1922 }
1923
1924 return progress;
1925 }
1926
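/* Moves into the fixed texture argument registers (REGISTER_TEXTURE_BASE and
 * the register after it) can sometimes be eliminated by retargeting the
 * full-mask writer of the source to write the texture register directly */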
1927 static bool
1928 midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
1929 {
1930 bool progress = false;
1931
1932 mir_foreach_instr_in_block_safe(block, ins) {
1933 if (ins->type != TAG_ALU_4) continue;
1934 if (!OP_IS_MOVE(ins->alu.op)) continue;
1935
1936 unsigned from = ins->ssa_args.src1;
1937 unsigned to = ins->ssa_args.dest;
1938
1939 /* Make sure it's simple enough for us to handle */
1940
1941 if (from >= SSA_FIXED_MINIMUM) continue;
1942 if (from >= ctx->func->impl->ssa_alloc) continue;
1943 if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
1944 if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
1945
1946 bool eliminated = false;
1947
1948 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
1949 /* The texture registers are not SSA, so be careful.
1950  * Conservatively, just stop the search if we hit any
1951  * non-ALU op (even one that may not write to where we are) */
1952
1953 if (v->type != TAG_ALU_4)
1954 break;
1955
1956 if (v->ssa_args.dest == from) {
1957 /* We don't want to track partial writes ... */
1958 if (v->alu.mask == 0xF) {
1959 v->ssa_args.dest = to;
1960 eliminated = true;
1961 }
1962
1963 break;
1964 }
1965 }
1966
1967 if (eliminated)
1968 mir_remove_instruction(ins);
1969
1970 progress |= eliminated;
1971 }
1972
1973 return progress;
1974 }
1975
1976 /* The following passes reorder MIR instructions to enable better scheduling */
1977
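/* Try to place load/store ops next to each other, presumably so the scheduler
 * can pair them within one load/store bundle. An orphaned load/store searches
 * a small window ahead for a direct load to hoist up beside it */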
1978 static void
1979 midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
1980 {
1981 mir_foreach_instr_in_block_safe(block, ins) {
1982 if (ins->type != TAG_LOAD_STORE_4) continue;
1983
1984 /* We've found a load/store op. Check if next is also load/store. */
1985 midgard_instruction *next_op = mir_next_op(ins);
1986 if (&next_op->link != &block->instructions) {
1987 if (next_op->type == TAG_LOAD_STORE_4) {
1988 /* If so, we're done since we're a pair */
1989 ins = mir_next_op(ins);
1990 continue;
1991 }
1992
1993 /* Maximum search distance to pair, to avoid register pressure disasters */
1994 int search_distance = 8;
1995
1996 /* Otherwise, we have an orphaned load/store -- search for another load */
1997 mir_foreach_instr_in_block_from(block, c, mir_next_op(ins)) {
1998 /* Terminate search if necessary */
1999 if (!(search_distance--)) break;
2000
2001 if (c->type != TAG_LOAD_STORE_4) continue;
2002
2003 /* Stores cannot be reordered, since they have
2004 * dependencies. For the same reason, indirect
2005 * loads cannot be reordered as their index is
2006 * loaded in r27.w */
2007
2008 if (OP_IS_STORE(c->load_store.op)) continue;
2009
2010 /* It appears the 0x800 bit is set whenever a
2011 * load is direct, unset when it is indirect.
2012 * Skip indirect loads. */
2013
2014 if (!(c->load_store.unknown & 0x800)) continue;
2015
2016 /* We found one! Move it up to pair and remove it from the old location */
2017
2018 mir_insert_instruction_before(ins, *c);
2019 mir_remove_instruction(c);
2020
2021 break;
2022 }
2023 }
2024 }
2025 }
2026
2027 /* If there are leftovers after the pass below, emit actual fmov
2028  * instructions for the slow-but-correct path */
2029
2030 static void
2031 emit_leftover_move(compiler_context *ctx)
2032 {
2033 set_foreach(ctx->leftover_ssa_to_alias, leftover) {
2034 int base = ((uintptr_t) leftover->key) - 1;
2035 int mapped = base;
2036
2037 map_ssa_to_alias(ctx, &mapped);
2038 EMIT(fmov, mapped, blank_alu_src, base);
2039 }
2040 }
2041
2042 static void
2043 actualise_ssa_to_alias(compiler_context *ctx)
2044 {
2045 mir_foreach_instr(ctx, ins) {
2046 map_ssa_to_alias(ctx, &ins->ssa_args.src0);
2047 map_ssa_to_alias(ctx, &ins->ssa_args.src1);
2048 }
2049
2050 emit_leftover_move(ctx);
2051 }
2052
2053 static void
2054 emit_fragment_epilogue(compiler_context *ctx)
2055 {
2056 /* Special case: writing out constants requires us to include the move
2057 * explicitly now, so shove it into r0 */
2058
2059 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1);
2060
2061 if (constant_value) {
2062 midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0));
2063 attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1);
2064 emit_mir_instruction(ctx, ins);
2065 }
2066
2067 /* Perform the actual fragment writeout. We have two writeout/branch
2068 * instructions, forming a loop until writeout is successful as per the
2069 * docs. TODO: gl_FragDepth */
2070
2071 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2072 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2073 }
2074
2075 /* For the blend epilogue, we need to convert the blended fragment vec4 (stored
2076  * in r0) to an RGBA8888 value by scaling and type-converting. We then write it
2077  * out with the int8 analogue of the fragment epilogue */
2078
2079 static void
2080 emit_blend_epilogue(compiler_context *ctx)
2081 {
2082 /* vmul.fmul.none.fulllow hr48, r0, #255 */
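/* Scale the blended colour in r0 by 255, writing into what is presumably the
 * low half-register view of r24 (hr48 in the mnemonic above) */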
2083
2084 midgard_instruction scale = {
2085 .type = TAG_ALU_4,
2086 .unit = UNIT_VMUL,
2087 .inline_constant = _mesa_float_to_half(255.0),
2088 .ssa_args = {
2089 .src0 = SSA_FIXED_REGISTER(0),
2090 .src1 = SSA_UNUSED_0,
2091 .dest = SSA_FIXED_REGISTER(24),
2092 .inline_constant = true
2093 },
2094 .alu = {
2095 .op = midgard_alu_op_fmul,
2096 .reg_mode = midgard_reg_mode_32,
2097 .dest_override = midgard_dest_override_lower,
2098 .mask = 0xFF,
2099 .src1 = vector_alu_srco_unsigned(blank_alu_src),
2100 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2101 }
2102 };
2103
2104 emit_mir_instruction(ctx, scale);
2105
2106 /* vadd.f2u8.pos.low hr0, hr48, #0 */
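/* Convert to u8 with the .pos output modifier (clamp to positive), packing
 * the result into the low bytes of r0; the half flag below marks the source
 * as a half (16-bit) register */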
2107
2108 midgard_vector_alu_src alu_src = blank_alu_src;
2109 alu_src.half = true;
2110
2111 midgard_instruction f2u8 = {
2112 .type = TAG_ALU_4,
2113 .ssa_args = {
2114 .src0 = SSA_FIXED_REGISTER(24),
2115 .src1 = SSA_UNUSED_0,
2116 .dest = SSA_FIXED_REGISTER(0),
2117 .inline_constant = true
2118 },
2119 .alu = {
2120 .op = midgard_alu_op_f2u8,
2121 .reg_mode = midgard_reg_mode_16,
2122 .dest_override = midgard_dest_override_lower,
2123 .outmod = midgard_outmod_pos,
2124 .mask = 0xF,
2125 .src1 = vector_alu_srco_unsigned(alu_src),
2126 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2127 }
2128 };
2129
2130 emit_mir_instruction(ctx, f2u8);
2131
2132 /* vmul.imov.quarter r0, r0, r0 */
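/* An 8-bit move of r0 onto itself, seemingly so the writeout branch sources
 * the freshly packed 8-bit colour */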
2133
2134 midgard_instruction imov_8 = {
2135 .type = TAG_ALU_4,
2136 .ssa_args = {
2137 .src0 = SSA_UNUSED_1,
2138 .src1 = SSA_FIXED_REGISTER(0),
2139 .dest = SSA_FIXED_REGISTER(0),
2140 },
2141 .alu = {
2142 .op = midgard_alu_op_imov,
2143 .reg_mode = midgard_reg_mode_8,
2144 .dest_override = midgard_dest_override_none,
2145 .outmod = midgard_outmod_int_wrap,
2146 .mask = 0xFF,
2147 .src1 = vector_alu_srco_unsigned(blank_alu_src),
2148 .src2 = vector_alu_srco_unsigned(blank_alu_src),
2149 }
2150 };
2151
2152 /* Emit branch epilogue with the 8-bit move as the source */
2153
2154 emit_mir_instruction(ctx, imov_8);
2155 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always);
2156
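/* As in the fragment epilogue, the writeout is issued twice, forming a loop
 * until the writeout succeeds */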
2157 emit_mir_instruction(ctx, imov_8);
2158 EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always);
2159 }
2160
2161 static midgard_block *
2162 emit_block(compiler_context *ctx, nir_block *block)
2163 {
2164 midgard_block *this_block = calloc(sizeof(midgard_block), 1);
2165 list_addtail(&this_block->link, &ctx->blocks);
2166
2167 this_block->is_scheduled = false;
2168 ++ctx->block_count;
2169
2170 ctx->texture_index[0] = -1;
2171 ctx->texture_index[1] = -1;
2172
2173 /* Add us as a successor to the block we are following */
2174 if (ctx->current_block)
2175 midgard_block_add_successor(ctx->current_block, this_block);
2176
2177 /* Set up current block */
2178 list_inithead(&this_block->instructions);
2179 ctx->current_block = this_block;
2180
2181 nir_foreach_instr(instr, block) {
2182 emit_instr(ctx, instr);
2183 ++ctx->instruction_count;
2184 }
2185
2186 inline_alu_constants(ctx);
2187 embedded_to_inline_constant(ctx);
2188
2189 /* Perform the heavy lifting for aliasing */
2190 actualise_ssa_to_alias(ctx);
2191
2192 midgard_pair_load_store(ctx, this_block);
2193
2194 /* Append fragment shader epilogue (value writeout) */
2195 if (ctx->stage == MESA_SHADER_FRAGMENT) {
2196 if (block == nir_impl_last_block(ctx->func->impl)) {
2197 if (ctx->is_blend)
2198 emit_blend_epilogue(ctx);
2199 else
2200 emit_fragment_epilogue(ctx);
2201 }
2202 }
2203
2204 if (block == nir_start_block(ctx->func->impl))
2205 ctx->initial_block = this_block;
2206
2207 if (block == nir_impl_last_block(ctx->func->impl))
2208 ctx->final_block = this_block;
2209
2210 /* Allow the next control flow to access us retroactively, for
2211 * branching etc */
2212 ctx->current_block = this_block;
2213
2214 /* Document the fallthrough chain */
2215 ctx->previous_source_block = this_block;
2216
2217 return this_block;
2218 }
2219
2220 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2221
2222 static void
2223 emit_if(struct compiler_context *ctx, nir_if *nif)
2224 {
2225 /* Conditional branches expect the condition in r31.w; emit a move for
2226 * that in the _previous_ block (which is the current block). */
2227 emit_condition(ctx, &nif->condition, true, COMPONENT_X);
2228
2229 /* Speculatively emit the branch, but we can't fill it in until later */
2230 EMIT(branch, true, true);
2231 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2232
2233 /* Emit the two subblocks */
2234 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2235
2236 /* Emit a jump from the end of the then block to the end of the else */
2237 EMIT(branch, false, false);
2238 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2239
2240 /* Emit second block, and check if it's empty */
2241
2242 int else_idx = ctx->block_count;
2243 int count_in = ctx->instruction_count;
2244 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2245 int after_else_idx = ctx->block_count;
2246
2247 /* Now that we have the subblocks emitted, fix up the branches */
2248
2249 assert(then_block);
2250 assert(else_block);
2251
2252 if (ctx->instruction_count == count_in) {
2253 /* The else block is empty, so don't emit an exit jump */
2254 mir_remove_instruction(then_exit);
2255 then_branch->branch.target_block = after_else_idx;
2256 } else {
2257 then_branch->branch.target_block = else_idx;
2258 then_exit->branch.target_block = after_else_idx;
2259 }
2260 }
2261
2262 static void
2263 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2264 {
2265 /* Remember where we are */
2266 midgard_block *start_block = ctx->current_block;
2267
2268 /* Allocate a loop number, growing the current inner loop depth */
2269 int loop_idx = ++ctx->current_loop_depth;
2270
2271 /* Get index from before the body so we can loop back later */
2272 int start_idx = ctx->block_count;
2273
2274 /* Emit the body itself */
2275 emit_cf_list(ctx, &nloop->body);
2276
2277 /* Emit a branch back to the start of the loop body */
2278 struct midgard_instruction br_back = v_branch(false, false);
2279 br_back.branch.target_block = start_idx;
2280 emit_mir_instruction(ctx, br_back);
2281
2282 /* Mark down that branch in the graph. Note that we're really branching
2283 * to the block *after* we started in. TODO: Why doesn't the branch
2284 * itself have an off-by-one then...? */
2285 midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
2286
2287 /* Find the index of the block about to follow us (note: we don't add
2288 * one; blocks are 0-indexed so we get a fencepost problem) */
2289 int break_block_idx = ctx->block_count;
2290
2291 /* Fix up the break statements we emitted to point to the right place,
2292 * now that we can allocate a block number for them */
2293
2294 list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
2295 mir_foreach_instr_in_block(block, ins) {
2296 if (ins->type != TAG_ALU_4) continue;
2297 if (!ins->compact_branch) continue;
2298 if (ins->prepacked_branch) continue;
2299
2300 /* We found a branch -- check the type to see if we need to do anything */
2301 if (ins->branch.target_type != TARGET_BREAK) continue;
2302
2303 /* It's a break! Check if it's our break */
2304 if (ins->branch.target_break != loop_idx) continue;
2305
2306 /* Okay, cool, we're breaking out of this loop.
2307 * Rewrite from a break to a goto */
2308
2309 ins->branch.target_type = TARGET_GOTO;
2310 ins->branch.target_block = break_block_idx;
2311 }
2312 }
2313
2314 /* Now that we've finished emitting the loop, free up the depth again
2315 * so we play nice with recursion amid nested loops */
2316 --ctx->current_loop_depth;
2317 }
2318
2319 static midgard_block *
2320 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2321 {
2322 midgard_block *start_block = NULL;
2323
2324 foreach_list_typed(nir_cf_node, node, node, list) {
2325 switch (node->type) {
2326 case nir_cf_node_block: {
2327 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2328
2329 if (!start_block)
2330 start_block = block;
2331
2332 break;
2333 }
2334
2335 case nir_cf_node_if:
2336 emit_if(ctx, nir_cf_node_as_if(node));
2337 break;
2338
2339 case nir_cf_node_loop:
2340 emit_loop(ctx, nir_cf_node_as_loop(node));
2341 break;
2342
2343 case nir_cf_node_function:
2344 assert(0);
2345 break;
2346 }
2347 }
2348
2349 return start_block;
2350 }
2351
2352 /* Due to lookahead, we need to report the first tag executed in the command
2353 * stream and in branch targets. An initial block might be empty, so iterate
2354 * until we find one that 'works' */
2355
2356 static unsigned
2357 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2358 {
2359 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2360
2361 unsigned first_tag = 0;
2362
2363 do {
2364 midgard_bundle *initial_bundle = util_dynarray_element(&initial_block->bundles, midgard_bundle, 0);
2365
2366 if (initial_bundle) {
2367 first_tag = initial_bundle->tag;
2368 break;
2369 }
2370
2371 /* Initial block is empty, try the next block */
2372 initial_block = list_first_entry(&(initial_block->link), midgard_block, link);
2373 } while(initial_block != NULL);
2374
2375 assert(first_tag);
2376 return first_tag;
2377 }
2378
2379 int
2380 midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend)
2381 {
2382 struct util_dynarray *compiled = &program->compiled;
2383
2384 midgard_debug = debug_get_option_midgard_debug();
2385
2386 compiler_context ictx = {
2387 .nir = nir,
2388 .stage = nir->info.stage,
2389
2390 .is_blend = is_blend,
2391 .blend_constant_offset = -1,
2392
2393 .alpha_ref = program->alpha_ref
2394 };
2395
2396 compiler_context *ctx = &ictx;
2397
2398 /* TODO: Decide this at runtime */
2399 ctx->uniform_cutoff = 8;
2400
2401 /* Initialize the hash tables at a global (not per-block) level */
2402
2403 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2404 ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
2405 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2406 ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
2407 ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
2408
2409 /* Record the varying mapping for the command stream's bookkeeping */
2410
2411 struct exec_list *varyings =
2412 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
2413
2414 unsigned max_varying = 0;
2415 nir_foreach_variable(var, varyings) {
2416 unsigned loc = var->data.driver_location;
2417 unsigned sz = glsl_type_size(var->type, FALSE);
2418
2419 for (int c = loc; c < (loc + sz); ++c) {
2420 program->varyings[c] = var->data.location;
2421 max_varying = MAX2(max_varying, c);
2422 }
2423 }
2424
2425 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2426 * (so we don't accidentally duplicate the epilogue since mesa/st has
2427 * messed with our I/O quite a bit already) */
2428
2429 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2430
2431 if (ctx->stage == MESA_SHADER_VERTEX)
2432 NIR_PASS_V(nir, nir_lower_viewport_transform);
2433
2434 NIR_PASS_V(nir, nir_lower_var_copies);
2435 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2436 NIR_PASS_V(nir, nir_split_var_copies);
2437 NIR_PASS_V(nir, nir_lower_var_copies);
2438 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2439 NIR_PASS_V(nir, nir_lower_var_copies);
2440 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2441
2442 NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
2443
2444 /* Optimisation passes */
2445
2446 optimise_nir(nir);
2447
2448 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2449 nir_print_shader(nir, stdout);
2450 }
2451
2452 /* Assign sysvals and counts, now that we're sure
2453 * (post-optimisation) */
2454
2455 midgard_nir_assign_sysvals(ctx, nir);
2456
2457 program->uniform_count = nir->num_uniforms;
2458 program->sysval_count = ctx->sysval_count;
2459 memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
2460
2461 program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
2462 program->varying_count = max_varying + 1; /* Fencepost off-by-one */
2463
2464 nir_foreach_function(func, nir) {
2465 if (!func->impl)
2466 continue;
2467
2468 list_inithead(&ctx->blocks);
2469 ctx->block_count = 0;
2470 ctx->func = func;
2471
2472 emit_cf_list(ctx, &func->impl->body);
2473 emit_block(ctx, func->impl->end_block);
2474
2475 break; /* TODO: Multi-function shaders */
2476 }
2477
2478 util_dynarray_init(compiled, NULL);
2479
2480 /* MIR-level optimizations */
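/* Run the peephole passes to a fixed point, since each pass can expose new
 * opportunities for the others */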
2481
2482 bool progress = false;
2483
2484 do {
2485 progress = false;
2486
2487 mir_foreach_block(ctx, block) {
2488 progress |= midgard_opt_pos_propagate(ctx, block);
2489 progress |= midgard_opt_copy_prop(ctx, block);
2490 progress |= midgard_opt_copy_prop_tex(ctx, block);
2491 progress |= midgard_opt_dead_code_eliminate(ctx, block);
2492 }
2493 } while (progress);
2494
2495 /* Nested control-flow can result in dead branches at the end of the
2496 * block. This messes with our analysis and is just dead code, so cull
2497 * them */
2498 mir_foreach_block(ctx, block) {
2499 midgard_opt_cull_dead_branch(ctx, block);
2500 }
2501
2502 /* Schedule! */
2503 schedule_program(ctx);
2504
2505 /* Now that all the bundles are scheduled and we can calculate block
2506 * sizes, emit actual branch instructions rather than placeholders */
2507
2508 int br_block_idx = 0;
2509
2510 mir_foreach_block(ctx, block) {
2511 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2512 for (int c = 0; c < bundle->instruction_count; ++c) {
2513 midgard_instruction *ins = bundle->instructions[c];
2514
2515 if (!midgard_is_branch_unit(ins->unit)) continue;
2516
2517 if (ins->prepacked_branch) continue;
2518
2519 /* Parse some basic branch info */
2520 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2521 bool is_conditional = ins->branch.conditional;
2522 bool is_inverted = ins->branch.invert_conditional;
2523 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2524
2525 /* Determine the block we're jumping to */
2526 int target_number = ins->branch.target_block;
2527
2528 /* Report the destination tag */
2529 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
2530
2531 /* Count up the number of quadwords we're
2532  * jumping over, i.e. the number of quadwords
2533  * between (br_block_idx) and (target_number) */
2534
2535 int quadword_offset = 0;
2536
2537 if (is_discard) {
2538 /* Jump to the end of the shader. We
2539  * need to count not only the
2540  * following blocks, but also the
2541  * remaining bundles of our current
2542  * block (since a discard can occur in
2543  * the middle of a block) */
2544
2545 midgard_block *blk = mir_get_block(ctx, br_block_idx + 1);
2546
2547 for (midgard_bundle *bun = bundle + 1; bun < (midgard_bundle *)((char*) block->bundles.data + block->bundles.size); ++bun) {
2548 quadword_offset += quadword_size(bun->tag);
2549 }
2550
2551 mir_foreach_block_from(ctx, blk, b) {
2552 quadword_offset += b->quadword_count;
2553 }
2554
2555 } else if (target_number > br_block_idx) {
2556 /* Jump forward */
2557
2558 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2559 midgard_block *blk = mir_get_block(ctx, idx);
2560 assert(blk);
2561
2562 quadword_offset += blk->quadword_count;
2563 }
2564 } else {
2565 /* Jump backwards */
2566
2567 for (int idx = br_block_idx; idx >= target_number; --idx) {
2568 midgard_block *blk = mir_get_block(ctx, idx);
2569 assert(blk);
2570
2571 quadword_offset -= blk->quadword_count;
2572 }
2573 }
2574
2575 /* Unconditional extended branches (far jumps)
2576 * have issues, so we always use a conditional
2577 * branch, setting the condition to always for
2578 * unconditional. For compact unconditional
2579 * branches, cond isn't used so it doesn't
2580 * matter what we pick. */
2581
2582 midgard_condition cond =
2583 !is_conditional ? midgard_condition_always :
2584 is_inverted ? midgard_condition_false :
2585 midgard_condition_true;
2586
2587 midgard_jmp_writeout_op op =
2588 is_discard ? midgard_jmp_writeout_op_discard :
2589 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2590 midgard_jmp_writeout_op_branch_cond;
2591
2592 if (!is_compact) {
2593 midgard_branch_extended branch =
2594 midgard_create_branch_extended(
2595 cond, op,
2596 dest_tag,
2597 quadword_offset);
2598
2599 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2600 } else if (is_conditional || is_discard) {
2601 midgard_branch_cond branch = {
2602 .op = op,
2603 .dest_tag = dest_tag,
2604 .offset = quadword_offset,
2605 .cond = cond
2606 };
2607
2608 assert(branch.offset == quadword_offset);
2609
2610 memcpy(&ins->br_compact, &branch, sizeof(branch));
2611 } else {
2612 assert(op == midgard_jmp_writeout_op_branch_uncond);
2613
2614 midgard_branch_uncond branch = {
2615 .op = op,
2616 .dest_tag = dest_tag,
2617 .offset = quadword_offset,
2618 .unknown = 1
2619 };
2620
2621 assert(branch.offset == quadword_offset);
2622
2623 memcpy(&ins->br_compact, &branch, sizeof(branch));
2624 }
2625 }
2626 }
2627
2628 ++br_block_idx;
2629 }
2630
2631 /* Emit flat binary from the instruction arrays. Iterate each block in
2632 * sequence. Save instruction boundaries such that lookahead tags can
2633 * be assigned easily */
2634
2635 /* Cache _all_ bundles in source order for lookahead across failed branches */
2636
2637 int bundle_count = 0;
2638 mir_foreach_block(ctx, block) {
2639 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2640 }
2641 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2642 int bundle_idx = 0;
2643 mir_foreach_block(ctx, block) {
2644 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2645 source_order_bundles[bundle_idx++] = bundle;
2646 }
2647 }
2648
2649 int current_bundle = 0;
2650
2651 /* Midgard prefetches instruction types, so during emission we
2652  * need to look ahead. Unless this is the last instruction, in
2653  * which case we return 1. Or if this is the second-to-last and the
2654  * last is an ALU, then it's also 1... */
2655
2656 mir_foreach_block(ctx, block) {
2657 mir_foreach_bundle_in_block(block, bundle) {
2658 int lookahead = 1;
2659
2660 if (current_bundle + 1 < bundle_count) {
2661 uint8_t next = source_order_bundles[current_bundle + 1]->tag;
2662
2663 if (!(current_bundle + 2 < bundle_count) && IS_ALU(next)) {
2664 lookahead = 1;
2665 } else {
2666 lookahead = next;
2667 }
2668 }
2669
2670 emit_binary_bundle(ctx, bundle, compiled, lookahead);
2671 ++current_bundle;
2672 }
2673
2674 /* TODO: Free deeper */
2675 //util_dynarray_fini(&block->instructions);
2676 }
2677
2678 free(source_order_bundles);
2679
2680 /* Report the very first tag executed */
2681 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
2682
2683 /* Deal with off-by-one related to the fencepost problem */
2684 program->work_register_count = ctx->work_registers + 1;
2685
2686 program->can_discard = ctx->can_discard;
2687 program->uniform_cutoff = ctx->uniform_cutoff;
2688
2689 program->blend_patch_offset = ctx->blend_constant_offset;
2690
2691 if (midgard_debug & MIDGARD_DBG_SHADERS)
2692 disassemble_midgard(program->compiled.data, program->compiled.size);
2693
2694 return 0;
2695 }