pan/mdg: Defer nir_fuse_io_16 until after opts
mesa.git: src/panfrost/midgard/midgard_compile.c
1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <sys/mman.h>
27 #include <fcntl.h>
28 #include <stdint.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <err.h>
32
33 #include "main/mtypes.h"
34 #include "compiler/glsl/glsl_to_nir.h"
35 #include "compiler/nir_types.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "util/half_float.h"
38 #include "util/u_math.h"
39 #include "util/u_debug.h"
40 #include "util/u_dynarray.h"
41 #include "util/list.h"
42
43
44 #include "midgard.h"
45 #include "midgard_nir.h"
46 #include "midgard_compile.h"
47 #include "midgard_ops.h"
48 #include "helpers.h"
49 #include "compiler.h"
50 #include "midgard_quirks.h"
51
52 #include "disassemble.h"
53
54 static const struct debug_named_value debug_options[] = {
55 {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
56 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
57 {"shaderdb", MIDGARD_DBG_SHADERDB, "Prints shader-db statistics"},
58 DEBUG_NAMED_VALUE_END
59 };
60
61 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
62
63 unsigned SHADER_DB_COUNT = 0;
64
65 int midgard_debug = 0;
66
67 #define DBG(fmt, ...) \
68 do { if (midgard_debug & MIDGARD_DBG_MSGS) \
69 fprintf(stderr, "%s:%d: "fmt, \
70 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
71 static midgard_block *
72 create_empty_block(compiler_context *ctx)
73 {
74 midgard_block *blk = rzalloc(ctx, midgard_block);
75
76 blk->base.predecessors = _mesa_set_create(blk,
77 _mesa_hash_pointer,
78 _mesa_key_pointer_equal);
79
80 blk->base.name = ctx->block_source_count++;
81
82 return blk;
83 }
84
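/* Split control flow at the current emission point: open a fresh block, link
 * it as a successor of the current one and continue emitting there. The
 * emitters below use this around instructions that must end a block, such as
 * discards, writeouts and control barriers. */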
85 static void
86 schedule_barrier(compiler_context *ctx)
87 {
88 midgard_block *temp = ctx->after_block;
89 ctx->after_block = create_empty_block(ctx);
90 ctx->block_count++;
91 list_addtail(&ctx->after_block->base.link, &ctx->blocks);
92 list_inithead(&ctx->after_block->base.instructions);
93 pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
94 ctx->current_block = ctx->after_block;
95 ctx->after_block = temp;
96 }
97
98 /* Helpers to generate midgard_instructions using macro magic, since every
99  * driver seems to do it that way */
100
101 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
102
103 #define M_LOAD_STORE(name, store, T) \
104 static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
105 midgard_instruction i = { \
106 .type = TAG_LOAD_STORE_4, \
107 .mask = 0xF, \
108 .dest = ~0, \
109 .src = { ~0, ~0, ~0, ~0 }, \
110 .swizzle = SWIZZLE_IDENTITY_4, \
111 .load_store = { \
112 .op = midgard_op_##name, \
113 .address = address \
114 } \
115 }; \
116 \
117 if (store) { \
118 i.src[0] = ssa; \
119 i.src_types[0] = T; \
120 i.dest_type = T; \
121 } else { \
122 i.dest = ssa; \
123 i.dest_type = T; \
124 } \
125 return i; \
126 }
127
128 #define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
129 #define M_STORE(name, T) M_LOAD_STORE(name, true, T)
130
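/* For instance, M_LOAD(ld_vary_32, nir_type_uint32) expands to a helper
 * m_ld_vary_32(ssa, address) that builds a TAG_LOAD_STORE_4 instruction with
 * `ssa` as its destination; the M_STORE variants instead wire `ssa` up as
 * src[0], i.e. the value to be stored. */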
131 M_LOAD(ld_attr_32, nir_type_uint32);
132 M_LOAD(ld_vary_32, nir_type_uint32);
133 M_LOAD(ld_ubo_int4, nir_type_uint32);
134 M_LOAD(ld_int4, nir_type_uint32);
135 M_STORE(st_int4, nir_type_uint32);
136 M_LOAD(ld_color_buffer_32u, nir_type_uint32);
137 M_LOAD(ld_color_buffer_as_fp16, nir_type_float16);
138 M_STORE(st_vary_32, nir_type_uint32);
139 M_LOAD(ld_cubemap_coords, nir_type_uint32);
140 M_LOAD(ld_compute_id, nir_type_uint32);
141
142 static midgard_instruction
143 v_branch(bool conditional, bool invert)
144 {
145 midgard_instruction ins = {
146 .type = TAG_ALU_4,
147 .unit = ALU_ENAB_BRANCH,
148 .compact_branch = true,
149 .branch = {
150 .conditional = conditional,
151 .invert_conditional = invert
152 },
153 .dest = ~0,
154 .src = { ~0, ~0, ~0, ~0 },
155 };
156
157 return ins;
158 }
159
160 static midgard_branch_extended
161 midgard_create_branch_extended( midgard_condition cond,
162 midgard_jmp_writeout_op op,
163 unsigned dest_tag,
164 signed quadword_offset)
165 {
166 /* The condition code is actually a LUT describing a function to
167 * combine multiple condition codes. However, we only support a single
168          * condition code at the moment, so we just duplicate it over a
169          * bunch of times. */
170
171 uint16_t duplicated_cond =
172 (cond << 14) |
173 (cond << 12) |
174 (cond << 10) |
175 (cond << 8) |
176 (cond << 6) |
177 (cond << 4) |
178 (cond << 2) |
179 (cond << 0);
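        /* For example, a 2-bit condition of 0b01 replicated into every 2-bit
         * slot of the LUT gives 0x5555. */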
180
181 midgard_branch_extended branch = {
182 .op = op,
183 .dest_tag = dest_tag,
184 .offset = quadword_offset,
185 .cond = duplicated_cond
186 };
187
188 return branch;
189 }
190
191 static void
192 attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
193 {
194 ins->has_constants = true;
195 memcpy(&ins->constants, constants, 16);
196 }
197
198 static int
199 glsl_type_size(const struct glsl_type *type, bool bindless)
200 {
201 return glsl_count_attribute_slots(type, false);
202 }
203
204 /* Lower fdot2 to a vector multiplication followed by channel addition */
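/* That is, fdot2(a, b) becomes t = fmul(a, b); fadd(t.x, t.y) */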
205 static void
206 midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
207 {
208 if (alu->op != nir_op_fdot2)
209 return;
210
211 b->cursor = nir_before_instr(&alu->instr);
212
213 nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
214 nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
215
216 nir_ssa_def *product = nir_fmul(b, src0, src1);
217
218 nir_ssa_def *sum = nir_fadd(b,
219 nir_channel(b, product, 0),
220 nir_channel(b, product, 1));
221
222 /* Replace the fdot2 with this sum */
223 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
224 }
225
226 static bool
227 midgard_nir_lower_fdot2(nir_shader *shader)
228 {
229 bool progress = false;
230
231 nir_foreach_function(function, shader) {
232 if (!function->impl) continue;
233
234 nir_builder _b;
235 nir_builder *b = &_b;
236 nir_builder_init(b, function->impl);
237
238 nir_foreach_block(block, function->impl) {
239 nir_foreach_instr_safe(instr, block) {
240 if (instr->type != nir_instr_type_alu) continue;
241
242 nir_alu_instr *alu = nir_instr_as_alu(instr);
243 midgard_nir_lower_fdot2_body(b, alu);
244
245 progress |= true;
246 }
247 }
248
249 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
250
251 }
252
253 return progress;
254 }
255
256 static const nir_variable *
257 search_var(struct exec_list *vars, unsigned driver_loc)
258 {
259 nir_foreach_variable(var, vars) {
260 if (var->data.driver_location == driver_loc)
261 return var;
262 }
263
264 return NULL;
265 }
266
267 /* Midgard can write all of color, depth and stencil in a single writeout
268 * operation, so we merge depth/stencil stores with color stores.
269 * If there are no color stores, we add a write to the "depth RT".
270 */
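/* The merged store_combined_output_pan intrinsic built below carries four
 * sources -- color, output offset, Z value and S value -- with the
 * PAN_WRITEOUT_* mask stashed in the intrinsic's component index. */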
271 static bool
272 midgard_nir_lower_zs_store(nir_shader *nir)
273 {
274 if (nir->info.stage != MESA_SHADER_FRAGMENT)
275 return false;
276
277 nir_variable *z_var = NULL, *s_var = NULL;
278
279 nir_foreach_variable(var, &nir->outputs) {
280 if (var->data.location == FRAG_RESULT_DEPTH)
281 z_var = var;
282 else if (var->data.location == FRAG_RESULT_STENCIL)
283 s_var = var;
284 }
285
286 if (!z_var && !s_var)
287 return false;
288
289 bool progress = false;
290
291 nir_foreach_function(function, nir) {
292 if (!function->impl) continue;
293
294 nir_intrinsic_instr *z_store = NULL, *s_store = NULL;
295
296 nir_foreach_block(block, function->impl) {
297 nir_foreach_instr_safe(instr, block) {
298 if (instr->type != nir_instr_type_intrinsic)
299 continue;
300
301 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
302 if (intr->intrinsic != nir_intrinsic_store_output)
303 continue;
304
305 if (z_var && nir_intrinsic_base(intr) == z_var->data.driver_location) {
306 assert(!z_store);
307 z_store = intr;
308 }
309
310 if (s_var && nir_intrinsic_base(intr) == s_var->data.driver_location) {
311 assert(!s_store);
312 s_store = intr;
313 }
314 }
315 }
316
317 if (!z_store && !s_store) continue;
318
319 bool replaced = false;
320
321 nir_foreach_block(block, function->impl) {
322 nir_foreach_instr_safe(instr, block) {
323 if (instr->type != nir_instr_type_intrinsic)
324 continue;
325
326 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
327 if (intr->intrinsic != nir_intrinsic_store_output)
328 continue;
329
330 const nir_variable *var = search_var(&nir->outputs, nir_intrinsic_base(intr));
331 assert(var);
332
333 if (var->data.location != FRAG_RESULT_COLOR &&
334 var->data.location < FRAG_RESULT_DATA0)
335 continue;
336
337 assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");
338
339 nir_builder b;
340 nir_builder_init(&b, function->impl);
341
342 assert(!z_store || z_store->instr.block == instr->block);
343 assert(!s_store || s_store->instr.block == instr->block);
344 b.cursor = nir_after_block_before_jump(instr->block);
345
346 nir_intrinsic_instr *combined_store;
347 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
348
349 combined_store->num_components = intr->src[0].ssa->num_components;
350
351 nir_intrinsic_set_base(combined_store, nir_intrinsic_base(intr));
352
353 unsigned writeout = PAN_WRITEOUT_C;
354 if (z_store)
355 writeout |= PAN_WRITEOUT_Z;
356 if (s_store)
357 writeout |= PAN_WRITEOUT_S;
358
359 nir_intrinsic_set_component(combined_store, writeout);
360
361 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
362
363 struct nir_ssa_def *src[4] = {
364 intr->src[0].ssa,
365 intr->src[1].ssa,
366 z_store ? z_store->src[0].ssa : zero,
367 s_store ? s_store->src[0].ssa : zero,
368 };
369
370 for (int i = 0; i < 4; ++i)
371 combined_store->src[i] = nir_src_for_ssa(src[i]);
372
373 nir_builder_instr_insert(&b, &combined_store->instr);
374
375 nir_instr_remove(instr);
376
377 replaced = true;
378 }
379 }
380
381 /* Insert a store to the depth RT (0xff) if needed */
382 if (!replaced) {
383 nir_builder b;
384 nir_builder_init(&b, function->impl);
385
386 nir_block *block = NULL;
387 if (z_store && s_store)
388 assert(z_store->instr.block == s_store->instr.block);
389
390 if (z_store)
391 block = z_store->instr.block;
392 else
393 block = s_store->instr.block;
394
395 b.cursor = nir_after_block_before_jump(block);
396
397 nir_intrinsic_instr *combined_store;
398 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
399
400 combined_store->num_components = 4;
401
402 nir_intrinsic_set_base(combined_store, 0);
403
404 unsigned writeout = 0;
405 if (z_store)
406 writeout |= PAN_WRITEOUT_Z;
407 if (s_store)
408 writeout |= PAN_WRITEOUT_S;
409
410 nir_intrinsic_set_component(combined_store, writeout);
411
412 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
413
414 struct nir_ssa_def *src[4] = {
415 nir_imm_vec4(&b, 0, 0, 0, 0),
416 zero,
417 z_store ? z_store->src[0].ssa : zero,
418 s_store ? s_store->src[0].ssa : zero,
419 };
420
421 for (int i = 0; i < 4; ++i)
422 combined_store->src[i] = nir_src_for_ssa(src[i]);
423
424 nir_builder_instr_insert(&b, &combined_store->instr);
425 }
426
427 if (z_store)
428 nir_instr_remove(&z_store->instr);
429
430 if (s_store)
431 nir_instr_remove(&s_store->instr);
432
433 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
434 progress = true;
435 }
436
437 return progress;
438 }
439
440 /* Lower and optimize the NIR before translating to MIR. Among other things
    * this flushes undefined values to zero (nir_undef_to_zero). */
441
442 static void
443 optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
444 {
445 bool progress;
446 unsigned lower_flrp =
447 (nir->options->lower_flrp16 ? 16 : 0) |
448 (nir->options->lower_flrp32 ? 32 : 0) |
449 (nir->options->lower_flrp64 ? 64 : 0);
450
451 NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
452 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);
453
454 nir_lower_tex_options lower_tex_options = {
455 .lower_txs_lod = true,
456 .lower_txp = ~0,
457 .lower_tex_without_implicit_lod =
458 (quirks & MIDGARD_EXPLICIT_LOD),
459
460 /* TODO: we have native gradient.. */
461 .lower_txd = true,
462 };
463
464 NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
465
466 /* Must lower fdot2 after tex is lowered */
467 NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
468
469 /* T720 is broken. */
470
471 if (quirks & MIDGARD_BROKEN_LOD)
472 NIR_PASS_V(nir, midgard_nir_lod_errata);
473
474 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_early);
475
476 do {
477 progress = false;
478
479 NIR_PASS(progress, nir, nir_lower_var_copies);
480 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
481
482 NIR_PASS(progress, nir, nir_copy_prop);
483 NIR_PASS(progress, nir, nir_opt_remove_phis);
484 NIR_PASS(progress, nir, nir_opt_dce);
485 NIR_PASS(progress, nir, nir_opt_dead_cf);
486 NIR_PASS(progress, nir, nir_opt_cse);
487 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
488 NIR_PASS(progress, nir, nir_opt_algebraic);
489 NIR_PASS(progress, nir, nir_opt_constant_folding);
490
491 if (lower_flrp != 0) {
492 bool lower_flrp_progress = false;
493 NIR_PASS(lower_flrp_progress,
494 nir,
495 nir_lower_flrp,
496 lower_flrp,
497 false /* always_precise */,
498 nir->options->lower_ffma);
499 if (lower_flrp_progress) {
500 NIR_PASS(progress, nir,
501 nir_opt_constant_folding);
502 progress = true;
503 }
504
505 /* Nothing should rematerialize any flrps, so we only
506 * need to do this lowering once.
507 */
508 lower_flrp = 0;
509 }
510
511 NIR_PASS(progress, nir, nir_opt_undef);
512 NIR_PASS(progress, nir, nir_undef_to_zero);
513
514 NIR_PASS(progress, nir, nir_opt_loop_unroll,
515 nir_var_shader_in |
516 nir_var_shader_out |
517 nir_var_function_temp);
518
519 NIR_PASS(progress, nir, nir_opt_vectorize);
520 } while (progress);
521
522         /* Run after the optimization loop above so it can fuse more of the conversions those opts expose */
523 if (!is_blend)
524 NIR_PASS(progress, nir, nir_fuse_io_16);
525
526 /* Must be run at the end to prevent creation of fsin/fcos ops */
527 NIR_PASS(progress, nir, midgard_nir_scale_trig);
528
529 do {
530 progress = false;
531
532 NIR_PASS(progress, nir, nir_opt_dce);
533 NIR_PASS(progress, nir, nir_opt_algebraic);
534 NIR_PASS(progress, nir, nir_opt_constant_folding);
535 NIR_PASS(progress, nir, nir_copy_prop);
536 } while (progress);
537
538 NIR_PASS(progress, nir, nir_opt_algebraic_late);
539 NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
540
541 /* We implement booleans as 32-bit 0/~0 */
542 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
543
544 /* Now that booleans are lowered, we can run out late opts */
545 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
546 NIR_PASS(progress, nir, midgard_nir_cancel_inot);
547
548 NIR_PASS(progress, nir, nir_copy_prop);
549 NIR_PASS(progress, nir, nir_opt_dce);
550
551 /* Take us out of SSA */
552 NIR_PASS(progress, nir, nir_lower_locals_to_regs);
553 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
554
555 /* We are a vector architecture; write combine where possible */
556 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
557 NIR_PASS(progress, nir, nir_lower_vec_to_movs);
558
559 NIR_PASS(progress, nir, nir_opt_dce);
560 }
561
562 /* Do not actually emit a load; instead, cache the constant for inlining */
563
564 static void
565 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
566 {
567 nir_ssa_def def = instr->def;
568
569 midgard_constants *consts = rzalloc(NULL, midgard_constants);
570
571 assert(instr->def.num_components * instr->def.bit_size <= sizeof(*consts) * 8);
572
573 #define RAW_CONST_COPY(bits) \
574 nir_const_value_to_array(consts->u##bits, instr->value, \
575 instr->def.num_components, u##bits)
576
577 switch (instr->def.bit_size) {
578 case 64:
579 RAW_CONST_COPY(64);
580 break;
581 case 32:
582 RAW_CONST_COPY(32);
583 break;
584 case 16:
585 RAW_CONST_COPY(16);
586 break;
587 case 8:
588 RAW_CONST_COPY(8);
589 break;
590 default:
591 unreachable("Invalid bit_size for load_const instruction\n");
592 }
593
594 /* Shifted for SSA, +1 for off-by-one */
595 _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, consts);
596 }
597
598 /* Normally constants are embedded implicitly, but for I/O and such we have to
599 * explicitly emit a move with the constant source */
600
601 static void
602 emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
603 {
604 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
605
606 if (constant_value) {
607 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
608 attach_constants(ctx, &ins, constant_value, node + 1);
609 emit_mir_instruction(ctx, ins);
610 }
611 }
612
613 static bool
614 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
615 {
616 unsigned comp = src->swizzle[0];
617
618 for (unsigned c = 1; c < nr_components; ++c) {
619 if (src->swizzle[c] != comp)
620 return true;
621 }
622
623 return false;
624 }
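/* e.g. a .xxxx swizzle is treated as scalar (returns false), while .xyzw is
 * non-scalar (returns true) */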
625
626 #define ALU_CASE(nir, _op) \
627 case nir_op_##nir: \
628 op = midgard_alu_op_##_op; \
629 assert(src_bitsize == dst_bitsize); \
630 break;
631
632 #define ALU_CASE_RTZ(nir, _op) \
633 case nir_op_##nir: \
634 op = midgard_alu_op_##_op; \
635 roundmode = MIDGARD_RTZ; \
636 break;
637
638 #define ALU_CHECK_CMP(sext) \
639 assert(src_bitsize == 16 || src_bitsize == 32); \
640 assert(dst_bitsize == 16 || dst_bitsize == 32); \
641
642 #define ALU_CASE_BCAST(nir, _op, count) \
643 case nir_op_##nir: \
644 op = midgard_alu_op_##_op; \
645 broadcast_swizzle = count; \
646 ALU_CHECK_CMP(true); \
647 break;
648
649 #define ALU_CASE_CMP(nir, _op, sext) \
650 case nir_op_##nir: \
651 op = midgard_alu_op_##_op; \
652 ALU_CHECK_CMP(sext); \
653 break;
654
655 /* Analyze the sizes of the dest and inputs to determine reg mode. */
656
657 static midgard_reg_mode
658 reg_mode_for_nir(nir_alu_instr *instr)
659 {
660 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
661 unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
662 unsigned max_bitsize = MAX2(src_bitsize, dst_bitsize);
663
664 /* We don't have fp16 LUTs, so we'll want to emit code like:
665 *
666 * vlut.fsinr hr0, hr0
667 *
668 * where both input and output are 16-bit but the operation is carried
669 * out in 32-bit
670 */
671
672 switch (instr->op) {
673 case nir_op_fsqrt:
674 case nir_op_frcp:
675 case nir_op_frsq:
676 case nir_op_fsin:
677 case nir_op_fcos:
678 case nir_op_fexp2:
679 case nir_op_flog2:
680 max_bitsize = MAX2(max_bitsize, 32);
681 break;
682
683 /* These get lowered to moves */
684 case nir_op_pack_32_4x8:
685 max_bitsize = 8;
686 break;
687 case nir_op_pack_32_2x16:
688 max_bitsize = 16;
689 break;
690 default:
691 break;
692 }
693
694
695 switch (max_bitsize) {
696 /* Use 16 pipe for 8 since we don't support vec16 yet */
697 case 8:
698 case 16:
699 return midgard_reg_mode_16;
700 case 32:
701 return midgard_reg_mode_32;
702 case 64:
703 return midgard_reg_mode_64;
704 default:
705 unreachable("Invalid bit size");
706 }
707 }
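/* e.g. an fsin with 16-bit sources and destination still selects
 * midgard_reg_mode_32 because of the LUT restriction above, while
 * pack_32_4x8 takes the 8-bit path (executed on the 16-bit pipe). */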
708
709 /* Compare mir_lower_invert */
710 static bool
711 nir_accepts_inot(nir_op op, unsigned src)
712 {
713 switch (op) {
714 case nir_op_ior:
715 case nir_op_iand: /* TODO: b2f16 */
716 case nir_op_ixor:
717 return true;
718 case nir_op_b32csel:
719 /* Only the condition */
720 return (src == 0);
721 default:
722 return false;
723 }
724 }
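/* For the ops above, an inot feeding a source is folded into that source's
 * invert modifier (src_invert) by mir_copy_src() rather than emitted as a
 * separate ALU op; compare mir_lower_invert for the MIR-level counterpart. */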
725
726 static bool
727 mir_accept_dest_mod(compiler_context *ctx, nir_dest **dest, nir_op op)
728 {
729 if (pan_has_dest_mod(dest, op)) {
730 assert((*dest)->is_ssa);
731 BITSET_SET(ctx->already_emitted, (*dest)->ssa.index);
732 return true;
733 }
734
735 return false;
736 }
737
738 static void
739 mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i, unsigned to, bool *abs, bool *neg, bool *not, enum midgard_roundmode *roundmode, bool is_int, unsigned bcast_count)
740 {
741 nir_alu_src src = instr->src[i];
742
743 if (!is_int) {
744 if (pan_has_source_mod(&src, nir_op_fneg))
745 *neg = !(*neg);
746
747 if (pan_has_source_mod(&src, nir_op_fabs))
748 *abs = true;
749 }
750
751 if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
752 *not = true;
753
754 if (roundmode) {
755 if (pan_has_source_mod(&src, nir_op_fround_even))
756 *roundmode = MIDGARD_RTE;
757
758 if (pan_has_source_mod(&src, nir_op_ftrunc))
759 *roundmode = MIDGARD_RTZ;
760
761 if (pan_has_source_mod(&src, nir_op_ffloor))
762 *roundmode = MIDGARD_RTN;
763
764 if (pan_has_source_mod(&src, nir_op_fceil))
765 *roundmode = MIDGARD_RTP;
766 }
767
768 unsigned bits = nir_src_bit_size(src.src);
769
770 ins->src[to] = nir_src_index(NULL, &src.src);
771 ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
772
773 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
774 ins->swizzle[to][c] = src.swizzle[
775 (!bcast_count || c < bcast_count) ? c :
776 (bcast_count - 1)];
777 }
778 }
779
780 /* Midgard features both fcsel and icsel, depending on whether you want int or
781 * float modifiers. NIR's csel is typeless, so we want a heuristic to guess if
782 * we should emit an int or float csel depending on what modifiers could be
783  * placed. In the absence of modifiers, this is probably arbitrary. */
784
785 static bool
786 mir_is_bcsel_float(nir_alu_instr *instr)
787 {
788 nir_op intmods[] = {
789 nir_op_i2i8, nir_op_i2i16,
790 nir_op_i2i32, nir_op_i2i64
791 };
792
793 nir_op floatmods[] = {
794 nir_op_fabs, nir_op_fneg,
795 nir_op_f2f16, nir_op_f2f32,
796 nir_op_f2f64
797 };
798
799 nir_op floatdestmods[] = {
800 nir_op_fsat, nir_op_fsat_signed, nir_op_fclamp_pos,
801 nir_op_f2f16, nir_op_f2f32
802 };
803
804 signed score = 0;
805
806 for (unsigned i = 1; i < 3; ++i) {
807 nir_alu_src s = instr->src[i];
808 for (unsigned q = 0; q < ARRAY_SIZE(intmods); ++q) {
809 if (pan_has_source_mod(&s, intmods[q]))
810 score--;
811 }
812 }
813
814 for (unsigned i = 1; i < 3; ++i) {
815 nir_alu_src s = instr->src[i];
816 for (unsigned q = 0; q < ARRAY_SIZE(floatmods); ++q) {
817 if (pan_has_source_mod(&s, floatmods[q]))
818 score++;
819 }
820 }
821
822 for (unsigned q = 0; q < ARRAY_SIZE(floatdestmods); ++q) {
823 nir_dest *dest = &instr->dest.dest;
824 if (pan_has_dest_mod(&dest, floatdestmods[q]))
825 score++;
826 }
827
828 return (score > 0);
829 }
830
831 static void
832 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
833 {
834 nir_dest *dest = &instr->dest.dest;
835
836 if (dest->is_ssa && BITSET_TEST(ctx->already_emitted, dest->ssa.index))
837 return;
838
839 /* Derivatives end up emitted on the texture pipe, not the ALUs. This
840 * is handled elsewhere */
841
842 if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
843 midgard_emit_derivatives(ctx, instr);
844 return;
845 }
846
847 bool is_ssa = dest->is_ssa;
848
849 unsigned nr_components = nir_dest_num_components(*dest);
850 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
851 unsigned op = 0;
852
853 /* Number of components valid to check for the instruction (the rest
854 * will be forced to the last), or 0 to use as-is. Relevant as
855 * ball-type instructions have a channel count in NIR but are all vec4
856 * in Midgard */
857
858 unsigned broadcast_swizzle = 0;
859
860 /* What register mode should we operate in? */
861 midgard_reg_mode reg_mode =
862 reg_mode_for_nir(instr);
863
864 /* Should we swap arguments? */
865 bool flip_src12 = false;
866
867 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
868 unsigned dst_bitsize = nir_dest_bit_size(*dest);
869
870 enum midgard_roundmode roundmode = MIDGARD_RTE;
871
872 switch (instr->op) {
873 ALU_CASE(fadd, fadd);
874 ALU_CASE(fmul, fmul);
875 ALU_CASE(fmin, fmin);
876 ALU_CASE(fmax, fmax);
877 ALU_CASE(imin, imin);
878 ALU_CASE(imax, imax);
879 ALU_CASE(umin, umin);
880 ALU_CASE(umax, umax);
881 ALU_CASE(ffloor, ffloor);
882 ALU_CASE(fround_even, froundeven);
883 ALU_CASE(ftrunc, ftrunc);
884 ALU_CASE(fceil, fceil);
885 ALU_CASE(fdot3, fdot3);
886 ALU_CASE(fdot4, fdot4);
887 ALU_CASE(iadd, iadd);
888 ALU_CASE(isub, isub);
889 ALU_CASE(imul, imul);
890
891 /* Zero shoved as second-arg */
892 ALU_CASE(iabs, iabsdiff);
893
894 ALU_CASE(mov, imov);
895
896 ALU_CASE_CMP(feq32, feq, false);
897 ALU_CASE_CMP(fne32, fne, false);
898 ALU_CASE_CMP(flt32, flt, false);
899 ALU_CASE_CMP(ieq32, ieq, true);
900 ALU_CASE_CMP(ine32, ine, true);
901 ALU_CASE_CMP(ilt32, ilt, true);
902 ALU_CASE_CMP(ult32, ult, false);
903
904 /* We don't have a native b2f32 instruction. Instead, like many
905 * GPUs, we exploit booleans as 0/~0 for false/true, and
906 * correspondingly AND
907 * by 1.0 to do the type conversion. For the moment, prime us
908 * to emit:
909 *
910 * iand [whatever], #0
911 *
912 * At the end of emit_alu (as MIR), we'll fix-up the constant
913 */
914
915 ALU_CASE_CMP(b2f32, iand, true);
916 ALU_CASE_CMP(b2f16, iand, true);
917 ALU_CASE_CMP(b2i32, iand, true);
918
919 /* Likewise, we don't have a dedicated f2b32 instruction, but
920 * we can do a "not equal to 0.0" test. */
921
922 ALU_CASE_CMP(f2b32, fne, false);
923 ALU_CASE_CMP(i2b32, ine, true);
924
925 ALU_CASE(frcp, frcp);
926 ALU_CASE(frsq, frsqrt);
927 ALU_CASE(fsqrt, fsqrt);
928 ALU_CASE(fexp2, fexp2);
929 ALU_CASE(flog2, flog2);
930
931 ALU_CASE_RTZ(f2i64, f2i_rte);
932 ALU_CASE_RTZ(f2u64, f2u_rte);
933 ALU_CASE_RTZ(i2f64, i2f_rte);
934 ALU_CASE_RTZ(u2f64, u2f_rte);
935
936 ALU_CASE_RTZ(f2i32, f2i_rte);
937 ALU_CASE_RTZ(f2u32, f2u_rte);
938 ALU_CASE_RTZ(i2f32, i2f_rte);
939 ALU_CASE_RTZ(u2f32, u2f_rte);
940
941 ALU_CASE_RTZ(f2i8, f2i_rte);
942 ALU_CASE_RTZ(f2u8, f2u_rte);
943
944 ALU_CASE_RTZ(f2i16, f2i_rte);
945 ALU_CASE_RTZ(f2u16, f2u_rte);
946 ALU_CASE_RTZ(i2f16, i2f_rte);
947 ALU_CASE_RTZ(u2f16, u2f_rte);
948
949 ALU_CASE(fsin, fsin);
950 ALU_CASE(fcos, fcos);
951
952 /* We'll get 0 in the second arg, so:
953 * ~a = ~(a | 0) = nor(a, 0) */
954 ALU_CASE(inot, inor);
955 ALU_CASE(iand, iand);
956 ALU_CASE(ior, ior);
957 ALU_CASE(ixor, ixor);
958 ALU_CASE(ishl, ishl);
959 ALU_CASE(ishr, iasr);
960 ALU_CASE(ushr, ilsr);
961
962 ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
963 ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
964 ALU_CASE_CMP(b32all_fequal4, fball_eq, true);
965
966 ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
967 ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
968 ALU_CASE_CMP(b32any_fnequal4, fbany_neq, true);
969
970 ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
971 ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
972 ALU_CASE_CMP(b32all_iequal4, iball_eq, true);
973
974 ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
975 ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
976 ALU_CASE_CMP(b32any_inequal4, ibany_neq, true);
977
978 /* Source mods will be shoved in later */
979 ALU_CASE(fabs, fmov);
980 ALU_CASE(fneg, fmov);
981 ALU_CASE(fsat, fmov);
982 ALU_CASE(fsat_signed, fmov);
983 ALU_CASE(fclamp_pos, fmov);
984
985 /* For size conversion, we use a move. Ideally though we would squash
986 * these ops together; maybe that has to happen after in NIR as part of
987 * propagation...? An earlier algebraic pass ensured we step down by
988 * only / exactly one size. If stepping down, we use a dest override to
989 * reduce the size; if stepping up, we use a larger-sized move with a
990 * half source and a sign/zero-extension modifier */
991
992 case nir_op_i2i8:
993 case nir_op_i2i16:
994 case nir_op_i2i32:
995 case nir_op_i2i64:
996 case nir_op_u2u8:
997 case nir_op_u2u16:
998 case nir_op_u2u32:
999 case nir_op_u2u64:
1000 case nir_op_f2f16:
1001 case nir_op_f2f32:
1002 case nir_op_f2f64: {
1003 if (instr->op == nir_op_f2f16 || instr->op == nir_op_f2f32 ||
1004 instr->op == nir_op_f2f64)
1005 op = midgard_alu_op_fmov;
1006 else
1007 op = midgard_alu_op_imov;
1008
1009 break;
1010 }
1011
1012 /* For greater-or-equal, we lower to less-or-equal and flip the
1013 * arguments */
1014
1015 case nir_op_fge:
1016 case nir_op_fge32:
1017 case nir_op_ige32:
1018 case nir_op_uge32: {
1019 op =
1020 instr->op == nir_op_fge ? midgard_alu_op_fle :
1021 instr->op == nir_op_fge32 ? midgard_alu_op_fle :
1022 instr->op == nir_op_ige32 ? midgard_alu_op_ile :
1023 instr->op == nir_op_uge32 ? midgard_alu_op_ule :
1024 0;
1025
1026 flip_src12 = true;
1027 ALU_CHECK_CMP(false);
1028 break;
1029 }
1030
1031 case nir_op_b32csel: {
1032 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
1033 bool is_float = mir_is_bcsel_float(instr);
1034 op = is_float ?
1035 (mixed ? midgard_alu_op_fcsel_v : midgard_alu_op_fcsel) :
1036 (mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel);
1037
1038 break;
1039 }
1040
1041 case nir_op_unpack_32_2x16:
1042 case nir_op_unpack_32_4x8:
1043 case nir_op_pack_32_2x16:
1044 case nir_op_pack_32_4x8: {
1045 op = midgard_alu_op_imov;
1046 break;
1047 }
1048
1049 default:
1050 DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
1051 assert(0);
1052 return;
1053 }
1054
1055 /* Promote imov to fmov if it might help inline a constant */
1056 if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src)
1057 && nir_src_bit_size(instr->src[0].src) == 32
1058 && nir_is_same_comp_swizzle(instr->src[0].swizzle,
1059 nir_src_num_components(instr->src[0].src))) {
1060 op = midgard_alu_op_fmov;
1061 }
1062
1063 /* Midgard can perform certain modifiers on output of an ALU op */
1064
1065 unsigned outmod = 0;
1066 bool is_int = midgard_is_integer_op(op);
1067
1068 if (midgard_is_integer_out_op(op)) {
1069 outmod = midgard_outmod_int_wrap;
1070 } else if (instr->op == nir_op_fsat) {
1071 outmod = midgard_outmod_sat;
1072 } else if (instr->op == nir_op_fsat_signed) {
1073 outmod = midgard_outmod_sat_signed;
1074 } else if (instr->op == nir_op_fclamp_pos) {
1075 outmod = midgard_outmod_pos;
1076 }
1077
1078 /* Fetch unit, quirks, etc information */
1079 unsigned opcode_props = alu_opcode_props[op].props;
1080 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
1081
1082 /* Look for floating point mods. We have the mods fsat, fsat_signed,
1083 * and fpos. We also have the relations (note 3 * 2 = 6 cases):
1084 *
1085 * fsat_signed(fpos(x)) = fsat(x)
1086 * fsat_signed(fsat(x)) = fsat(x)
1087 * fpos(fsat_signed(x)) = fsat(x)
1088 * fpos(fsat(x)) = fsat(x)
1089 * fsat(fsat_signed(x)) = fsat(x)
1090 * fsat(fpos(x)) = fsat(x)
1091 *
1092 * So by cases any composition of output modifiers is equivalent to
1093 * fsat alone.
1094 */
1095
1096 if (!is_int && !(opcode_props & OP_TYPE_CONVERT)) {
1097 bool fpos = mir_accept_dest_mod(ctx, &dest, nir_op_fclamp_pos);
1098 bool fsat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat);
1099 bool ssat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat_signed);
1100 bool prior = (outmod != midgard_outmod_none);
1101 int count = (int) prior + (int) fpos + (int) ssat + (int) fsat;
1102
1103 outmod = ((count > 1) || fsat) ? midgard_outmod_sat :
1104 fpos ? midgard_outmod_pos :
1105 ssat ? midgard_outmod_sat_signed :
1106 outmod;
1107 }
1108
1109 midgard_instruction ins = {
1110 .type = TAG_ALU_4,
1111 .dest = nir_dest_index(dest),
1112 .dest_type = nir_op_infos[instr->op].output_type
1113 | nir_dest_bit_size(*dest),
1114 .roundmode = roundmode,
1115 };
1116
1117 enum midgard_roundmode *roundptr = (opcode_props & MIDGARD_ROUNDS) ?
1118 &ins.roundmode : NULL;
1119
1120 for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
1121 ins.src[i] = ~0;
1122
1123 if (quirk_flipped_r24) {
1124 ins.src[0] = ~0;
1125 mir_copy_src(&ins, instr, 0, 1, &ins.src_abs[1], &ins.src_neg[1], &ins.src_invert[1], roundptr, is_int, broadcast_swizzle);
1126 } else {
1127 for (unsigned i = 0; i < nr_inputs; ++i) {
1128 unsigned to = i;
1129
1130 if (instr->op == nir_op_b32csel) {
1131 /* The condition is the first argument; move
1132 * the other arguments up one to be a binary
1133 * instruction for Midgard with the condition
1134 * last */
1135
1136 if (i == 0)
1137 to = 2;
1138 else if (flip_src12)
1139 to = 2 - i;
1140 else
1141 to = i - 1;
1142 } else if (flip_src12) {
1143 to = 1 - to;
1144 }
1145
1146 mir_copy_src(&ins, instr, i, to, &ins.src_abs[to], &ins.src_neg[to], &ins.src_invert[to], roundptr, is_int, broadcast_swizzle);
1147
1148 /* (!c) ? a : b = c ? b : a */
1149 if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
1150 ins.src_invert[2] = false;
1151 flip_src12 ^= true;
1152 }
1153 }
1154 }
1155
1156 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
1157 /* Lowered to move */
1158 if (instr->op == nir_op_fneg)
1159 ins.src_neg[1] ^= true;
1160
1161 if (instr->op == nir_op_fabs)
1162 ins.src_abs[1] = true;
1163 }
1164
1165 ins.mask = mask_of(nr_components);
1166
1167 midgard_vector_alu alu = {
1168 .op = op,
1169 .reg_mode = reg_mode,
1170 .outmod = outmod,
1171 };
1172
1173 /* Apply writemask if non-SSA, keeping in mind that we can't write to
1174 * components that don't exist. Note modifier => SSA => !reg => no
1175 * writemask, so we don't have to worry about writemasks here.*/
1176
1177 if (!is_ssa)
1178 ins.mask &= instr->dest.write_mask;
1179
1180 ins.alu = alu;
1181
1182 /* Late fixup for emulated instructions */
1183
1184 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
1185 /* Presently, our second argument is an inline #0 constant.
1186 * Switch over to an embedded 1.0 constant (that can't fit
1187 * inline, since we're 32-bit, not 16-bit like the inline
1188 * constants) */
1189
1190 ins.has_inline_constant = false;
1191 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1192 ins.src_types[1] = nir_type_float32;
1193 ins.has_constants = true;
1194
1195 if (instr->op == nir_op_b2f32)
1196 ins.constants.f32[0] = 1.0f;
1197 else
1198 ins.constants.i32[0] = 1;
1199
1200 for (unsigned c = 0; c < 16; ++c)
1201 ins.swizzle[1][c] = 0;
1202 } else if (instr->op == nir_op_b2f16) {
1203 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1204 ins.src_types[1] = nir_type_float16;
1205 ins.has_constants = true;
1206 ins.constants.i16[0] = _mesa_float_to_half(1.0);
1207
1208 for (unsigned c = 0; c < 16; ++c)
1209 ins.swizzle[1][c] = 0;
1210 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
1211 /* Lots of instructions need a 0 plonked in */
1212 ins.has_inline_constant = false;
1213 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1214 ins.src_types[1] = nir_type_uint32;
1215 ins.has_constants = true;
1216 ins.constants.u32[0] = 0;
1217
1218 for (unsigned c = 0; c < 16; ++c)
1219 ins.swizzle[1][c] = 0;
1220 } else if (instr->op == nir_op_pack_32_2x16) {
1221 ins.dest_type = nir_type_uint16;
1222 ins.mask = mask_of(nr_components * 2);
1223 ins.is_pack = true;
1224 } else if (instr->op == nir_op_pack_32_4x8) {
1225 ins.dest_type = nir_type_uint8;
1226 ins.mask = mask_of(nr_components * 4);
1227 ins.is_pack = true;
1228 } else if (instr->op == nir_op_unpack_32_2x16) {
1229 ins.dest_type = nir_type_uint32;
1230 ins.mask = mask_of(nr_components >> 1);
1231 ins.is_pack = true;
1232 } else if (instr->op == nir_op_unpack_32_4x8) {
1233 ins.dest_type = nir_type_uint32;
1234 ins.mask = mask_of(nr_components >> 2);
1235 ins.is_pack = true;
1236 }
1237
1238 if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
1239 /* To avoid duplicating the lookup tables (probably), true LUT
1240 * instructions can only operate as if they were scalars. Lower
1241 * them here by changing the component. */
1242
1243 unsigned orig_mask = ins.mask;
1244
1245 unsigned swizzle_back[MIR_VEC_COMPONENTS];
1246 memcpy(&swizzle_back, ins.swizzle[0], sizeof(swizzle_back));
1247
1248 for (int i = 0; i < nr_components; ++i) {
1249 /* Mask the associated component, dropping the
1250 * instruction if needed */
1251
1252 ins.mask = 1 << i;
1253 ins.mask &= orig_mask;
1254
1255 if (!ins.mask)
1256 continue;
1257
1258 for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
1259 ins.swizzle[0][j] = swizzle_back[i]; /* Pull from the correct component */
1260
1261 emit_mir_instruction(ctx, ins);
1262 }
1263 } else {
1264 emit_mir_instruction(ctx, ins);
1265 }
1266 }
1267
1268 #undef ALU_CASE
1269
1270 static void
1271 mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
1272 {
1273 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1274 unsigned nir_mask = 0;
1275 unsigned dsize = 0;
1276
1277 if (is_read) {
1278 nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1279 dsize = nir_dest_bit_size(intr->dest);
1280 } else {
1281 nir_mask = nir_intrinsic_write_mask(intr);
1282 dsize = 32;
1283 }
1284
1285 /* Once we have the NIR mask, we need to normalize to work in 32-bit space */
1286 unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
1287 mir_set_bytemask(ins, bytemask);
1288 ins->dest_type = nir_type_uint | dsize;
1289 }
1290
1291 /* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1292 * optimized) versions of UBO #0 */
1293
1294 static midgard_instruction *
1295 emit_ubo_read(
1296 compiler_context *ctx,
1297 nir_instr *instr,
1298 unsigned dest,
1299 unsigned offset,
1300 nir_src *indirect_offset,
1301 unsigned indirect_shift,
1302 unsigned index)
1303 {
1304 /* TODO: half-floats */
1305
1306 midgard_instruction ins = m_ld_ubo_int4(dest, 0);
1307 ins.constants.u32[0] = offset;
1308
1309 if (instr->type == nir_instr_type_intrinsic)
1310 mir_set_intr_mask(instr, &ins, true);
1311
1312 if (indirect_offset) {
1313 ins.src[2] = nir_src_index(ctx, indirect_offset);
1314 ins.src_types[2] = nir_type_uint32;
1315 ins.load_store.arg_2 = (indirect_shift << 5);
1316 } else {
1317 ins.load_store.arg_2 = 0x1E;
1318 }
1319
1320 ins.load_store.arg_1 = index;
1321
1322 return emit_mir_instruction(ctx, ins);
1323 }
1324
1325 /* Globals are like UBOs if you squint. And shared memory is like globals if
1326 * you squint even harder */
1327
1328 static void
1329 emit_global(
1330 compiler_context *ctx,
1331 nir_instr *instr,
1332 bool is_read,
1333 unsigned srcdest,
1334 nir_src *offset,
1335 bool is_shared)
1336 {
1337 /* TODO: types */
1338
1339 midgard_instruction ins;
1340
1341 if (is_read)
1342 ins = m_ld_int4(srcdest, 0);
1343 else
1344 ins = m_st_int4(srcdest, 0);
1345
1346 mir_set_offset(ctx, &ins, offset, is_shared);
1347 mir_set_intr_mask(instr, &ins, is_read);
1348
1349 emit_mir_instruction(ctx, ins);
1350 }
1351
1352 static void
1353 emit_varying_read(
1354 compiler_context *ctx,
1355 unsigned dest, unsigned offset,
1356 unsigned nr_comp, unsigned component,
1357 nir_src *indirect_offset, nir_alu_type type, bool flat)
1358 {
1359 /* XXX: Half-floats? */
1360 /* TODO: swizzle, mask */
1361
1362 midgard_instruction ins = m_ld_vary_32(dest, offset);
1363 ins.mask = mask_of(nr_comp);
1364 ins.dest_type = type;
1365
1366 if (type == nir_type_float16) {
1367 /* Ensure we are aligned so we can pack it later */
1368 ins.mask = mask_of(ALIGN_POT(nr_comp, 2));
1369 }
1370
1371 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1372 ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
1373
1374 midgard_varying_parameter p = {
1375 .is_varying = 1,
1376 .interpolation = midgard_interp_default,
1377 .flat = flat,
1378 };
1379
1380 unsigned u;
1381 memcpy(&u, &p, sizeof(p));
1382 ins.load_store.varying_parameters = u;
1383
1384 if (indirect_offset) {
1385 ins.src[2] = nir_src_index(ctx, indirect_offset);
1386 ins.src_types[2] = nir_type_uint32;
1387 } else
1388 ins.load_store.arg_2 = 0x1E;
1389
1390 ins.load_store.arg_1 = 0x9E;
1391
1392 /* Use the type appropriate load */
1393 switch (type) {
1394 case nir_type_uint32:
1395 case nir_type_bool32:
1396 ins.load_store.op = midgard_op_ld_vary_32u;
1397 break;
1398 case nir_type_int32:
1399 ins.load_store.op = midgard_op_ld_vary_32i;
1400 break;
1401 case nir_type_float32:
1402 ins.load_store.op = midgard_op_ld_vary_32;
1403 break;
1404 case nir_type_float16:
1405 ins.load_store.op = midgard_op_ld_vary_16;
1406 break;
1407 default:
1408 unreachable("Attempted to load unknown type");
1409 break;
1410 }
1411
1412 emit_mir_instruction(ctx, ins);
1413 }
1414
1415 static void
1416 emit_attr_read(
1417 compiler_context *ctx,
1418 unsigned dest, unsigned offset,
1419 unsigned nr_comp, nir_alu_type t)
1420 {
1421 midgard_instruction ins = m_ld_attr_32(dest, offset);
1422 ins.load_store.arg_1 = 0x1E;
1423 ins.load_store.arg_2 = 0x1E;
1424 ins.mask = mask_of(nr_comp);
1425
1426 /* Use the type appropriate load */
1427 switch (t) {
1428 case nir_type_uint:
1429 case nir_type_bool:
1430 ins.load_store.op = midgard_op_ld_attr_32u;
1431 break;
1432 case nir_type_int:
1433 ins.load_store.op = midgard_op_ld_attr_32i;
1434 break;
1435 case nir_type_float:
1436 ins.load_store.op = midgard_op_ld_attr_32;
1437 break;
1438 default:
1439 unreachable("Attempted to load unknown type");
1440 break;
1441 }
1442
1443 emit_mir_instruction(ctx, ins);
1444 }
1445
1446 static void
1447 emit_sysval_read(compiler_context *ctx, nir_instr *instr,
1448 unsigned nr_components, unsigned offset)
1449 {
1450 nir_dest nir_dest;
1451
1452 /* Figure out which uniform this is */
1453 int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
1454 void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);
1455
1456 unsigned dest = nir_dest_index(&nir_dest);
1457
1458 /* Sysvals are prefix uniforms */
1459 unsigned uniform = ((uintptr_t) val) - 1;
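        /* Each sysval occupies one vec4 uniform slot, hence the 16-byte
         * stride applied below */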
1460
1461 /* Emit the read itself -- this is never indirect */
1462 midgard_instruction *ins =
1463 emit_ubo_read(ctx, instr, dest, (uniform * 16) + offset, NULL, 0, 0);
1464
1465 ins->mask = mask_of(nr_components);
1466 }
1467
1468 static unsigned
1469 compute_builtin_arg(nir_intrinsic_op op)
1470 {
1471 switch (op) {
1472 case nir_intrinsic_load_work_group_id:
1473 return 0x14;
1474 case nir_intrinsic_load_local_invocation_id:
1475 return 0x10;
1476 default:
1477                 unreachable("Invalid compute parameter loaded");
1478 }
1479 }
1480
1481 static void
1482 emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, unsigned src_s, enum midgard_rt_id rt)
1483 {
1484 assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1485
1486 midgard_instruction *br = ctx->writeout_branch[rt];
1487
1488 assert(!br);
1489
1490 emit_explicit_constant(ctx, src, src);
1491
1492 struct midgard_instruction ins =
1493 v_branch(false, false);
1494
1495 bool depth_only = (rt == MIDGARD_ZS_RT);
1496
1497 ins.writeout = depth_only ? 0 : PAN_WRITEOUT_C;
1498
1499 /* Add dependencies */
1500 ins.src[0] = src;
1501 ins.src_types[0] = nir_type_uint32;
1502 ins.constants.u32[0] = depth_only ? 0xFF : (rt - MIDGARD_COLOR_RT0) * 0x100;
1503 for (int i = 0; i < 4; ++i)
1504 ins.swizzle[0][i] = i;
1505
1506 if (~src_z) {
1507 emit_explicit_constant(ctx, src_z, src_z);
1508 ins.src[2] = src_z;
1509 ins.src_types[2] = nir_type_uint32;
1510 ins.writeout |= PAN_WRITEOUT_Z;
1511 }
1512 if (~src_s) {
1513 emit_explicit_constant(ctx, src_s, src_s);
1514 ins.src[3] = src_s;
1515 ins.src_types[3] = nir_type_uint32;
1516 ins.writeout |= PAN_WRITEOUT_S;
1517 }
1518
1519 /* Emit the branch */
1520 br = emit_mir_instruction(ctx, ins);
1521 schedule_barrier(ctx);
1522 ctx->writeout_branch[rt] = br;
1523
1524 /* Push our current location = current block count - 1 = where we'll
1525 * jump to. Maybe a bit too clever for my own good */
1526
1527 br->branch.target_block = ctx->block_count - 1;
1528 }
1529
1530 static void
1531 emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1532 {
1533 unsigned reg = nir_dest_index(&instr->dest);
1534 midgard_instruction ins = m_ld_compute_id(reg, 0);
1535 ins.mask = mask_of(3);
1536 ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
1537 ins.load_store.arg_1 = compute_builtin_arg(instr->intrinsic);
1538 emit_mir_instruction(ctx, ins);
1539 }
1540
1541 static unsigned
1542 vertex_builtin_arg(nir_intrinsic_op op)
1543 {
1544 switch (op) {
1545 case nir_intrinsic_load_vertex_id:
1546 return PAN_VERTEX_ID;
1547 case nir_intrinsic_load_instance_id:
1548 return PAN_INSTANCE_ID;
1549 default:
1550 unreachable("Invalid vertex builtin");
1551 }
1552 }
1553
1554 static void
1555 emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1556 {
1557 unsigned reg = nir_dest_index(&instr->dest);
1558 emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
1559 }
1560
1561 static void
1562 emit_control_barrier(compiler_context *ctx)
1563 {
1564 midgard_instruction ins = {
1565 .type = TAG_TEXTURE_4,
1566 .dest = ~0,
1567 .src = { ~0, ~0, ~0, ~0 },
1568 .texture = {
1569 .op = TEXTURE_OP_BARRIER,
1570
1571 /* TODO: optimize */
1572 .out_of_order = MIDGARD_BARRIER_BUFFER |
1573 MIDGARD_BARRIER_SHARED ,
1574 }
1575 };
1576
1577 emit_mir_instruction(ctx, ins);
1578 }
1579
1580 static unsigned
1581 mir_get_branch_cond(nir_src *src, bool *invert)
1582 {
1583 /* Wrap it. No swizzle since it's a scalar */
1584
1585 nir_alu_src alu = {
1586 .src = *src
1587 };
1588
1589 *invert = pan_has_source_mod(&alu, nir_op_inot);
1590 return nir_src_index(NULL, &alu.src);
1591 }
1592
1593 static void
1594 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1595 {
1596 unsigned offset = 0, reg;
1597
1598 switch (instr->intrinsic) {
1599 case nir_intrinsic_discard_if:
1600 case nir_intrinsic_discard: {
1601 bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1602 struct midgard_instruction discard = v_branch(conditional, false);
1603 discard.branch.target_type = TARGET_DISCARD;
1604
1605 if (conditional) {
1606 discard.src[0] = mir_get_branch_cond(&instr->src[0],
1607 &discard.branch.invert_conditional);
1608 discard.src_types[0] = nir_type_uint32;
1609 }
1610
1611 emit_mir_instruction(ctx, discard);
1612 schedule_barrier(ctx);
1613
1614 break;
1615 }
1616
1617 case nir_intrinsic_load_uniform:
1618 case nir_intrinsic_load_ubo:
1619 case nir_intrinsic_load_global:
1620 case nir_intrinsic_load_shared:
1621 case nir_intrinsic_load_input:
1622 case nir_intrinsic_load_interpolated_input: {
1623 bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
1624 bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
1625 bool is_global = instr->intrinsic == nir_intrinsic_load_global;
1626 bool is_shared = instr->intrinsic == nir_intrinsic_load_shared;
1627 bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1628 bool is_interp = instr->intrinsic == nir_intrinsic_load_interpolated_input;
1629
1630 /* Get the base type of the intrinsic */
1631 /* TODO: Infer type? Does it matter? */
1632 nir_alu_type t =
1633 (is_ubo || is_global || is_shared) ? nir_type_uint :
1634 (is_interp) ? nir_type_float :
1635 nir_intrinsic_type(instr);
1636
1637 t = nir_alu_type_get_base_type(t);
1638
1639 if (!(is_ubo || is_global)) {
1640 offset = nir_intrinsic_base(instr);
1641 }
1642
1643 unsigned nr_comp = nir_intrinsic_dest_components(instr);
1644
1645 nir_src *src_offset = nir_get_io_offset_src(instr);
1646
1647 bool direct = nir_src_is_const(*src_offset);
1648 nir_src *indirect_offset = direct ? NULL : src_offset;
1649
1650 if (direct)
1651 offset += nir_src_as_uint(*src_offset);
1652
1653 /* We may need to apply a fractional offset */
1654 int component = (is_flat || is_interp) ?
1655 nir_intrinsic_component(instr) : 0;
1656 reg = nir_dest_index(&instr->dest);
1657
1658 if (is_uniform && !ctx->is_blend) {
1659 emit_ubo_read(ctx, &instr->instr, reg, (ctx->sysvals.sysval_count + offset) * 16, indirect_offset, 4, 0);
1660 } else if (is_ubo) {
1661 nir_src index = instr->src[0];
1662
1663 /* TODO: Is indirect block number possible? */
1664 assert(nir_src_is_const(index));
1665
1666 uint32_t uindex = nir_src_as_uint(index) + 1;
1667 emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex);
1668 } else if (is_global || is_shared) {
1669 emit_global(ctx, &instr->instr, true, reg, src_offset, is_shared);
1670 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
1671 emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
1672 } else if (ctx->is_blend) {
1673 /* ctx->blend_input will be precoloured to r0, where
1674 * the input is preloaded */
1675
1676 if (ctx->blend_input == ~0)
1677 ctx->blend_input = reg;
1678 else
1679 emit_mir_instruction(ctx, v_mov(ctx->blend_input, reg));
1680 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1681 emit_attr_read(ctx, reg, offset, nr_comp, t);
1682 } else {
1683 DBG("Unknown load\n");
1684 assert(0);
1685 }
1686
1687 break;
1688 }
1689
1690 /* Artefact of load_interpolated_input. TODO: other barycentric modes */
1691 case nir_intrinsic_load_barycentric_pixel:
1692 case nir_intrinsic_load_barycentric_centroid:
1693 break;
1694
1695 /* Reads 128-bit value raw off the tilebuffer during blending, tasty */
1696
1697 case nir_intrinsic_load_raw_output_pan: {
1698 reg = nir_dest_index(&instr->dest);
1699 assert(ctx->is_blend);
1700
1701 /* T720 and below use different blend opcodes with slightly
1702 * different semantics than T760 and up */
1703
1704 midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
1705
1706 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1707 ld.load_store.op = midgard_op_ld_color_buffer_32u_old;
1708 ld.load_store.address = 16;
1709 ld.load_store.arg_2 = 0x1E;
1710 }
1711
1712 emit_mir_instruction(ctx, ld);
1713 break;
1714 }
1715
1716 case nir_intrinsic_load_output: {
1717 reg = nir_dest_index(&instr->dest);
1718 assert(ctx->is_blend);
1719
1720 midgard_instruction ld = m_ld_color_buffer_as_fp16(reg, 0);
1721
1722 for (unsigned c = 4; c < 16; ++c)
1723 ld.swizzle[0][c] = 0;
1724
1725 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1726 ld.load_store.op = midgard_op_ld_color_buffer_as_fp16_old;
1727 ld.load_store.address = 1;
1728 ld.load_store.arg_2 = 0x1E;
1729 }
1730
1731 emit_mir_instruction(ctx, ld);
1732 break;
1733 }
1734
1735 case nir_intrinsic_load_blend_const_color_rgba: {
1736 assert(ctx->is_blend);
1737 reg = nir_dest_index(&instr->dest);
1738
1739 /* Blend constants are embedded directly in the shader and
1740 * patched in, so we use some magic routing */
1741
1742 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
1743 ins.has_constants = true;
1744 ins.has_blend_constant = true;
1745 emit_mir_instruction(ctx, ins);
1746 break;
1747 }
1748
1749 case nir_intrinsic_store_output:
1750 case nir_intrinsic_store_combined_output_pan:
1751 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1752
1753 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1754
1755 reg = nir_src_index(ctx, &instr->src[0]);
1756
1757 if (ctx->stage == MESA_SHADER_FRAGMENT) {
1758 bool combined = instr->intrinsic ==
1759 nir_intrinsic_store_combined_output_pan;
1760
1761 const nir_variable *var;
1762 enum midgard_rt_id rt;
1763
1764 var = search_var(&ctx->nir->outputs,
1765 nir_intrinsic_base(instr));
1766 assert(var);
1767 if (var->data.location == FRAG_RESULT_COLOR)
1768 rt = MIDGARD_COLOR_RT0;
1769 else if (var->data.location >= FRAG_RESULT_DATA0)
1770 rt = MIDGARD_COLOR_RT0 + var->data.location -
1771 FRAG_RESULT_DATA0;
1772 else if (combined)
1773 rt = MIDGARD_ZS_RT;
1774 else
1775 assert(0);
1776
1777 unsigned reg_z = ~0, reg_s = ~0;
1778 if (combined) {
1779 unsigned writeout = nir_intrinsic_component(instr);
1780 if (writeout & PAN_WRITEOUT_Z)
1781 reg_z = nir_src_index(ctx, &instr->src[2]);
1782 if (writeout & PAN_WRITEOUT_S)
1783 reg_s = nir_src_index(ctx, &instr->src[3]);
1784 }
1785
1786 emit_fragment_store(ctx, reg, reg_z, reg_s, rt);
1787 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1788 assert(instr->intrinsic == nir_intrinsic_store_output);
1789
1790 /* We should have been vectorized, though we don't
1791 * currently check that st_vary is emitted only once
1792 * per slot (this is relevant, since there's not a mask
1793 * parameter available on the store [set to 0 by the
1794 * blob]). We do respect the component by adjusting the
1795 * swizzle. If this is a constant source, we'll need to
1796 * emit that explicitly. */
1797
1798 emit_explicit_constant(ctx, reg, reg);
1799
1800 unsigned dst_component = nir_intrinsic_component(instr);
1801 unsigned nr_comp = nir_src_num_components(instr->src[0]);
1802
1803 midgard_instruction st = m_st_vary_32(reg, offset);
1804 st.load_store.arg_1 = 0x9E;
1805 st.load_store.arg_2 = 0x1E;
1806
1807 switch (nir_alu_type_get_base_type(nir_intrinsic_type(instr))) {
1808 case nir_type_uint:
1809 case nir_type_bool:
1810 st.load_store.op = midgard_op_st_vary_32u;
1811 break;
1812 case nir_type_int:
1813 st.load_store.op = midgard_op_st_vary_32i;
1814 break;
1815 case nir_type_float:
1816 st.load_store.op = midgard_op_st_vary_32;
1817 break;
1818 default:
1819 unreachable("Attempted to store unknown type");
1820 break;
1821 }
1822
1823 /* nir_intrinsic_component(store_intr) encodes the
1824 * destination component start. Source component offset
1825 * adjustment is taken care of in
1826 * install_registers_instr(), when offset_swizzle() is
1827 * called.
1828 */
1829 unsigned src_component = COMPONENT_X;
1830
1831 assert(nr_comp > 0);
1832 for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle); ++i) {
1833 st.swizzle[0][i] = src_component;
1834 if (i >= dst_component && i < dst_component + nr_comp - 1)
1835 src_component++;
1836 }
1837
1838 emit_mir_instruction(ctx, st);
1839 } else {
1840 DBG("Unknown store\n");
1841 assert(0);
1842 }
1843
1844 break;
1845
1846 /* Special case of store_output for lowered blend shaders */
1847 case nir_intrinsic_store_raw_output_pan:
1848 assert (ctx->stage == MESA_SHADER_FRAGMENT);
1849 reg = nir_src_index(ctx, &instr->src[0]);
1850 emit_fragment_store(ctx, reg, ~0, ~0, ctx->blend_rt);
1851 break;
1852
1853 case nir_intrinsic_store_global:
1854 case nir_intrinsic_store_shared:
1855 reg = nir_src_index(ctx, &instr->src[0]);
1856 emit_explicit_constant(ctx, reg, reg);
1857
1858 emit_global(ctx, &instr->instr, false, reg, &instr->src[1], instr->intrinsic == nir_intrinsic_store_shared);
1859 break;
1860
1861 case nir_intrinsic_load_ssbo_address:
1862 emit_sysval_read(ctx, &instr->instr, 1, 0);
1863 break;
1864
1865 case nir_intrinsic_get_buffer_size:
1866 emit_sysval_read(ctx, &instr->instr, 1, 8);
1867 break;
1868
1869 case nir_intrinsic_load_viewport_scale:
1870 case nir_intrinsic_load_viewport_offset:
1871 case nir_intrinsic_load_num_work_groups:
1872 case nir_intrinsic_load_sampler_lod_parameters_pan:
1873 emit_sysval_read(ctx, &instr->instr, 3, 0);
1874 break;
1875
1876 case nir_intrinsic_load_work_group_id:
1877 case nir_intrinsic_load_local_invocation_id:
1878 emit_compute_builtin(ctx, instr);
1879 break;
1880
1881 case nir_intrinsic_load_vertex_id:
1882 case nir_intrinsic_load_instance_id:
1883 emit_vertex_builtin(ctx, instr);
1884 break;
1885
1886 case nir_intrinsic_memory_barrier_buffer:
1887 case nir_intrinsic_memory_barrier_shared:
1888 break;
1889
1890 case nir_intrinsic_control_barrier:
1891 schedule_barrier(ctx);
1892 emit_control_barrier(ctx);
1893 schedule_barrier(ctx);
1894 break;
1895
1896 default:
1897 fprintf(stderr, "Unhandled intrinsic %s\n", nir_intrinsic_infos[instr->intrinsic].name);
1898 assert(0);
1899 break;
1900 }
1901 }
1902
1903 static unsigned
1904 midgard_tex_format(enum glsl_sampler_dim dim)
1905 {
1906 switch (dim) {
1907 case GLSL_SAMPLER_DIM_1D:
1908 case GLSL_SAMPLER_DIM_BUF:
1909 return MALI_TEX_1D;
1910
1911 case GLSL_SAMPLER_DIM_2D:
1912 case GLSL_SAMPLER_DIM_EXTERNAL:
1913 case GLSL_SAMPLER_DIM_RECT:
1914 return MALI_TEX_2D;
1915
1916 case GLSL_SAMPLER_DIM_3D:
1917 return MALI_TEX_3D;
1918
1919 case GLSL_SAMPLER_DIM_CUBE:
1920 return MALI_TEX_CUBE;
1921
1922 default:
1923 DBG("Unknown sampler dim type\n");
1924 assert(0);
1925 return 0;
1926 }
1927 }
1928
1929 /* Tries to attach an explicit LOD or bias as a constant. Returns whether this
1930 * was successful */
1931
1932 static bool
1933 pan_attach_constant_bias(
1934 compiler_context *ctx,
1935 nir_src lod,
1936 midgard_texture_word *word)
1937 {
1938 /* To attach as constant, it has to *be* constant */
1939
1940 if (!nir_src_is_const(lod))
1941 return false;
1942
1943 float f = nir_src_as_float(lod);
1944
1945 /* Break into fixed-point */
1946 signed lod_int = f;
1947 float lod_frac = f - lod_int;
1948
1949 /* Carry over negative fractions */
1950 if (lod_frac < 0.0) {
1951 lod_int--;
1952 lod_frac += 1.0;
1953 }
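        /* e.g. an LOD of -0.5 splits as lod_int = -1, lod_frac = 0.5 */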
1954
1955 /* Encode */
1956 word->bias = float_to_ubyte(lod_frac);
1957 word->bias_int = lod_int;
1958
1959 return true;
1960 }
1961
1962 static void
1963 emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
1964 unsigned midgard_texop)
1965 {
1966 /* TODO */
1967 //assert (!instr->sampler);
1968
1969 int texture_index = instr->texture_index;
1970 int sampler_index = texture_index;
1971
1972 nir_alu_type dest_base = nir_alu_type_get_base_type(instr->dest_type);
1973 nir_alu_type dest_type = dest_base | nir_dest_bit_size(instr->dest);
1974
1975 midgard_instruction ins = {
1976 .type = TAG_TEXTURE_4,
1977 .mask = 0xF,
1978 .dest = nir_dest_index(&instr->dest),
1979 .src = { ~0, ~0, ~0, ~0 },
1980 .dest_type = dest_type,
1981 .swizzle = SWIZZLE_IDENTITY_4,
1982 .texture = {
1983 .op = midgard_texop,
1984 .format = midgard_tex_format(instr->sampler_dim),
1985 .texture_handle = texture_index,
1986 .sampler_handle = sampler_index,
1987 .shadow = instr->is_shadow,
1988 }
1989 };
1990
1991 if (instr->is_shadow && !instr->is_new_style_shadow)
1992 for (int i = 0; i < 4; ++i)
1993 ins.swizzle[0][i] = COMPONENT_X;
1994
1995 /* We may need a temporary for the coordinate */
1996
1997 bool needs_temp_coord =
1998 (midgard_texop == TEXTURE_OP_TEXEL_FETCH) ||
1999 (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) ||
2000 (instr->is_shadow);
2001
2002 unsigned coords = needs_temp_coord ? make_compiler_temp_reg(ctx) : 0;
2003
2004 for (unsigned i = 0; i < instr->num_srcs; ++i) {
2005 int index = nir_src_index(ctx, &instr->src[i].src);
2006 unsigned nr_components = nir_src_num_components(instr->src[i].src);
2007 unsigned sz = nir_src_bit_size(instr->src[i].src);
2008 nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
2009
2010 switch (instr->src[i].src_type) {
2011 case nir_tex_src_coord: {
2012 emit_explicit_constant(ctx, index, index);
2013
2014 unsigned coord_mask = mask_of(instr->coord_components);
2015
2016 bool flip_zw = (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) && (coord_mask & (1 << COMPONENT_Z));
2017
2018 if (flip_zw)
2019 coord_mask ^= ((1 << COMPONENT_Z) | (1 << COMPONENT_W));
2020
2021 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
2022 /* texelFetch is undefined on samplerCube */
2023 assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
2024
2025 /* For cubemaps, we use a special ld/st op to
2026 * select the face and copy the xy into the
2027 * texture register */
2028
2029 midgard_instruction ld = m_ld_cubemap_coords(coords, 0);
2030 ld.src[1] = index;
2031 ld.src_types[1] = T;
2032 ld.mask = 0x3; /* xy */
2033 ld.load_store.arg_1 = 0x20;
2034 ld.swizzle[1][3] = COMPONENT_X;
2035 emit_mir_instruction(ctx, ld);
2036
2037                                 /* xyzw -> xyxx (xyzx for shadow) */
2038 ins.swizzle[1][2] = instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
2039 ins.swizzle[1][3] = COMPONENT_X;
2040 } else if (needs_temp_coord) {
2041 /* mov coord_temp, coords */
2042 midgard_instruction mov = v_mov(index, coords);
2043 mov.mask = coord_mask;
2044
2045 if (flip_zw)
2046 mov.swizzle[1][COMPONENT_W] = COMPONENT_Z;
2047
2048 emit_mir_instruction(ctx, mov);
2049 } else {
2050 coords = index;
2051 }
2052
2053 ins.src[1] = coords;
2054 ins.src_types[1] = T;
2055
2056                         /* Texelfetch coordinates use all four elements
2057                          * (xyz/index) regardless of texture dimensionality,
2058                          * so it's necessary to zero the unused components
2059                          * to keep everything happy */
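                        /* e.g. a plain 2D texelFetch supplies only .xy, so the
                         * mov below zeroes the temporary's .zw before the fetch */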
2060
2061 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
2062 /* mov index.zw, #0, or generalized */
2063 midgard_instruction mov =
2064 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), coords);
2065 mov.has_constants = true;
2066 mov.mask = coord_mask ^ 0xF;
2067 emit_mir_instruction(ctx, mov);
2068 }
2069
2070 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
2071                                 /* The array component lives in w, but NIR
2072                                  * wants it in z; if we have a temp coord we
2073                                  * already fixed that up */
2074
2075 if (nr_components == 3) {
2076 ins.swizzle[1][2] = COMPONENT_Z;
2077 ins.swizzle[1][3] = needs_temp_coord ? COMPONENT_W : COMPONENT_Z;
2078 } else if (nr_components == 2) {
2079 ins.swizzle[1][2] =
2080 instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
2081 ins.swizzle[1][3] = COMPONENT_X;
2082 } else
2083 unreachable("Invalid texture 2D components");
2084 }
2085
2086 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
2087 /* We zeroed */
2088 ins.swizzle[1][2] = COMPONENT_Z;
2089 ins.swizzle[1][3] = COMPONENT_W;
2090 }
2091
2092 break;
2093 }
2094
2095 case nir_tex_src_bias:
2096 case nir_tex_src_lod: {
2097 /* Try as a constant if we can */
2098
2099 bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
2100 if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2101 break;
2102
2103 ins.texture.lod_register = true;
2104 ins.src[2] = index;
2105 ins.src_types[2] = T;
2106
2107 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2108 ins.swizzle[2][c] = COMPONENT_X;
2109
2110 emit_explicit_constant(ctx, index, index);
2111
2112 break;
2113                 }
2114
2115 case nir_tex_src_offset: {
2116 ins.texture.offset_register = true;
2117 ins.src[3] = index;
2118 ins.src_types[3] = T;
2119
2120 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2121 ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
2122
2123 emit_explicit_constant(ctx, index, index);
2124 break;
2125                 }
2126
2127 case nir_tex_src_comparator: {
2128 unsigned comp = COMPONENT_Z;
2129
2130 /* mov coord_temp.foo, coords */
2131 midgard_instruction mov = v_mov(index, coords);
2132 mov.mask = 1 << comp;
2133
2134 for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i)
2135 mov.swizzle[1][i] = COMPONENT_X;
2136
2137 emit_mir_instruction(ctx, mov);
2138 break;
2139 }
2140
2141 default: {
2142 fprintf(stderr, "Unknown texture source type: %d\n", instr->src[i].src_type);
2143 assert(0);
2144 }
2145 }
2146 }
2147
2148 emit_mir_instruction(ctx, ins);
2149 }
2150
2151 static void
2152 emit_tex(compiler_context *ctx, nir_tex_instr *instr)
2153 {
2154 switch (instr->op) {
2155 case nir_texop_tex:
2156 case nir_texop_txb:
2157 emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
2158 break;
2159 case nir_texop_txl:
2160 emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
2161 break;
2162 case nir_texop_txf:
2163 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
2164 break;
2165 case nir_texop_txs:
2166 emit_sysval_read(ctx, &instr->instr, 4, 0);
2167 break;
2168 default: {
2169 fprintf(stderr, "Unhandled texture op: %d\n", instr->op);
2170 assert(0);
2171 }
2172 }
2173 }
2174
2175 static void
2176 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
2177 {
2178 switch (instr->type) {
2179 case nir_jump_break: {
2180 /* Emit a branch out of the loop */
2181 struct midgard_instruction br = v_branch(false, false);
2182 br.branch.target_type = TARGET_BREAK;
2183 br.branch.target_break = ctx->current_loop_depth;
2184 emit_mir_instruction(ctx, br);
2185 break;
2186 }
2187
2188 default:
2189 DBG("Unknown jump type %d\n", instr->type);
2190 break;
2191 }
2192 }
2193
2194 static void
2195 emit_instr(compiler_context *ctx, struct nir_instr *instr)
2196 {
2197 switch (instr->type) {
2198 case nir_instr_type_load_const:
2199 emit_load_const(ctx, nir_instr_as_load_const(instr));
2200 break;
2201
2202 case nir_instr_type_intrinsic:
2203 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2204 break;
2205
2206 case nir_instr_type_alu:
2207 emit_alu(ctx, nir_instr_as_alu(instr));
2208 break;
2209
2210 case nir_instr_type_tex:
2211 emit_tex(ctx, nir_instr_as_tex(instr));
2212 break;
2213
2214 case nir_instr_type_jump:
2215 emit_jump(ctx, nir_instr_as_jump(instr));
2216 break;
2217
2218 case nir_instr_type_ssa_undef:
2219 /* Spurious */
2220 break;
2221
2222 default:
2223 DBG("Unhandled instruction type\n");
2224 break;
2225 }
2226 }
2227
2228
2229 /* ALU instructions can inline or embed constants, which decreases register
2230 * pressure and saves space. */
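/* For example (illustrative): an ALU op whose second source was a NIR
 * load_const can read the fixed constant register instead, with the value
 * attached to the instruction, so no separate move is required. */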
2231
2232 #define CONDITIONAL_ATTACH(idx) { \
2233 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
2234 \
2235 if (entry) { \
2236 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2237 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
2238 } \
2239 }
2240
2241 static void
2242 inline_alu_constants(compiler_context *ctx, midgard_block *block)
2243 {
2244 mir_foreach_instr_in_block(block, alu) {
2245 /* Other instructions cannot inline constants */
2246 if (alu->type != TAG_ALU_4) continue;
2247 if (alu->compact_branch) continue;
2248
2249 /* If there is already a constant here, we can do nothing */
2250 if (alu->has_constants) continue;
2251
2252 CONDITIONAL_ATTACH(0);
2253
2254 if (!alu->has_constants) {
2255 CONDITIONAL_ATTACH(1)
2256 } else if (!alu->inline_constant) {
2257                         /* Corner case: _two_ vec4 constants, for instance with a
2258                          * csel. In this case, we can only use the constant
2259                          * register for one; we'll have to emit a move for the
2260                          * other. */
2261
2262 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2263 unsigned scratch = make_compiler_temp(ctx);
2264
2265 if (entry) {
2266 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
2267 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2268
2269 /* Set the source */
2270 alu->src[1] = scratch;
2271
2272 /* Inject us -before- the last instruction which set r31 */
2273 mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
2274 }
2275 }
2276 }
2277 }
2278
2279 /* Midgard supports two types of constants, embedded constants (128-bit) and
2280 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2281 * constants can be demoted to inline constants, for space savings and
2282 * sometimes a performance boost */
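/* For instance (illustrative): a scalar embedded constant such as 2.0 can be
 * demoted to a 16-bit inline immediate, provided it survives the fp16 round
 * trip checked below and every accessed lane reads the same value. */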
2283
2284 static void
2285 embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
2286 {
2287 mir_foreach_instr_in_block(block, ins) {
2288 if (!ins->has_constants) continue;
2289 if (ins->has_inline_constant) continue;
2290
2291 /* Blend constants must not be inlined by definition */
2292 if (ins->has_blend_constant) continue;
2293
2294 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
2295 bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
2296 bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
2297
2298 if (!(is_16 || is_32))
2299 continue;
2300
2301                 /* Only the second source may be an inline constant due to
2302                  * encoding restrictions, so if possible we flip the
2303                  * arguments when the constant landed in the first slot */
2304
2305 int op = ins->alu.op;
2306
2307 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2308 alu_opcode_props[op].props & OP_COMMUTES) {
2309 mir_flip(ins);
2310 }
2311
2312 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2313 /* Component is from the swizzle. Take a nonzero component */
2314 assert(ins->mask);
2315 unsigned first_comp = ffs(ins->mask) - 1;
2316 unsigned component = ins->swizzle[1][first_comp];
2317
2318 /* Scale constant appropriately, if we can legally */
2319 int16_t scaled_constant = 0;
2320
2321 if (is_16) {
2322 scaled_constant = ins->constants.u16[component];
2323 } else if (midgard_is_integer_op(op)) {
2324 scaled_constant = ins->constants.u32[component];
2325
2326 /* Constant overflow after resize */
2327 if (scaled_constant != ins->constants.u32[component])
2328 continue;
2329 } else {
2330 float original = ins->constants.f32[component];
2331 scaled_constant = _mesa_float_to_half(original);
2332
2333 /* Check for loss of precision. If this is
2334 * mediump, we don't care, but for a highp
2335 * shader, we need to pay attention. NIR
2336 * doesn't yet tell us which mode we're in!
2337 * Practically this prevents most constants
2338 * from being inlined, sadly. */
2339
2340 float fp32 = _mesa_half_to_float(scaled_constant);
2341
2342 if (fp32 != original)
2343 continue;
2344 }
2345
2346 /* Should've been const folded */
2347 if (ins->src_abs[1] || ins->src_neg[1])
2348 continue;
2349
2350 /* Make sure that the constant is not itself a vector
2351 * by checking if all accessed values are the same. */
2352
2353 const midgard_constants *cons = &ins->constants;
2354 uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
2355
2356 bool is_vector = false;
2357 unsigned mask = effective_writemask(&ins->alu, ins->mask);
2358
2359 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
2360 /* We only care if this component is actually used */
2361 if (!(mask & (1 << c)))
2362 continue;
2363
2364 uint32_t test = is_16 ?
2365 cons->u16[ins->swizzle[1][c]] :
2366 cons->u32[ins->swizzle[1][c]];
2367
2368 if (test != value) {
2369 is_vector = true;
2370 break;
2371 }
2372 }
2373
2374 if (is_vector)
2375 continue;
2376
2377 /* Get rid of the embedded constant */
2378 ins->has_constants = false;
2379 ins->src[1] = ~0;
2380 ins->has_inline_constant = true;
2381 ins->inline_constant = scaled_constant;
2382 }
2383 }
2384 }
2385
2386 /* Dead code elimination for branches at the end of a block - only one branch
2387 * per block is legal semantically */
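/* e.g. an unconditional `break` emitted as the last statement of a loop body
 * leaves the loop's own branch-back dead, so it is removed here (illustrative) */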
2388
2389 static void
2390 midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2391 {
2392 bool branched = false;
2393
2394 mir_foreach_instr_in_block_safe(block, ins) {
2395 if (!midgard_is_branch_unit(ins->unit)) continue;
2396
2397 if (branched)
2398 mir_remove_instruction(ins);
2399
2400 branched = true;
2401 }
2402 }
2403
2404 /* We want to force the invert on AND/OR to the second slot to legalize into
2405 * iandnot/iornot. The relevant patterns are for AND (and OR respectively)
2406 *
2407 * ~a & #b = ~a & ~(#~b)
2408 * ~a & b = b & ~a
2409 */
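/* Illustration: iand(~a, #5) becomes iand(~a, ~(#~5)), the same value but with
 * both inverts set; with a non-constant b, iand(~a, b) simply flips to
 * iand(b, ~a). */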
2410
2411 static void
2412 midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
2413 {
2414 mir_foreach_instr_in_block(block, ins) {
2415 if (ins->type != TAG_ALU_4) continue;
2416
2417 if (ins->alu.op != midgard_alu_op_iand &&
2418 ins->alu.op != midgard_alu_op_ior) continue;
2419
2420 if (ins->src_invert[1] || !ins->src_invert[0]) continue;
2421
2422 if (ins->has_inline_constant) {
2423 /* ~(#~a) = ~(~#a) = a, so valid, and forces both
2424 * inverts on */
2425 ins->inline_constant = ~ins->inline_constant;
2426 ins->src_invert[1] = true;
2427 } else {
2428                         /* Flip so the invert lands in the second slot. Note
2429                          * has_inline_constant is false on this path, so
2430                          * flipping is legal. */
2431 mir_flip(ins);
2432 }
2433 }
2434 }
2435
2436 static unsigned
2437 emit_fragment_epilogue(compiler_context *ctx, unsigned rt)
2438 {
2439 /* Loop to ourselves */
2440 midgard_instruction *br = ctx->writeout_branch[rt];
2441 struct midgard_instruction ins = v_branch(false, false);
2442 ins.writeout = br->writeout;
2443 ins.branch.target_block = ctx->block_count - 1;
2444 ins.constants.u32[0] = br->constants.u32[0];
2445 memcpy(&ins.src_types, &br->src_types, sizeof(ins.src_types));
2446 emit_mir_instruction(ctx, ins);
2447
2448 ctx->current_block->epilogue = true;
2449 schedule_barrier(ctx);
2450 return ins.branch.target_block;
2451 }
2452
2453 static midgard_block *
2454 emit_block(compiler_context *ctx, nir_block *block)
2455 {
2456 midgard_block *this_block = ctx->after_block;
2457 ctx->after_block = NULL;
2458
2459 if (!this_block)
2460 this_block = create_empty_block(ctx);
2461
2462 list_addtail(&this_block->base.link, &ctx->blocks);
2463
2464 this_block->scheduled = false;
2465 ++ctx->block_count;
2466
2467 /* Set up current block */
2468 list_inithead(&this_block->base.instructions);
2469 ctx->current_block = this_block;
2470
2471 nir_foreach_instr(instr, block) {
2472 emit_instr(ctx, instr);
2473 ++ctx->instruction_count;
2474 }
2475
2476 return this_block;
2477 }
2478
2479 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2480
2481 static void
2482 emit_if(struct compiler_context *ctx, nir_if *nif)
2483 {
2484 midgard_block *before_block = ctx->current_block;
2485
2486 /* Speculatively emit the branch, but we can't fill it in until later */
2487 bool inv = false;
2488 EMIT(branch, true, true);
2489 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2490 then_branch->src[0] = mir_get_branch_cond(&nif->condition, &inv);
2491 then_branch->src_types[0] = nir_type_uint32;
2492 then_branch->branch.invert_conditional = !inv;
2493
2494 /* Emit the two subblocks. */
2495 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2496 midgard_block *end_then_block = ctx->current_block;
2497
2498 /* Emit a jump from the end of the then block to the end of the else */
2499 EMIT(branch, false, false);
2500 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2501
2502 /* Emit second block, and check if it's empty */
2503
2504 int else_idx = ctx->block_count;
2505 int count_in = ctx->instruction_count;
2506 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2507 midgard_block *end_else_block = ctx->current_block;
2508 int after_else_idx = ctx->block_count;
2509
2510 /* Now that we have the subblocks emitted, fix up the branches */
2511
2512 assert(then_block);
2513 assert(else_block);
2514
2515 if (ctx->instruction_count == count_in) {
2516 /* The else block is empty, so don't emit an exit jump */
2517 mir_remove_instruction(then_exit);
2518 then_branch->branch.target_block = after_else_idx;
2519 } else {
2520 then_branch->branch.target_block = else_idx;
2521 then_exit->branch.target_block = after_else_idx;
2522 }
2523
2524 /* Wire up the successors */
2525
2526 ctx->after_block = create_empty_block(ctx);
2527
2528 pan_block_add_successor(&before_block->base, &then_block->base);
2529 pan_block_add_successor(&before_block->base, &else_block->base);
2530
2531 pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
2532 pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
2533 }
2534
2535 static void
2536 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2537 {
2538 /* Remember where we are */
2539 midgard_block *start_block = ctx->current_block;
2540
2541 /* Allocate a loop number, growing the current inner loop depth */
2542 int loop_idx = ++ctx->current_loop_depth;
2543
2544 /* Get index from before the body so we can loop back later */
2545 int start_idx = ctx->block_count;
2546
2547 /* Emit the body itself */
2548 midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
2549
2550         /* Branch back to the top of the loop body */
2551 struct midgard_instruction br_back = v_branch(false, false);
2552 br_back.branch.target_block = start_idx;
2553 emit_mir_instruction(ctx, br_back);
2554
2555 /* Mark down that branch in the graph. */
2556 pan_block_add_successor(&start_block->base, &loop_block->base);
2557 pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
2558
2559 /* Find the index of the block about to follow us (note: we don't add
2560 * one; blocks are 0-indexed so we get a fencepost problem) */
2561 int break_block_idx = ctx->block_count;
2562
2563 /* Fix up the break statements we emitted to point to the right place,
2564 * now that we can allocate a block number for them */
2565 ctx->after_block = create_empty_block(ctx);
2566
2567 mir_foreach_block_from(ctx, start_block, _block) {
2568 mir_foreach_instr_in_block(((midgard_block *) _block), ins) {
2569 if (ins->type != TAG_ALU_4) continue;
2570 if (!ins->compact_branch) continue;
2571
2572 /* We found a branch -- check the type to see if we need to do anything */
2573 if (ins->branch.target_type != TARGET_BREAK) continue;
2574
2575 /* It's a break! Check if it's our break */
2576 if (ins->branch.target_break != loop_idx) continue;
2577
2578 /* Okay, cool, we're breaking out of this loop.
2579 * Rewrite from a break to a goto */
2580
2581 ins->branch.target_type = TARGET_GOTO;
2582 ins->branch.target_block = break_block_idx;
2583
2584 pan_block_add_successor(_block, &ctx->after_block->base);
2585 }
2586 }
2587
2588 /* Now that we've finished emitting the loop, free up the depth again
2589 * so we play nice with recursion amid nested loops */
2590 --ctx->current_loop_depth;
2591
2592 /* Dump loop stats */
2593 ++ctx->loop_count;
2594 }
2595
2596 static midgard_block *
2597 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2598 {
2599 midgard_block *start_block = NULL;
2600
2601 foreach_list_typed(nir_cf_node, node, node, list) {
2602 switch (node->type) {
2603 case nir_cf_node_block: {
2604 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2605
2606 if (!start_block)
2607 start_block = block;
2608
2609 break;
2610 }
2611
2612 case nir_cf_node_if:
2613 emit_if(ctx, nir_cf_node_as_if(node));
2614 break;
2615
2616 case nir_cf_node_loop:
2617 emit_loop(ctx, nir_cf_node_as_loop(node));
2618 break;
2619
2620 case nir_cf_node_function:
2621 assert(0);
2622 break;
2623 }
2624 }
2625
2626 return start_block;
2627 }
2628
2629 /* Due to lookahead, we need to report the first tag executed in the command
2630 * stream and in branch targets. An initial block might be empty, so iterate
2631 * until we find one that 'works' */
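/* (e.g. the merge block created after an if/else may end up with no bundles,
 * in which case we keep walking forward) */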
2632
2633 static unsigned
2634 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2635 {
2636 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2637
2638 mir_foreach_block_from(ctx, initial_block, _v) {
2639 midgard_block *v = (midgard_block *) _v;
2640 if (v->quadword_count) {
2641 midgard_bundle *initial_bundle =
2642 util_dynarray_element(&v->bundles, midgard_bundle, 0);
2643
2644 return initial_bundle->tag;
2645 }
2646 }
2647
2648         /* Default to tag 1, which will break from the shader, in case we jump
2649          * to the exit block (i.e. `return` in a compute shader) */
2650
2651 return 1;
2652 }
2653
2654 /* For each fragment writeout instruction, generate a writeout loop to
2655 * associate with it */
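/* Roughly: each render target's writeout branch is redirected to a small
 * epilogue block that re-issues the writeout and loops on itself; unless this
 * is the last RT, an unconditional branch then returns to the original
 * fall-through block so the next RT's writeout is reached. (A sketch of the
 * control flow; see the code below.) */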
2656
2657 static void
2658 mir_add_writeout_loops(compiler_context *ctx)
2659 {
2660 for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
2661 midgard_instruction *br = ctx->writeout_branch[rt];
2662 if (!br) continue;
2663
2664 unsigned popped = br->branch.target_block;
2665 pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base), &ctx->current_block->base);
2666 br->branch.target_block = emit_fragment_epilogue(ctx, rt);
2667 br->branch.target_type = TARGET_GOTO;
2668
2669                 /* If we have more RTs, we'll need to branch back into the
2670                  * main body after our loop terminates */
2671
2672 if ((rt + 1) < ARRAY_SIZE(ctx->writeout_branch) && ctx->writeout_branch[rt + 1]) {
2673 midgard_instruction uncond = v_branch(false, false);
2674 uncond.branch.target_block = popped;
2675 uncond.branch.target_type = TARGET_GOTO;
2676 emit_mir_instruction(ctx, uncond);
2677 pan_block_add_successor(&ctx->current_block->base, &(mir_get_block(ctx, popped)->base));
2678 schedule_barrier(ctx);
2679 } else {
2680 /* We're last, so we can terminate here */
2681 br->last_writeout = true;
2682 }
2683 }
2684 }
2685
2686 int
2687 midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id, bool shaderdb)
2688 {
2689 struct util_dynarray *compiled = &program->compiled;
2690
2691 midgard_debug = debug_get_option_midgard_debug();
2692
2693 /* TODO: Bound against what? */
2694 compiler_context *ctx = rzalloc(NULL, compiler_context);
2695
2696 ctx->nir = nir;
2697 ctx->stage = nir->info.stage;
2698 ctx->is_blend = is_blend;
2699 ctx->alpha_ref = program->alpha_ref;
2700 ctx->blend_rt = MIDGARD_COLOR_RT0 + blend_rt;
2701 ctx->blend_input = ~0;
2702 ctx->quirks = midgard_get_quirks(gpu_id);
2703
2704 /* Start off with a safe cutoff, allowing usage of all 16 work
2705 * registers. Later, we'll promote uniform reads to uniform registers
2706 * if we determine it is beneficial to do so */
2707 ctx->uniform_cutoff = 8;
2708
2709         /* Initialize hash tables at a global (not block) level */
2710
2711 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2712 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2713
2714 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2715 * (so we don't accidentally duplicate the epilogue since mesa/st has
2716 * messed with our I/O quite a bit already) */
2717
2718 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2719
2720 if (ctx->stage == MESA_SHADER_VERTEX) {
2721 NIR_PASS_V(nir, nir_lower_viewport_transform);
2722 NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
2723 }
2724
2725 NIR_PASS_V(nir, nir_lower_var_copies);
2726 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2727 NIR_PASS_V(nir, nir_split_var_copies);
2728 NIR_PASS_V(nir, nir_lower_var_copies);
2729 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2730 NIR_PASS_V(nir, nir_lower_var_copies);
2731 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2732
2733 NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
2734 NIR_PASS_V(nir, nir_lower_ssbo);
2735 NIR_PASS_V(nir, midgard_nir_lower_zs_store);
2736
2737 /* Optimisation passes */
2738
2739 optimise_nir(nir, ctx->quirks, is_blend);
2740
2741 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2742 nir_print_shader(nir, stdout);
2743 }
2744
2745 /* Assign sysvals and counts, now that we're sure
2746 * (post-optimisation) */
2747
2748 panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
2749 program->sysval_count = ctx->sysvals.sysval_count;
2750 memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
2751
2752 nir_foreach_function(func, nir) {
2753 if (!func->impl)
2754 continue;
2755
2756 list_inithead(&ctx->blocks);
2757 ctx->block_count = 0;
2758 ctx->func = func;
2759 ctx->already_emitted = calloc(BITSET_WORDS(func->impl->ssa_alloc), sizeof(BITSET_WORD));
2760
2761 emit_cf_list(ctx, &func->impl->body);
2762 free(ctx->already_emitted);
2763 break; /* TODO: Multi-function shaders */
2764 }
2765
2766 util_dynarray_init(compiled, NULL);
2767
2768 /* Per-block lowering before opts */
2769
2770 mir_foreach_block(ctx, _block) {
2771 midgard_block *block = (midgard_block *) _block;
2772 inline_alu_constants(ctx, block);
2773 embedded_to_inline_constant(ctx, block);
2774 }
2775 /* MIR-level optimizations */
2776
2777 bool progress = false;
2778
2779 do {
2780 progress = false;
2781 progress |= midgard_opt_dead_code_eliminate(ctx);
2782
2783 mir_foreach_block(ctx, _block) {
2784 midgard_block *block = (midgard_block *) _block;
2785 progress |= midgard_opt_copy_prop(ctx, block);
2786 progress |= midgard_opt_combine_projection(ctx, block);
2787 progress |= midgard_opt_varying_projection(ctx, block);
2788 }
2789 } while (progress);
2790
2791 mir_foreach_block(ctx, _block) {
2792 midgard_block *block = (midgard_block *) _block;
2793 midgard_lower_derivatives(ctx, block);
2794 midgard_legalize_invert(ctx, block);
2795 midgard_cull_dead_branch(ctx, block);
2796 }
2797
2798 if (ctx->stage == MESA_SHADER_FRAGMENT)
2799 mir_add_writeout_loops(ctx);
2800
2801 /* Analyze now that the code is known but before scheduling creates
2802 * pipeline registers which are harder to track */
2803 mir_analyze_helper_terminate(ctx);
2804 mir_analyze_helper_requirements(ctx);
2805
2806 /* Schedule! */
2807 midgard_schedule_program(ctx);
2808 mir_ra(ctx);
2809
2810 /* Now that all the bundles are scheduled and we can calculate block
2811 * sizes, emit actual branch instructions rather than placeholders */
2812
2813 int br_block_idx = 0;
2814
2815 mir_foreach_block(ctx, _block) {
2816 midgard_block *block = (midgard_block *) _block;
2817 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2818 for (int c = 0; c < bundle->instruction_count; ++c) {
2819 midgard_instruction *ins = bundle->instructions[c];
2820
2821 if (!midgard_is_branch_unit(ins->unit)) continue;
2822
2823 /* Parse some basic branch info */
2824 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2825 bool is_conditional = ins->branch.conditional;
2826 bool is_inverted = ins->branch.invert_conditional;
2827 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2828 bool is_writeout = ins->writeout;
2829
2830 /* Determine the block we're jumping to */
2831 int target_number = ins->branch.target_block;
2832
2833 /* Report the destination tag */
2834 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
2835
2836 /* Count up the number of quadwords we're
2837 * jumping over = number of quadwords until
2838 * (br_block_idx, target_number) */
2839
2840 int quadword_offset = 0;
2841
2842 if (is_discard) {
2843 /* Ignored */
2844 } else if (target_number > br_block_idx) {
2845 /* Jump forward */
2846
2847 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2848 midgard_block *blk = mir_get_block(ctx, idx);
2849 assert(blk);
2850
2851 quadword_offset += blk->quadword_count;
2852 }
2853 } else {
2854 /* Jump backwards */
2855
2856 for (int idx = br_block_idx; idx >= target_number; --idx) {
2857 midgard_block *blk = mir_get_block(ctx, idx);
2858 assert(blk);
2859
2860 quadword_offset -= blk->quadword_count;
2861 }
2862 }
2863
2864 /* Unconditional extended branches (far jumps)
2865 * have issues, so we always use a conditional
2866 * branch, setting the condition to always for
2867 * unconditional. For compact unconditional
2868 * branches, cond isn't used so it doesn't
2869 * matter what we pick. */
2870
2871 midgard_condition cond =
2872 !is_conditional ? midgard_condition_always :
2873 is_inverted ? midgard_condition_false :
2874 midgard_condition_true;
2875
2876 midgard_jmp_writeout_op op =
2877 is_discard ? midgard_jmp_writeout_op_discard :
2878 is_writeout ? midgard_jmp_writeout_op_writeout :
2879 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2880 midgard_jmp_writeout_op_branch_cond;
2881
2882 if (!is_compact) {
2883 midgard_branch_extended branch =
2884 midgard_create_branch_extended(
2885 cond, op,
2886 dest_tag,
2887 quadword_offset);
2888
2889 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2890 } else if (is_conditional || is_discard) {
2891 midgard_branch_cond branch = {
2892 .op = op,
2893 .dest_tag = dest_tag,
2894 .offset = quadword_offset,
2895 .cond = cond
2896 };
2897
2898 assert(branch.offset == quadword_offset);
2899
2900 memcpy(&ins->br_compact, &branch, sizeof(branch));
2901 } else {
2902 assert(op == midgard_jmp_writeout_op_branch_uncond);
2903
2904 midgard_branch_uncond branch = {
2905 .op = op,
2906 .dest_tag = dest_tag,
2907 .offset = quadword_offset,
2908 .unknown = 1
2909 };
2910
2911 assert(branch.offset == quadword_offset);
2912
2913 memcpy(&ins->br_compact, &branch, sizeof(branch));
2914 }
2915 }
2916 }
2917
2918 ++br_block_idx;
2919 }
2920
2921 /* Emit flat binary from the instruction arrays. Iterate each block in
2922 * sequence. Save instruction boundaries such that lookahead tags can
2923 * be assigned easily */
2924
2925 /* Cache _all_ bundles in source order for lookahead across failed branches */
2926
2927 int bundle_count = 0;
2928 mir_foreach_block(ctx, _block) {
2929 midgard_block *block = (midgard_block *) _block;
2930 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2931 }
2932 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2933 int bundle_idx = 0;
2934 mir_foreach_block(ctx, _block) {
2935 midgard_block *block = (midgard_block *) _block;
2936 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2937 source_order_bundles[bundle_idx++] = bundle;
2938 }
2939 }
2940
2941 int current_bundle = 0;
2942
2943         /* Midgard prefetches instruction types, so during emission we
2944          * need to look ahead to the next bundle's tag. For the last bundle
2945          * (or a final writeout), we report tag 1 instead. */
2946
2947 mir_foreach_block(ctx, _block) {
2948 midgard_block *block = (midgard_block *) _block;
2949 mir_foreach_bundle_in_block(block, bundle) {
2950 int lookahead = 1;
2951
2952 if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
2953 lookahead = source_order_bundles[current_bundle + 1]->tag;
2954
2955 emit_binary_bundle(ctx, block, bundle, compiled, lookahead);
2956 ++current_bundle;
2957 }
2958
2959 /* TODO: Free deeper */
2960 //util_dynarray_fini(&block->instructions);
2961 }
2962
2963 free(source_order_bundles);
2964
2965 /* Report the very first tag executed */
2966 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
2967
2968 /* Deal with off-by-one related to the fencepost problem */
2969 program->work_register_count = ctx->work_registers + 1;
2970 program->uniform_cutoff = ctx->uniform_cutoff;
2971
2972 program->blend_patch_offset = ctx->blend_constant_offset;
2973 program->tls_size = ctx->tls_size;
2974
2975 if (midgard_debug & MIDGARD_DBG_SHADERS)
2976 disassemble_midgard(stdout, program->compiled.data, program->compiled.size, gpu_id, ctx->stage);
2977
2978 if (midgard_debug & MIDGARD_DBG_SHADERDB || shaderdb) {
2979 unsigned nr_bundles = 0, nr_ins = 0;
2980
2981 /* Count instructions and bundles */
2982
2983 mir_foreach_block(ctx, _block) {
2984 midgard_block *block = (midgard_block *) _block;
2985 nr_bundles += util_dynarray_num_elements(
2986 &block->bundles, midgard_bundle);
2987
2988 mir_foreach_bundle_in_block(block, bun)
2989 nr_ins += bun->instruction_count;
2990 }
2991
2992                 /* Calculate thread count. Thread count is capped at
2993                  * certain work-register-count cutoffs */
2994
2995 unsigned nr_registers = program->work_register_count;
2996
2997 unsigned nr_threads =
2998 (nr_registers <= 4) ? 4 :
2999 (nr_registers <= 8) ? 2 :
3000 1;
3001
3002 /* Dump stats */
3003
3004 fprintf(stderr, "shader%d - %s shader: "
3005 "%u inst, %u bundles, %u quadwords, "
3006 "%u registers, %u threads, %u loops, "
3007 "%u:%u spills:fills\n",
3008 SHADER_DB_COUNT++,
3009 ctx->is_blend ? "PAN_SHADER_BLEND" :
3010 gl_shader_stage_name(ctx->stage),
3011 nr_ins, nr_bundles, ctx->quadword_count,
3012 nr_registers, nr_threads,
3013 ctx->loop_count,
3014 ctx->spills, ctx->fills);
3015 }
3016
3017 ralloc_free(ctx);
3018
3019 return 0;
3020 }