pan/mdg: Handle tilebuffer wait loops
[mesa.git] src/panfrost/midgard/midgard_compile.c
1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <sys/mman.h>
27 #include <fcntl.h>
28 #include <stdint.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <err.h>
32
33 #include "main/mtypes.h"
34 #include "compiler/glsl/glsl_to_nir.h"
35 #include "compiler/nir_types.h"
36 #include "compiler/nir/nir_builder.h"
37 #include "util/half_float.h"
38 #include "util/u_math.h"
39 #include "util/u_debug.h"
40 #include "util/u_dynarray.h"
41 #include "util/list.h"
43
44 #include "midgard.h"
45 #include "midgard_nir.h"
46 #include "midgard_compile.h"
47 #include "midgard_ops.h"
48 #include "helpers.h"
49 #include "compiler.h"
50 #include "midgard_quirks.h"
51
52 #include "disassemble.h"
53
54 static const struct debug_named_value debug_options[] = {
55 {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
56 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
57 {"shaderdb", MIDGARD_DBG_SHADERDB, "Print shader-db statistics"},
58 DEBUG_NAMED_VALUE_END
59 };
60
61 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
62
63 unsigned SHADER_DB_COUNT = 0;
64
65 int midgard_debug = 0;
66
67 #define DBG(fmt, ...) \
68 do { if (midgard_debug & MIDGARD_DBG_MSGS) \
69 fprintf(stderr, "%s:%d: "fmt, \
70 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
71 static midgard_block *
72 create_empty_block(compiler_context *ctx)
73 {
74 midgard_block *blk = rzalloc(ctx, midgard_block);
75
76 blk->base.predecessors = _mesa_set_create(blk,
77 _mesa_hash_pointer,
78 _mesa_key_pointer_equal);
79
80 blk->base.name = ctx->block_source_count++;
81
82 return blk;
83 }
84
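/* Split control flow: finish the current block and continue emission in a
 * fresh successor, so the scheduler cannot move instructions across the
 * barrier. */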
85 static void
86 schedule_barrier(compiler_context *ctx)
87 {
88 midgard_block *temp = ctx->after_block;
89 ctx->after_block = create_empty_block(ctx);
90 ctx->block_count++;
91 list_addtail(&ctx->after_block->base.link, &ctx->blocks);
92 list_inithead(&ctx->after_block->base.instructions);
93 pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
94 ctx->current_block = ctx->after_block;
95 ctx->after_block = temp;
96 }
97
98 /* Helpers to generate midgard_instructions using macro magic, since every
99 * driver seems to do it that way */
100
101 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
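/* e.g. EMIT(branch, false, false) expands to
 * emit_mir_instruction(ctx, v_branch(false, false)); */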
102
103 #define M_LOAD_STORE(name, store, T) \
104 static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
105 midgard_instruction i = { \
106 .type = TAG_LOAD_STORE_4, \
107 .mask = 0xF, \
108 .dest = ~0, \
109 .src = { ~0, ~0, ~0, ~0 }, \
110 .swizzle = SWIZZLE_IDENTITY_4, \
111 .load_store = { \
112 .op = midgard_op_##name, \
113 .address = address \
114 } \
115 }; \
116 \
117 if (store) { \
118 i.src[0] = ssa; \
119 i.src_types[0] = T; \
120 i.dest_type = T; \
121 } else { \
122 i.dest = ssa; \
123 i.dest_type = T; \
124 } \
125 return i; \
126 }
127
128 #define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
129 #define M_STORE(name, T) M_LOAD_STORE(name, true, T)
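/* For instance, M_LOAD(ld_attr_32, nir_type_uint32) below defines
 * m_ld_attr_32(unsigned ssa, unsigned address), a load writing `ssa`,
 * whereas an M_STORE-generated helper instead reads `ssa` as src[0]. */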
130
131 M_LOAD(ld_attr_32, nir_type_uint32);
132 M_LOAD(ld_vary_32, nir_type_uint32);
133 M_LOAD(ld_ubo_int4, nir_type_uint32);
134 M_LOAD(ld_int4, nir_type_uint32);
135 M_STORE(st_int4, nir_type_uint32);
136 M_LOAD(ld_color_buffer_32u, nir_type_uint32);
137 M_LOAD(ld_color_buffer_as_fp16, nir_type_float16);
138 M_STORE(st_vary_32, nir_type_uint32);
139 M_LOAD(ld_cubemap_coords, nir_type_uint32);
140 M_LOAD(ld_compute_id, nir_type_uint32);
141
142 static midgard_instruction
143 v_branch(bool conditional, bool invert)
144 {
145 midgard_instruction ins = {
146 .type = TAG_ALU_4,
147 .unit = ALU_ENAB_BRANCH,
148 .compact_branch = true,
149 .branch = {
150 .conditional = conditional,
151 .invert_conditional = invert
152 },
153 .dest = ~0,
154 .src = { ~0, ~0, ~0, ~0 },
155 };
156
157 return ins;
158 }
159
160 static midgard_branch_extended
161 midgard_create_branch_extended( midgard_condition cond,
162 midgard_jmp_writeout_op op,
163 unsigned dest_tag,
164 signed quadword_offset)
165 {
166 /* The condition code is actually a LUT describing a function to
167 * combine multiple condition codes. However, we only support a single
168 * condition code at the moment, so we just duplicate it a bunch of
169 * times. */
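/* e.g. cond = 0b01 duplicated into all eight 2-bit slots below gives
 * duplicated_cond = 0x5555. */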
170
171 uint16_t duplicated_cond =
172 (cond << 14) |
173 (cond << 12) |
174 (cond << 10) |
175 (cond << 8) |
176 (cond << 6) |
177 (cond << 4) |
178 (cond << 2) |
179 (cond << 0);
180
181 midgard_branch_extended branch = {
182 .op = op,
183 .dest_tag = dest_tag,
184 .offset = quadword_offset,
185 .cond = duplicated_cond
186 };
187
188 return branch;
189 }
190
191 static void
192 attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
193 {
194 ins->has_constants = true;
195 memcpy(&ins->constants, constants, 16);
196 }
197
198 static int
199 glsl_type_size(const struct glsl_type *type, bool bindless)
200 {
201 return glsl_count_attribute_slots(type, false);
202 }
203
204 /* Lower fdot2 to a vector multiplication followed by channel addition */
205 static void
206 midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
207 {
208 if (alu->op != nir_op_fdot2)
209 return;
210
211 b->cursor = nir_before_instr(&alu->instr);
212
213 nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
214 nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
215
216 nir_ssa_def *product = nir_fmul(b, src0, src1);
217
218 nir_ssa_def *sum = nir_fadd(b,
219 nir_channel(b, product, 0),
220 nir_channel(b, product, 1));
221
222 /* Replace the fdot2 with this sum */
223 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
224 }
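/* i.e. fdot2(a, b) is rewritten as (a * b).x + (a * b).y, with the
 * multiply computed once. */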
225
226 static bool
227 midgard_nir_lower_fdot2(nir_shader *shader)
228 {
229 bool progress = false;
230
231 nir_foreach_function(function, shader) {
232 if (!function->impl) continue;
233
234 nir_builder _b;
235 nir_builder *b = &_b;
236 nir_builder_init(b, function->impl);
237
238 nir_foreach_block(block, function->impl) {
239 nir_foreach_instr_safe(instr, block) {
240 if (instr->type != nir_instr_type_alu) continue;
241
242 nir_alu_instr *alu = nir_instr_as_alu(instr);
243 midgard_nir_lower_fdot2_body(b, alu);
244
245 progress |= (alu->op == nir_op_fdot2);
246 }
247 }
248
249 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
250
251 }
252
253 return progress;
254 }
255
256 static const nir_variable *
257 search_var(struct exec_list *vars, unsigned driver_loc)
258 {
259 nir_foreach_variable(var, vars) {
260 if (var->data.driver_location == driver_loc)
261 return var;
262 }
263
264 return NULL;
265 }
266
267 /* Midgard can write all of color, depth and stencil in a single writeout
268 * operation, so we merge depth/stencil stores with color stores.
269 * If there are no color stores, we add a write to the "depth RT".
270 */
271 static bool
272 midgard_nir_lower_zs_store(nir_shader *nir)
273 {
274 if (nir->info.stage != MESA_SHADER_FRAGMENT)
275 return false;
276
277 nir_variable *z_var = NULL, *s_var = NULL;
278
279 nir_foreach_variable(var, &nir->outputs) {
280 if (var->data.location == FRAG_RESULT_DEPTH)
281 z_var = var;
282 else if (var->data.location == FRAG_RESULT_STENCIL)
283 s_var = var;
284 }
285
286 if (!z_var && !s_var)
287 return false;
288
289 bool progress = false;
290
291 nir_foreach_function(function, nir) {
292 if (!function->impl) continue;
293
294 nir_intrinsic_instr *z_store = NULL, *s_store = NULL;
295
296 nir_foreach_block(block, function->impl) {
297 nir_foreach_instr_safe(instr, block) {
298 if (instr->type != nir_instr_type_intrinsic)
299 continue;
300
301 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
302 if (intr->intrinsic != nir_intrinsic_store_output)
303 continue;
304
305 if (z_var && nir_intrinsic_base(intr) == z_var->data.driver_location) {
306 assert(!z_store);
307 z_store = intr;
308 }
309
310 if (s_var && nir_intrinsic_base(intr) == s_var->data.driver_location) {
311 assert(!s_store);
312 s_store = intr;
313 }
314 }
315 }
316
317 if (!z_store && !s_store) continue;
318
319 bool replaced = false;
320
321 nir_foreach_block(block, function->impl) {
322 nir_foreach_instr_safe(instr, block) {
323 if (instr->type != nir_instr_type_intrinsic)
324 continue;
325
326 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
327 if (intr->intrinsic != nir_intrinsic_store_output)
328 continue;
329
330 const nir_variable *var = search_var(&nir->outputs, nir_intrinsic_base(intr));
331 assert(var);
332
333 if (var->data.location != FRAG_RESULT_COLOR &&
334 var->data.location < FRAG_RESULT_DATA0)
335 continue;
336
337 assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");
338
339 nir_builder b;
340 nir_builder_init(&b, function->impl);
341
342 assert(!z_store || z_store->instr.block == instr->block);
343 assert(!s_store || s_store->instr.block == instr->block);
344 b.cursor = nir_after_block_before_jump(instr->block);
345
346 nir_intrinsic_instr *combined_store;
347 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
348
349 combined_store->num_components = intr->src[0].ssa->num_components;
350
351 nir_intrinsic_set_base(combined_store, nir_intrinsic_base(intr));
352
353 unsigned writeout = PAN_WRITEOUT_C;
354 if (z_store)
355 writeout |= PAN_WRITEOUT_Z;
356 if (s_store)
357 writeout |= PAN_WRITEOUT_S;
358
359 nir_intrinsic_set_component(combined_store, writeout);
360
361 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
362
363 struct nir_ssa_def *src[4] = {
364 intr->src[0].ssa,
365 intr->src[1].ssa,
366 z_store ? z_store->src[0].ssa : zero,
367 s_store ? s_store->src[0].ssa : zero,
368 };
369
370 for (int i = 0; i < 4; ++i)
371 combined_store->src[i] = nir_src_for_ssa(src[i]);
372
373 nir_builder_instr_insert(&b, &combined_store->instr);
374
375 nir_instr_remove(instr);
376
377 replaced = true;
378 }
379 }
380
381 /* Insert a store to the depth RT (0xff) if needed */
382 if (!replaced) {
383 nir_builder b;
384 nir_builder_init(&b, function->impl);
385
386 nir_block *block = NULL;
387 if (z_store && s_store)
388 assert(z_store->instr.block == s_store->instr.block);
389
390 if (z_store)
391 block = z_store->instr.block;
392 else
393 block = s_store->instr.block;
394
395 b.cursor = nir_after_block_before_jump(block);
396
397 nir_intrinsic_instr *combined_store;
398 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
399
400 combined_store->num_components = 4;
401
402 nir_intrinsic_set_base(combined_store, 0);
403
404 unsigned writeout = 0;
405 if (z_store)
406 writeout |= PAN_WRITEOUT_Z;
407 if (s_store)
408 writeout |= PAN_WRITEOUT_S;
409
410 nir_intrinsic_set_component(combined_store, writeout);
411
412 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
413
414 struct nir_ssa_def *src[4] = {
415 nir_imm_vec4(&b, 0, 0, 0, 0),
416 zero,
417 z_store ? z_store->src[0].ssa : zero,
418 s_store ? s_store->src[0].ssa : zero,
419 };
420
421 for (int i = 0; i < 4; ++i)
422 combined_store->src[i] = nir_src_for_ssa(src[i]);
423
424 nir_builder_instr_insert(&b, &combined_store->instr);
425 }
426
427 if (z_store)
428 nir_instr_remove(&z_store->instr);
429
430 if (s_store)
431 nir_instr_remove(&s_store->instr);
432
433 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
434 progress = true;
435 }
436
437 return progress;
438 }
439
440 /* Optimise the NIR; among other passes this flushes undefined values to zero */
441
442 static void
443 optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
444 {
445 bool progress;
446 unsigned lower_flrp =
447 (nir->options->lower_flrp16 ? 16 : 0) |
448 (nir->options->lower_flrp32 ? 32 : 0) |
449 (nir->options->lower_flrp64 ? 64 : 0);
450
451 NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
452 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);
453
454 nir_lower_tex_options lower_tex_options = {
455 .lower_txs_lod = true,
456 .lower_txp = ~0,
457 .lower_tex_without_implicit_lod =
458 (quirks & MIDGARD_EXPLICIT_LOD),
459
460 /* TODO: we have native gradient.. */
461 .lower_txd = true,
462 };
463
464 NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
465
466 /* Must lower fdot2 after tex is lowered */
467 NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
468
469 /* T720 is broken. */
470
471 if (quirks & MIDGARD_BROKEN_LOD)
472 NIR_PASS_V(nir, midgard_nir_lod_errata);
473
474 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_early);
475
476 do {
477 progress = false;
478
479 NIR_PASS(progress, nir, nir_lower_var_copies);
480 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
481
482 NIR_PASS(progress, nir, nir_copy_prop);
483 NIR_PASS(progress, nir, nir_opt_remove_phis);
484 NIR_PASS(progress, nir, nir_opt_dce);
485 NIR_PASS(progress, nir, nir_opt_dead_cf);
486 NIR_PASS(progress, nir, nir_opt_cse);
487 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
488 NIR_PASS(progress, nir, nir_opt_algebraic);
489 NIR_PASS(progress, nir, nir_opt_constant_folding);
490
491 if (lower_flrp != 0) {
492 bool lower_flrp_progress = false;
493 NIR_PASS(lower_flrp_progress,
494 nir,
495 nir_lower_flrp,
496 lower_flrp,
497 false /* always_precise */,
498 nir->options->lower_ffma);
499 if (lower_flrp_progress) {
500 NIR_PASS(progress, nir,
501 nir_opt_constant_folding);
502 progress = true;
503 }
504
505 /* Nothing should rematerialize any flrps, so we only
506 * need to do this lowering once.
507 */
508 lower_flrp = 0;
509 }
510
511 NIR_PASS(progress, nir, nir_opt_undef);
512 NIR_PASS(progress, nir, nir_undef_to_zero);
513
514 NIR_PASS(progress, nir, nir_opt_loop_unroll,
515 nir_var_shader_in |
516 nir_var_shader_out |
517 nir_var_function_temp);
518
519 NIR_PASS(progress, nir, nir_opt_vectorize);
520 } while (progress);
521
522 /* Run after opts so it can hit more */
523 if (!is_blend)
524 NIR_PASS(progress, nir, nir_fuse_io_16);
525
526 /* Must be run at the end to prevent creation of fsin/fcos ops */
527 NIR_PASS(progress, nir, midgard_nir_scale_trig);
528
529 do {
530 progress = false;
531
532 NIR_PASS(progress, nir, nir_opt_dce);
533 NIR_PASS(progress, nir, nir_opt_algebraic);
534 NIR_PASS(progress, nir, nir_opt_constant_folding);
535 NIR_PASS(progress, nir, nir_copy_prop);
536 } while (progress);
537
538 NIR_PASS(progress, nir, nir_opt_algebraic_late);
539 NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
540
541 /* We implement booleans as 32-bit 0/~0 */
542 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
543
544 /* Now that booleans are lowered, we can run our late opts */
545 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
546 NIR_PASS(progress, nir, midgard_nir_cancel_inot);
547
548 NIR_PASS(progress, nir, nir_copy_prop);
549 NIR_PASS(progress, nir, nir_opt_dce);
550
551 /* Take us out of SSA */
552 NIR_PASS(progress, nir, nir_lower_locals_to_regs);
553 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
554
555 /* We are a vector architecture; write combine where possible */
556 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
557 NIR_PASS(progress, nir, nir_lower_vec_to_movs);
558
559 NIR_PASS(progress, nir, nir_opt_dce);
560 }
561
562 /* Do not actually emit a load; instead, cache the constant for inlining */
563
564 static void
565 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
566 {
567 nir_ssa_def def = instr->def;
568
569 midgard_constants *consts = rzalloc(NULL, midgard_constants);
570
571 assert(instr->def.num_components * instr->def.bit_size <= sizeof(*consts) * 8);
572
573 #define RAW_CONST_COPY(bits) \
574 nir_const_value_to_array(consts->u##bits, instr->value, \
575 instr->def.num_components, u##bits)
576
577 switch (instr->def.bit_size) {
578 case 64:
579 RAW_CONST_COPY(64);
580 break;
581 case 32:
582 RAW_CONST_COPY(32);
583 break;
584 case 16:
585 RAW_CONST_COPY(16);
586 break;
587 case 8:
588 RAW_CONST_COPY(8);
589 break;
590 default:
591 unreachable("Invalid bit_size for load_const instruction\n");
592 }
593
594 /* Shifted for SSA, +1 for off-by-one */
595 _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, consts);
596 }
597
598 /* Normally constants are embedded implicitly, but for I/O and such we have to
599 * explicitly emit a move with the constant source */
600
601 static void
602 emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
603 {
604 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
605
606 if (constant_value) {
607 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
608 attach_constants(ctx, &ins, constant_value, node + 1);
609 emit_mir_instruction(ctx, ins);
610 }
611 }
612
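/* True if the swizzle mixes components (e.g. .xyxy), false for a pure
 * broadcast (e.g. .xxxx); used below to choose between the vector and
 * scalar forms of csel. */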
613 static bool
614 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
615 {
616 unsigned comp = src->swizzle[0];
617
618 for (unsigned c = 1; c < nr_components; ++c) {
619 if (src->swizzle[c] != comp)
620 return true;
621 }
622
623 return false;
624 }
625
626 #define ALU_CASE(nir, _op) \
627 case nir_op_##nir: \
628 op = midgard_alu_op_##_op; \
629 assert(src_bitsize == dst_bitsize); \
630 break;
631
632 #define ALU_CASE_RTZ(nir, _op) \
633 case nir_op_##nir: \
634 op = midgard_alu_op_##_op; \
635 roundmode = MIDGARD_RTZ; \
636 break;
637
638 #define ALU_CHECK_CMP(sext) \
639 assert(src_bitsize == 16 || src_bitsize == 32); \
640 assert(dst_bitsize == 16 || dst_bitsize == 32);
641
642 #define ALU_CASE_BCAST(nir, _op, count) \
643 case nir_op_##nir: \
644 op = midgard_alu_op_##_op; \
645 broadcast_swizzle = count; \
646 ALU_CHECK_CMP(true); \
647 break;
648
649 #define ALU_CASE_CMP(nir, _op, sext) \
650 case nir_op_##nir: \
651 op = midgard_alu_op_##_op; \
652 ALU_CHECK_CMP(sext); \
653 break;
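/* e.g. ALU_CASE(fadd, fadd) in emit_alu below maps nir_op_fadd to
 * midgard_alu_op_fadd and asserts the source and destination bit sizes
 * match. */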
654
655 /* Analyze the sizes of the dest and inputs to determine reg mode. */
656
657 static midgard_reg_mode
658 reg_mode_for_nir(nir_alu_instr *instr)
659 {
660 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
661 unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
662 unsigned max_bitsize = MAX2(src_bitsize, dst_bitsize);
663
664 /* We don't have fp16 LUTs, so we'll want to emit code like:
665 *
666 * vlut.fsinr hr0, hr0
667 *
668 * where both input and output are 16-bit but the operation is carried
669 * out in 32-bit
670 */
671
672 switch (instr->op) {
673 case nir_op_fsqrt:
674 case nir_op_frcp:
675 case nir_op_frsq:
676 case nir_op_fsin:
677 case nir_op_fcos:
678 case nir_op_fexp2:
679 case nir_op_flog2:
680 max_bitsize = MAX2(max_bitsize, 32);
681 break;
682
683 /* These get lowered to moves */
684 case nir_op_pack_32_4x8:
685 max_bitsize = 8;
686 break;
687 case nir_op_pack_32_2x16:
688 max_bitsize = 16;
689 break;
690 default:
691 break;
692 }
693
694
695 switch (max_bitsize) {
696 /* Use 16 pipe for 8 since we don't support vec16 yet */
697 case 8:
698 case 16:
699 return midgard_reg_mode_16;
700 case 32:
701 return midgard_reg_mode_32;
702 case 64:
703 return midgard_reg_mode_64;
704 default:
705 unreachable("Invalid bit size");
706 }
707 }
708
709 /* Compare mir_lower_invert */
710 static bool
711 nir_accepts_inot(nir_op op, unsigned src)
712 {
713 switch (op) {
714 case nir_op_ior:
715 case nir_op_iand: /* TODO: b2f16 */
716 case nir_op_ixor:
717 return true;
718 case nir_op_b32csel:
719 /* Only the condition */
720 return (src == 0);
721 default:
722 return false;
723 }
724 }
725
726 static bool
727 mir_accept_dest_mod(compiler_context *ctx, nir_dest **dest, nir_op op)
728 {
729 if (pan_has_dest_mod(dest, op)) {
730 assert((*dest)->is_ssa);
731 BITSET_SET(ctx->already_emitted, (*dest)->ssa.index);
732 return true;
733 }
734
735 return false;
736 }
737
738 static void
739 mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i, unsigned to, bool *abs, bool *neg, bool *not, enum midgard_roundmode *roundmode, bool is_int, unsigned bcast_count)
740 {
741 nir_alu_src src = instr->src[i];
742
743 if (!is_int) {
744 if (pan_has_source_mod(&src, nir_op_fneg))
745 *neg = !(*neg);
746
747 if (pan_has_source_mod(&src, nir_op_fabs))
748 *abs = true;
749 }
750
751 if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
752 *not = true;
753
754 if (roundmode) {
755 if (pan_has_source_mod(&src, nir_op_fround_even))
756 *roundmode = MIDGARD_RTE;
757
758 if (pan_has_source_mod(&src, nir_op_ftrunc))
759 *roundmode = MIDGARD_RTZ;
760
761 if (pan_has_source_mod(&src, nir_op_ffloor))
762 *roundmode = MIDGARD_RTN;
763
764 if (pan_has_source_mod(&src, nir_op_fceil))
765 *roundmode = MIDGARD_RTP;
766 }
767
768 unsigned bits = nir_src_bit_size(src.src);
769
770 ins->src[to] = nir_src_index(NULL, &src.src);
771 ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
772
773 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
774 ins->swizzle[to][c] = src.swizzle[
775 (!bcast_count || c < bcast_count) ? c :
776 (bcast_count - 1)];
777 }
778 }
779
780 /* Midgard features both fcsel and icsel, depending on whether you want int or
781 * float modifiers. NIR's csel is typeless, so we want a heuristic to guess if
782 * we should emit an int or float csel depending on what modifiers could be
783 * placed. In the absence of modifiers, this is probably arbitrary. */
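/* e.g. float modifiers (fneg/fabs, f2f conversions) on the sources push
 * the score positive and select fcsel; integer size-conversion mods push
 * it negative, towards icsel. */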
784
785 static bool
786 mir_is_bcsel_float(nir_alu_instr *instr)
787 {
788 nir_op intmods[] = {
789 nir_op_i2i8, nir_op_i2i16,
790 nir_op_i2i32, nir_op_i2i64
791 };
792
793 nir_op floatmods[] = {
794 nir_op_fabs, nir_op_fneg,
795 nir_op_f2f16, nir_op_f2f32,
796 nir_op_f2f64
797 };
798
799 nir_op floatdestmods[] = {
800 nir_op_fsat, nir_op_fsat_signed, nir_op_fclamp_pos,
801 nir_op_f2f16, nir_op_f2f32
802 };
803
804 signed score = 0;
805
806 for (unsigned i = 1; i < 3; ++i) {
807 nir_alu_src s = instr->src[i];
808 for (unsigned q = 0; q < ARRAY_SIZE(intmods); ++q) {
809 if (pan_has_source_mod(&s, intmods[q]))
810 score--;
811 }
812 }
813
814 for (unsigned i = 1; i < 3; ++i) {
815 nir_alu_src s = instr->src[i];
816 for (unsigned q = 0; q < ARRAY_SIZE(floatmods); ++q) {
817 if (pan_has_source_mod(&s, floatmods[q]))
818 score++;
819 }
820 }
821
822 for (unsigned q = 0; q < ARRAY_SIZE(floatdestmods); ++q) {
823 nir_dest *dest = &instr->dest.dest;
824 if (pan_has_dest_mod(&dest, floatdestmods[q]))
825 score++;
826 }
827
828 return (score > 0);
829 }
830
831 static void
832 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
833 {
834 nir_dest *dest = &instr->dest.dest;
835
836 if (dest->is_ssa && BITSET_TEST(ctx->already_emitted, dest->ssa.index))
837 return;
838
839 /* Derivatives end up emitted on the texture pipe, not the ALUs. This
840 * is handled elsewhere */
841
842 if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
843 midgard_emit_derivatives(ctx, instr);
844 return;
845 }
846
847 bool is_ssa = dest->is_ssa;
848
849 unsigned nr_components = nir_dest_num_components(*dest);
850 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
851 unsigned op = 0;
852
853 /* Number of components valid to check for the instruction (the rest
854 * will be forced to the last), or 0 to use as-is. Relevant as
855 * ball-type instructions have a channel count in NIR but are all vec4
856 * in Midgard */
857
858 unsigned broadcast_swizzle = 0;
859
860 /* What register mode should we operate in? */
861 midgard_reg_mode reg_mode =
862 reg_mode_for_nir(instr);
863
864 /* Should we swap arguments? */
865 bool flip_src12 = false;
866
867 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
868 unsigned dst_bitsize = nir_dest_bit_size(*dest);
869
870 enum midgard_roundmode roundmode = MIDGARD_RTE;
871
872 switch (instr->op) {
873 ALU_CASE(fadd, fadd);
874 ALU_CASE(fmul, fmul);
875 ALU_CASE(fmin, fmin);
876 ALU_CASE(fmax, fmax);
877 ALU_CASE(imin, imin);
878 ALU_CASE(imax, imax);
879 ALU_CASE(umin, umin);
880 ALU_CASE(umax, umax);
881 ALU_CASE(ffloor, ffloor);
882 ALU_CASE(fround_even, froundeven);
883 ALU_CASE(ftrunc, ftrunc);
884 ALU_CASE(fceil, fceil);
885 ALU_CASE(fdot3, fdot3);
886 ALU_CASE(fdot4, fdot4);
887 ALU_CASE(iadd, iadd);
888 ALU_CASE(isub, isub);
889 ALU_CASE(imul, imul);
890
891 /* Zero shoved as second-arg */
892 ALU_CASE(iabs, iabsdiff);
893
894 ALU_CASE(mov, imov);
895
896 ALU_CASE_CMP(feq32, feq, false);
897 ALU_CASE_CMP(fne32, fne, false);
898 ALU_CASE_CMP(flt32, flt, false);
899 ALU_CASE_CMP(ieq32, ieq, true);
900 ALU_CASE_CMP(ine32, ine, true);
901 ALU_CASE_CMP(ilt32, ilt, true);
902 ALU_CASE_CMP(ult32, ult, false);
903
904 /* We don't have a native b2f32 instruction. Instead, like many
905 * GPUs, we exploit booleans as 0/~0 for false/true, and
906 * correspondingly AND
907 * by 1.0 to do the type conversion. For the moment, prime us
908 * to emit:
909 *
910 * iand [whatever], #0
911 *
912 * At the end of emit_alu (as MIR), we'll fix-up the constant
913 */
914
915 ALU_CASE_CMP(b2f32, iand, true);
916 ALU_CASE_CMP(b2f16, iand, true);
917 ALU_CASE_CMP(b2i32, iand, true);
918
919 /* Likewise, we don't have a dedicated f2b32 instruction, but
920 * we can do a "not equal to 0.0" test. */
921
922 ALU_CASE_CMP(f2b32, fne, false);
923 ALU_CASE_CMP(i2b32, ine, true);
924
925 ALU_CASE(frcp, frcp);
926 ALU_CASE(frsq, frsqrt);
927 ALU_CASE(fsqrt, fsqrt);
928 ALU_CASE(fexp2, fexp2);
929 ALU_CASE(flog2, flog2);
930
931 ALU_CASE_RTZ(f2i64, f2i_rte);
932 ALU_CASE_RTZ(f2u64, f2u_rte);
933 ALU_CASE_RTZ(i2f64, i2f_rte);
934 ALU_CASE_RTZ(u2f64, u2f_rte);
935
936 ALU_CASE_RTZ(f2i32, f2i_rte);
937 ALU_CASE_RTZ(f2u32, f2u_rte);
938 ALU_CASE_RTZ(i2f32, i2f_rte);
939 ALU_CASE_RTZ(u2f32, u2f_rte);
940
941 ALU_CASE_RTZ(f2i8, f2i_rte);
942 ALU_CASE_RTZ(f2u8, f2u_rte);
943
944 ALU_CASE_RTZ(f2i16, f2i_rte);
945 ALU_CASE_RTZ(f2u16, f2u_rte);
946 ALU_CASE_RTZ(i2f16, i2f_rte);
947 ALU_CASE_RTZ(u2f16, u2f_rte);
948
949 ALU_CASE(fsin, fsin);
950 ALU_CASE(fcos, fcos);
951
952 /* We'll get 0 in the second arg, so:
953 * ~a = ~(a | 0) = nor(a, 0) */
954 ALU_CASE(inot, inor);
955 ALU_CASE(iand, iand);
956 ALU_CASE(ior, ior);
957 ALU_CASE(ixor, ixor);
958 ALU_CASE(ishl, ishl);
959 ALU_CASE(ishr, iasr);
960 ALU_CASE(ushr, ilsr);
961
962 ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
963 ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
964 ALU_CASE_CMP(b32all_fequal4, fball_eq, true);
965
966 ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
967 ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
968 ALU_CASE_CMP(b32any_fnequal4, fbany_neq, true);
969
970 ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
971 ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
972 ALU_CASE_CMP(b32all_iequal4, iball_eq, true);
973
974 ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
975 ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
976 ALU_CASE_CMP(b32any_inequal4, ibany_neq, true);
977
978 /* Source mods will be shoved in later */
979 ALU_CASE(fabs, fmov);
980 ALU_CASE(fneg, fmov);
981 ALU_CASE(fsat, fmov);
982 ALU_CASE(fsat_signed, fmov);
983 ALU_CASE(fclamp_pos, fmov);
984
985 /* For size conversion, we use a move. Ideally though we would squash
986 * these ops together; maybe that has to happen afterwards in NIR as part of
987 * propagation...? An earlier algebraic pass ensured we step down by
988 * only / exactly one size. If stepping down, we use a dest override to
989 * reduce the size; if stepping up, we use a larger-sized move with a
990 * half source and a sign/zero-extension modifier */
991
992 case nir_op_i2i8:
993 case nir_op_i2i16:
994 case nir_op_i2i32:
995 case nir_op_i2i64:
996 case nir_op_u2u8:
997 case nir_op_u2u16:
998 case nir_op_u2u32:
999 case nir_op_u2u64:
1000 case nir_op_f2f16:
1001 case nir_op_f2f32:
1002 case nir_op_f2f64: {
1003 if (instr->op == nir_op_f2f16 || instr->op == nir_op_f2f32 ||
1004 instr->op == nir_op_f2f64)
1005 op = midgard_alu_op_fmov;
1006 else
1007 op = midgard_alu_op_imov;
1008
1009 break;
1010 }
1011
1012 /* For greater-or-equal, we lower to less-or-equal and flip the
1013 * arguments */
1014
1015 case nir_op_fge:
1016 case nir_op_fge32:
1017 case nir_op_ige32:
1018 case nir_op_uge32: {
1019 op =
1020 instr->op == nir_op_fge ? midgard_alu_op_fle :
1021 instr->op == nir_op_fge32 ? midgard_alu_op_fle :
1022 instr->op == nir_op_ige32 ? midgard_alu_op_ile :
1023 instr->op == nir_op_uge32 ? midgard_alu_op_ule :
1024 0;
1025
1026 flip_src12 = true;
1027 ALU_CHECK_CMP(false);
1028 break;
1029 }
1030
1031 case nir_op_b32csel: {
1032 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
1033 bool is_float = mir_is_bcsel_float(instr);
1034 op = is_float ?
1035 (mixed ? midgard_alu_op_fcsel_v : midgard_alu_op_fcsel) :
1036 (mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel);
1037
1038 break;
1039 }
1040
1041 case nir_op_unpack_32_2x16:
1042 case nir_op_unpack_32_4x8:
1043 case nir_op_pack_32_2x16:
1044 case nir_op_pack_32_4x8: {
1045 op = midgard_alu_op_imov;
1046 break;
1047 }
1048
1049 default:
1050 DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
1051 assert(0);
1052 return;
1053 }
1054
1055 /* Promote imov to fmov if it might help inline a constant */
1056 if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src)
1057 && nir_src_bit_size(instr->src[0].src) == 32
1058 && nir_is_same_comp_swizzle(instr->src[0].swizzle,
1059 nir_src_num_components(instr->src[0].src))) {
1060 op = midgard_alu_op_fmov;
1061 }
1062
1063 /* Midgard can perform certain modifiers on output of an ALU op */
1064
1065 unsigned outmod = 0;
1066 bool is_int = midgard_is_integer_op(op);
1067
1068 if (midgard_is_integer_out_op(op)) {
1069 outmod = midgard_outmod_int_wrap;
1070 } else if (instr->op == nir_op_fsat) {
1071 outmod = midgard_outmod_sat;
1072 } else if (instr->op == nir_op_fsat_signed) {
1073 outmod = midgard_outmod_sat_signed;
1074 } else if (instr->op == nir_op_fclamp_pos) {
1075 outmod = midgard_outmod_pos;
1076 }
1077
1078 /* Fetch unit, quirks, etc information */
1079 unsigned opcode_props = alu_opcode_props[op].props;
1080 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
1081
1082 /* Look for floating point mods. We have the mods fsat, fsat_signed,
1083 * and fpos. We also have the relations (note 3 * 2 = 6 cases):
1084 *
1085 * fsat_signed(fpos(x)) = fsat(x)
1086 * fsat_signed(fsat(x)) = fsat(x)
1087 * fpos(fsat_signed(x)) = fsat(x)
1088 * fpos(fsat(x)) = fsat(x)
1089 * fsat(fsat_signed(x)) = fsat(x)
1090 * fsat(fpos(x)) = fsat(x)
1091 *
1092 * So by cases any composition of output modifiers is equivalent to
1093 * fsat alone.
1094 */
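/* e.g. fpos composed with fsat_signed gives count = 2 below, which
 * collapses to plain midgard_outmod_sat per the relations above. */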
1095
1096 if (!is_int && !(opcode_props & OP_TYPE_CONVERT)) {
1097 bool fpos = mir_accept_dest_mod(ctx, &dest, nir_op_fclamp_pos);
1098 bool fsat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat);
1099 bool ssat = mir_accept_dest_mod(ctx, &dest, nir_op_fsat_signed);
1100 bool prior = (outmod != midgard_outmod_none);
1101 int count = (int) prior + (int) fpos + (int) ssat + (int) fsat;
1102
1103 outmod = ((count > 1) || fsat) ? midgard_outmod_sat :
1104 fpos ? midgard_outmod_pos :
1105 ssat ? midgard_outmod_sat_signed :
1106 outmod;
1107 }
1108
1109 midgard_instruction ins = {
1110 .type = TAG_ALU_4,
1111 .dest = nir_dest_index(dest),
1112 .dest_type = nir_op_infos[instr->op].output_type
1113 | nir_dest_bit_size(*dest),
1114 .roundmode = roundmode,
1115 };
1116
1117 enum midgard_roundmode *roundptr = (opcode_props & MIDGARD_ROUNDS) ?
1118 &ins.roundmode : NULL;
1119
1120 for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
1121 ins.src[i] = ~0;
1122
1123 if (quirk_flipped_r24) {
1124 ins.src[0] = ~0;
1125 mir_copy_src(&ins, instr, 0, 1, &ins.src_abs[1], &ins.src_neg[1], &ins.src_invert[1], roundptr, is_int, broadcast_swizzle);
1126 } else {
1127 for (unsigned i = 0; i < nr_inputs; ++i) {
1128 unsigned to = i;
1129
1130 if (instr->op == nir_op_b32csel) {
1131 /* The condition is the first argument; move
1132 * the other arguments up one to be a binary
1133 * instruction for Midgard with the condition
1134 * last */
1135
1136 if (i == 0)
1137 to = 2;
1138 else if (flip_src12)
1139 to = 2 - i;
1140 else
1141 to = i - 1;
1142 } else if (flip_src12) {
1143 to = 1 - to;
1144 }
1145
1146 mir_copy_src(&ins, instr, i, to, &ins.src_abs[to], &ins.src_neg[to], &ins.src_invert[to], roundptr, is_int, broadcast_swizzle);
1147
1148 /* (!c) ? a : b = c ? b : a */
1149 if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
1150 ins.src_invert[2] = false;
1151 flip_src12 ^= true;
1152 }
1153 }
1154 }
1155
1156 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
1157 /* Lowered to move */
1158 if (instr->op == nir_op_fneg)
1159 ins.src_neg[1] ^= true;
1160
1161 if (instr->op == nir_op_fabs)
1162 ins.src_abs[1] = true;
1163 }
1164
1165 ins.mask = mask_of(nr_components);
1166
1167 midgard_vector_alu alu = {
1168 .op = op,
1169 .reg_mode = reg_mode,
1170 .outmod = outmod,
1171 };
1172
1173 /* Apply writemask if non-SSA, keeping in mind that we can't write to
1174 * components that don't exist. Note modifier => SSA => !reg => no
1175 * writemask, so we don't have to worry about writemasks here. */
1176
1177 if (!is_ssa)
1178 ins.mask &= instr->dest.write_mask;
1179
1180 ins.alu = alu;
1181
1182 /* Late fixup for emulated instructions */
1183
1184 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
1185 /* Presently, our second argument is an inline #0 constant.
1186 * Switch over to an embedded 1.0 constant (that can't fit
1187 * inline, since we're 32-bit, not 16-bit like the inline
1188 * constants) */
1189
1190 ins.has_inline_constant = false;
1191 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1192 ins.src_types[1] = nir_type_float32;
1193 ins.has_constants = true;
1194
1195 if (instr->op == nir_op_b2f32)
1196 ins.constants.f32[0] = 1.0f;
1197 else
1198 ins.constants.i32[0] = 1;
1199
1200 for (unsigned c = 0; c < 16; ++c)
1201 ins.swizzle[1][c] = 0;
1202 } else if (instr->op == nir_op_b2f16) {
1203 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1204 ins.src_types[1] = nir_type_float16;
1205 ins.has_constants = true;
1206 ins.constants.i16[0] = _mesa_float_to_half(1.0);
1207
1208 for (unsigned c = 0; c < 16; ++c)
1209 ins.swizzle[1][c] = 0;
1210 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
1211 /* Lots of instructions need a 0 plonked in */
1212 ins.has_inline_constant = false;
1213 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1214 ins.src_types[1] = nir_type_uint32;
1215 ins.has_constants = true;
1216 ins.constants.u32[0] = 0;
1217
1218 for (unsigned c = 0; c < 16; ++c)
1219 ins.swizzle[1][c] = 0;
1220 } else if (instr->op == nir_op_pack_32_2x16) {
1221 ins.dest_type = nir_type_uint16;
1222 ins.mask = mask_of(nr_components * 2);
1223 ins.is_pack = true;
1224 } else if (instr->op == nir_op_pack_32_4x8) {
1225 ins.dest_type = nir_type_uint8;
1226 ins.mask = mask_of(nr_components * 4);
1227 ins.is_pack = true;
1228 } else if (instr->op == nir_op_unpack_32_2x16) {
1229 ins.dest_type = nir_type_uint32;
1230 ins.mask = mask_of(nr_components >> 1);
1231 ins.is_pack = true;
1232 } else if (instr->op == nir_op_unpack_32_4x8) {
1233 ins.dest_type = nir_type_uint32;
1234 ins.mask = mask_of(nr_components >> 2);
1235 ins.is_pack = true;
1236 }
1237
1238 if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
1239 /* To avoid duplicating the lookup tables (probably), true LUT
1240 * instructions can only operate as if they were scalars. Lower
1241 * them here by changing the component. */
1242
1243 unsigned orig_mask = ins.mask;
1244
1245 unsigned swizzle_back[MIR_VEC_COMPONENTS];
1246 memcpy(&swizzle_back, ins.swizzle[0], sizeof(swizzle_back));
1247
1248 midgard_instruction ins_split[MIR_VEC_COMPONENTS];
1249 unsigned ins_count = 0;
1250
1251 for (int i = 0; i < nr_components; ++i) {
1252 /* Mask the associated component, dropping the
1253 * instruction if needed */
1254
1255 ins.mask = 1 << i;
1256 ins.mask &= orig_mask;
1257
1258 for (unsigned j = 0; j < ins_count; ++j) {
1259 if (swizzle_back[i] == ins_split[j].swizzle[0][0]) {
1260 ins_split[j].mask |= ins.mask;
1261 ins.mask = 0;
1262 break;
1263 }
1264 }
1265
1266 if (!ins.mask)
1267 continue;
1268
1269 for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
1270 ins.swizzle[0][j] = swizzle_back[i]; /* Pull from the correct component */
1271
1272 ins_split[ins_count] = ins;
1273
1274 ++ins_count;
1275 }
1276
1277 for (unsigned i = 0; i < ins_count; ++i) {
1278 emit_mir_instruction(ctx, ins_split[i]);
1279 }
1280 } else {
1281 emit_mir_instruction(ctx, ins);
1282 }
1283 }
1284
1285 #undef ALU_CASE
1286
1287 static void
1288 mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
1289 {
1290 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1291 unsigned nir_mask = 0;
1292 unsigned dsize = 0;
1293
1294 if (is_read) {
1295 nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1296 dsize = nir_dest_bit_size(intr->dest);
1297 } else {
1298 nir_mask = nir_intrinsic_write_mask(intr);
1299 dsize = 32;
1300 }
1301
1302 /* Once we have the NIR mask, we need to normalize to work in 32-bit space */
1303 unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
1304 mir_set_bytemask(ins, bytemask);
1305 ins->dest_type = nir_type_uint | dsize;
1306 }
1307
1308 /* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1309 * optimized) versions of UBO #0 */
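/* e.g. emit_intrinsic below reads uniforms with index 0, while a load_ubo
 * of block N passes index N + 1. */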
1310
1311 static midgard_instruction *
1312 emit_ubo_read(
1313 compiler_context *ctx,
1314 nir_instr *instr,
1315 unsigned dest,
1316 unsigned offset,
1317 nir_src *indirect_offset,
1318 unsigned indirect_shift,
1319 unsigned index)
1320 {
1321 /* TODO: half-floats */
1322
1323 midgard_instruction ins = m_ld_ubo_int4(dest, 0);
1324 ins.constants.u32[0] = offset;
1325
1326 if (instr->type == nir_instr_type_intrinsic)
1327 mir_set_intr_mask(instr, &ins, true);
1328
1329 if (indirect_offset) {
1330 ins.src[2] = nir_src_index(ctx, indirect_offset);
1331 ins.src_types[2] = nir_type_uint32;
1332 ins.load_store.arg_2 = (indirect_shift << 5);
1333
1334 /* X component for the whole swizzle to prevent register
1335 * pressure from ballooning from the extra components */
1336 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[2]); ++i)
1337 ins.swizzle[2][i] = 0;
1338 } else {
1339 ins.load_store.arg_2 = 0x1E;
1340 }
1341
1342 ins.load_store.arg_1 = index;
1343
1344 return emit_mir_instruction(ctx, ins);
1345 }
1346
1347 /* Globals are like UBOs if you squint. And shared memory is like globals if
1348 * you squint even harder */
1349
1350 static void
1351 emit_global(
1352 compiler_context *ctx,
1353 nir_instr *instr,
1354 bool is_read,
1355 unsigned srcdest,
1356 nir_src *offset,
1357 bool is_shared)
1358 {
1359 /* TODO: types */
1360
1361 midgard_instruction ins;
1362
1363 if (is_read)
1364 ins = m_ld_int4(srcdest, 0);
1365 else
1366 ins = m_st_int4(srcdest, 0);
1367
1368 mir_set_offset(ctx, &ins, offset, is_shared);
1369 mir_set_intr_mask(instr, &ins, is_read);
1370
1371 emit_mir_instruction(ctx, ins);
1372 }
1373
1374 static void
1375 emit_varying_read(
1376 compiler_context *ctx,
1377 unsigned dest, unsigned offset,
1378 unsigned nr_comp, unsigned component,
1379 nir_src *indirect_offset, nir_alu_type type, bool flat)
1380 {
1381 /* XXX: Half-floats? */
1382 /* TODO: swizzle, mask */
1383
1384 midgard_instruction ins = m_ld_vary_32(dest, offset);
1385 ins.mask = mask_of(nr_comp);
1386 ins.dest_type = type;
1387
1388 if (type == nir_type_float16) {
1389 /* Ensure we are aligned so we can pack it later */
1390 ins.mask = mask_of(ALIGN_POT(nr_comp, 2));
1391 }
1392
1393 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1394 ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
1395
1396 midgard_varying_parameter p = {
1397 .is_varying = 1,
1398 .interpolation = midgard_interp_default,
1399 .flat = flat,
1400 };
1401
1402 unsigned u;
1403 memcpy(&u, &p, sizeof(p));
1404 ins.load_store.varying_parameters = u;
1405
1406 if (indirect_offset) {
1407 ins.src[2] = nir_src_index(ctx, indirect_offset);
1408 ins.src_types[2] = nir_type_uint32;
1409 } else
1410 ins.load_store.arg_2 = 0x1E;
1411
1412 ins.load_store.arg_1 = 0x9E;
1413
1414 /* Use the type appropriate load */
1415 switch (type) {
1416 case nir_type_uint32:
1417 case nir_type_bool32:
1418 ins.load_store.op = midgard_op_ld_vary_32u;
1419 break;
1420 case nir_type_int32:
1421 ins.load_store.op = midgard_op_ld_vary_32i;
1422 break;
1423 case nir_type_float32:
1424 ins.load_store.op = midgard_op_ld_vary_32;
1425 break;
1426 case nir_type_float16:
1427 ins.load_store.op = midgard_op_ld_vary_16;
1428 break;
1429 default:
1430 unreachable("Attempted to load unknown type");
1431 break;
1432 }
1433
1434 emit_mir_instruction(ctx, ins);
1435 }
1436
1437 static void
1438 emit_attr_read(
1439 compiler_context *ctx,
1440 unsigned dest, unsigned offset,
1441 unsigned nr_comp, nir_alu_type t)
1442 {
1443 midgard_instruction ins = m_ld_attr_32(dest, offset);
1444 ins.load_store.arg_1 = 0x1E;
1445 ins.load_store.arg_2 = 0x1E;
1446 ins.mask = mask_of(nr_comp);
1447
1448 /* Use the type appropriate load */
1449 switch (t) {
1450 case nir_type_uint:
1451 case nir_type_bool:
1452 ins.load_store.op = midgard_op_ld_attr_32u;
1453 break;
1454 case nir_type_int:
1455 ins.load_store.op = midgard_op_ld_attr_32i;
1456 break;
1457 case nir_type_float:
1458 ins.load_store.op = midgard_op_ld_attr_32;
1459 break;
1460 default:
1461 unreachable("Attempted to load unknown type");
1462 break;
1463 }
1464
1465 emit_mir_instruction(ctx, ins);
1466 }
1467
1468 static void
1469 emit_sysval_read(compiler_context *ctx, nir_instr *instr,
1470 unsigned nr_components, unsigned offset)
1471 {
1472 nir_dest nir_dest;
1473
1474 /* Figure out which uniform this is */
1475 int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
1476 void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);
1477
1478 unsigned dest = nir_dest_index(&nir_dest);
1479
1480 /* Sysvals are prefix uniforms */
1481 unsigned uniform = ((uintptr_t) val) - 1;
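/* Each sysval occupies one 16-byte (vec4) uniform slot, hence the
 * uniform * 16 byte offset in the read below. */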
1482
1483 /* Emit the read itself -- this is never indirect */
1484 midgard_instruction *ins =
1485 emit_ubo_read(ctx, instr, dest, (uniform * 16) + offset, NULL, 0, 0);
1486
1487 ins->mask = mask_of(nr_components);
1488 }
1489
1490 static unsigned
1491 compute_builtin_arg(nir_op op)
1492 {
1493 switch (op) {
1494 case nir_intrinsic_load_work_group_id:
1495 return 0x14;
1496 case nir_intrinsic_load_local_invocation_id:
1497 return 0x10;
1498 default:
1499 unreachable("Invalid compute parameter loaded");
1500 }
1501 }
1502
1503 static void
1504 emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, unsigned src_s, enum midgard_rt_id rt)
1505 {
1506 assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1507
1508 midgard_instruction *br = ctx->writeout_branch[rt];
1509
1510 assert(!br);
1511
1512 emit_explicit_constant(ctx, src, src);
1513
1514 struct midgard_instruction ins =
1515 v_branch(false, false);
1516
1517 bool depth_only = (rt == MIDGARD_ZS_RT);
1518
1519 ins.writeout = depth_only ? 0 : PAN_WRITEOUT_C;
1520
1521 /* Add dependencies */
1522 ins.src[0] = src;
1523 ins.src_types[0] = nir_type_uint32;
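/* The constant appears to address the writeout target: 0xFF for the
 * depth/stencil RT (cf. midgard_nir_lower_zs_store above), with colour
 * RTs spaced 0x100 apart. */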
1524 ins.constants.u32[0] = depth_only ? 0xFF : (rt - MIDGARD_COLOR_RT0) * 0x100;
1525 for (int i = 0; i < 4; ++i)
1526 ins.swizzle[0][i] = i;
1527
1528 if (~src_z) {
1529 emit_explicit_constant(ctx, src_z, src_z);
1530 ins.src[2] = src_z;
1531 ins.src_types[2] = nir_type_uint32;
1532 ins.writeout |= PAN_WRITEOUT_Z;
1533 }
1534 if (~src_s) {
1535 emit_explicit_constant(ctx, src_s, src_s);
1536 ins.src[3] = src_s;
1537 ins.src_types[3] = nir_type_uint32;
1538 ins.writeout |= PAN_WRITEOUT_S;
1539 }
1540
1541 /* Emit the branch */
1542 br = emit_mir_instruction(ctx, ins);
1543 schedule_barrier(ctx);
1544 ctx->writeout_branch[rt] = br;
1545
1546 /* Push our current location = current block count - 1 = where we'll
1547 * jump to. Maybe a bit too clever for my own good */
1548
1549 br->branch.target_block = ctx->block_count - 1;
1550 }
1551
1552 static void
1553 emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1554 {
1555 unsigned reg = nir_dest_index(&instr->dest);
1556 midgard_instruction ins = m_ld_compute_id(reg, 0);
1557 ins.mask = mask_of(3);
1558 ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
1559 ins.load_store.arg_1 = compute_builtin_arg(instr->intrinsic);
1560 emit_mir_instruction(ctx, ins);
1561 }
1562
1563 static unsigned
1564 vertex_builtin_arg(nir_op op)
1565 {
1566 switch (op) {
1567 case nir_intrinsic_load_vertex_id:
1568 return PAN_VERTEX_ID;
1569 case nir_intrinsic_load_instance_id:
1570 return PAN_INSTANCE_ID;
1571 default:
1572 unreachable("Invalid vertex builtin");
1573 }
1574 }
1575
1576 static void
1577 emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1578 {
1579 unsigned reg = nir_dest_index(&instr->dest);
1580 emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
1581 }
1582
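/* Control barriers ride the texture pipe on Midgard; emit one as a
 * TEXTURE_OP_BARRIER word with the buffer/shared ordering bits set. */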
1583 static void
1584 emit_control_barrier(compiler_context *ctx)
1585 {
1586 midgard_instruction ins = {
1587 .type = TAG_TEXTURE_4,
1588 .dest = ~0,
1589 .src = { ~0, ~0, ~0, ~0 },
1590 .texture = {
1591 .op = TEXTURE_OP_BARRIER,
1592
1593 /* TODO: optimize */
1594 .out_of_order = MIDGARD_BARRIER_BUFFER |
1595 MIDGARD_BARRIER_SHARED,
1596 }
1597 };
1598
1599 emit_mir_instruction(ctx, ins);
1600 }
1601
1602 static unsigned
1603 mir_get_branch_cond(nir_src *src, bool *invert)
1604 {
1605 /* Wrap it. No swizzle since it's a scalar */
1606
1607 nir_alu_src alu = {
1608 .src = *src
1609 };
1610
1611 *invert = pan_has_source_mod(&alu, nir_op_inot);
1612 return nir_src_index(NULL, &alu.src);
1613 }
1614
1615 static void
1616 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1617 {
1618 unsigned offset = 0, reg;
1619
1620 switch (instr->intrinsic) {
1621 case nir_intrinsic_discard_if:
1622 case nir_intrinsic_discard: {
1623 bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1624 struct midgard_instruction discard = v_branch(conditional, false);
1625 discard.branch.target_type = TARGET_DISCARD;
1626
1627 if (conditional) {
1628 discard.src[0] = mir_get_branch_cond(&instr->src[0],
1629 &discard.branch.invert_conditional);
1630 discard.src_types[0] = nir_type_uint32;
1631 }
1632
1633 emit_mir_instruction(ctx, discard);
1634 schedule_barrier(ctx);
1635
1636 break;
1637 }
1638
1639 case nir_intrinsic_load_uniform:
1640 case nir_intrinsic_load_ubo:
1641 case nir_intrinsic_load_global:
1642 case nir_intrinsic_load_shared:
1643 case nir_intrinsic_load_input:
1644 case nir_intrinsic_load_interpolated_input: {
1645 bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
1646 bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
1647 bool is_global = instr->intrinsic == nir_intrinsic_load_global;
1648 bool is_shared = instr->intrinsic == nir_intrinsic_load_shared;
1649 bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1650 bool is_interp = instr->intrinsic == nir_intrinsic_load_interpolated_input;
1651
1652 /* Get the base type of the intrinsic */
1653 /* TODO: Infer type? Does it matter? */
1654 nir_alu_type t =
1655 (is_ubo || is_global || is_shared) ? nir_type_uint :
1656 (is_interp) ? nir_type_float :
1657 nir_intrinsic_type(instr);
1658
1659 t = nir_alu_type_get_base_type(t);
1660
1661 if (!(is_ubo || is_global)) {
1662 offset = nir_intrinsic_base(instr);
1663 }
1664
1665 unsigned nr_comp = nir_intrinsic_dest_components(instr);
1666
1667 nir_src *src_offset = nir_get_io_offset_src(instr);
1668
1669 bool direct = nir_src_is_const(*src_offset);
1670 nir_src *indirect_offset = direct ? NULL : src_offset;
1671
1672 if (direct)
1673 offset += nir_src_as_uint(*src_offset);
1674
1675 /* We may need to apply a fractional offset */
1676 int component = (is_flat || is_interp) ?
1677 nir_intrinsic_component(instr) : 0;
1678 reg = nir_dest_index(&instr->dest);
1679
1680 if (is_uniform && !ctx->is_blend) {
1681 emit_ubo_read(ctx, &instr->instr, reg, (ctx->sysvals.sysval_count + offset) * 16, indirect_offset, 4, 0);
1682 } else if (is_ubo) {
1683 nir_src index = instr->src[0];
1684
1685 /* TODO: Is indirect block number possible? */
1686 assert(nir_src_is_const(index));
1687
1688 uint32_t uindex = nir_src_as_uint(index) + 1;
1689 emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex);
1690 } else if (is_global || is_shared) {
1691 emit_global(ctx, &instr->instr, true, reg, src_offset, is_shared);
1692 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
1693 emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
1694 } else if (ctx->is_blend) {
1695 /* ctx->blend_input will be precoloured to r0, where
1696 * the input is preloaded */
1697
1698 if (ctx->blend_input == ~0)
1699 ctx->blend_input = reg;
1700 else
1701 emit_mir_instruction(ctx, v_mov(ctx->blend_input, reg));
1702 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1703 emit_attr_read(ctx, reg, offset, nr_comp, t);
1704 } else {
1705 DBG("Unknown load\n");
1706 assert(0);
1707 }
1708
1709 break;
1710 }
1711
1712 /* Artefact of load_interpolated_input. TODO: other barycentric modes */
1713 case nir_intrinsic_load_barycentric_pixel:
1714 case nir_intrinsic_load_barycentric_centroid:
1715 break;
1716
1717 /* Reads 128-bit value raw off the tilebuffer during blending, tasty */
1718
1719 case nir_intrinsic_load_raw_output_pan: {
1720 reg = nir_dest_index(&instr->dest);
1721 assert(ctx->is_blend);
1722
1723 /* T720 and below use different blend opcodes with slightly
1724 * different semantics than T760 and up */
1725
1726 midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
1727
1728 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1729 ld.load_store.op = midgard_op_ld_color_buffer_32u_old;
1730 ld.load_store.address = 16;
1731 ld.load_store.arg_2 = 0x1E;
1732 }
1733
1734 emit_mir_instruction(ctx, ld);
1735 break;
1736 }
1737
1738 case nir_intrinsic_load_output: {
1739 reg = nir_dest_index(&instr->dest);
1740 assert(ctx->is_blend);
1741
1742 midgard_instruction ld = m_ld_color_buffer_as_fp16(reg, 0);
1743
1744 for (unsigned c = 4; c < 16; ++c)
1745 ld.swizzle[0][c] = 0;
1746
1747 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1748 ld.load_store.op = midgard_op_ld_color_buffer_as_fp16_old;
1749 ld.load_store.address = 1;
1750 ld.load_store.arg_2 = 0x1E;
1751 }
1752
1753 emit_mir_instruction(ctx, ld);
1754 break;
1755 }
1756
1757 case nir_intrinsic_load_blend_const_color_rgba: {
1758 assert(ctx->is_blend);
1759 reg = nir_dest_index(&instr->dest);
1760
1761 /* Blend constants are embedded directly in the shader and
1762 * patched in, so we use some magic routing */
1763
1764 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
1765 ins.has_constants = true;
1766 ins.has_blend_constant = true;
1767 emit_mir_instruction(ctx, ins);
1768 break;
1769 }
1770
1771 case nir_intrinsic_store_output:
1772 case nir_intrinsic_store_combined_output_pan:
1773 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1774
1775 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1776
1777 reg = nir_src_index(ctx, &instr->src[0]);
1778
1779 if (ctx->stage == MESA_SHADER_FRAGMENT) {
1780 bool combined = instr->intrinsic ==
1781 nir_intrinsic_store_combined_output_pan;
1782
1783 const nir_variable *var;
1784 enum midgard_rt_id rt;
1785
1786 var = search_var(&ctx->nir->outputs,
1787 nir_intrinsic_base(instr));
1788 assert(var);
1789 if (var->data.location == FRAG_RESULT_COLOR)
1790 rt = MIDGARD_COLOR_RT0;
1791 else if (var->data.location >= FRAG_RESULT_DATA0)
1792 rt = MIDGARD_COLOR_RT0 + var->data.location -
1793 FRAG_RESULT_DATA0;
1794 else if (combined)
1795 rt = MIDGARD_ZS_RT;
1796 else
1797 assert(0);
1798
1799 unsigned reg_z = ~0, reg_s = ~0;
1800 if (combined) {
1801 unsigned writeout = nir_intrinsic_component(instr);
1802 if (writeout & PAN_WRITEOUT_Z)
1803 reg_z = nir_src_index(ctx, &instr->src[2]);
1804 if (writeout & PAN_WRITEOUT_S)
1805 reg_s = nir_src_index(ctx, &instr->src[3]);
1806 }
1807
1808 emit_fragment_store(ctx, reg, reg_z, reg_s, rt);
1809 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1810 assert(instr->intrinsic == nir_intrinsic_store_output);
1811
1812 /* We should have been vectorized, though we don't
1813 * currently check that st_vary is emitted only once
1814 * per slot (this is relevant, since there's not a mask
1815 * parameter available on the store [set to 0 by the
1816 * blob]). We do respect the component by adjusting the
1817 * swizzle. If this is a constant source, we'll need to
1818 * emit that explicitly. */
1819
1820 emit_explicit_constant(ctx, reg, reg);
1821
1822 unsigned dst_component = nir_intrinsic_component(instr);
1823 unsigned nr_comp = nir_src_num_components(instr->src[0]);
1824
1825 midgard_instruction st = m_st_vary_32(reg, offset);
1826 st.load_store.arg_1 = 0x9E;
1827 st.load_store.arg_2 = 0x1E;
1828
1829 switch (nir_alu_type_get_base_type(nir_intrinsic_type(instr))) {
1830 case nir_type_uint:
1831 case nir_type_bool:
1832 st.load_store.op = midgard_op_st_vary_32u;
1833 break;
1834 case nir_type_int:
1835 st.load_store.op = midgard_op_st_vary_32i;
1836 break;
1837 case nir_type_float:
1838 st.load_store.op = midgard_op_st_vary_32;
1839 break;
1840 default:
1841 unreachable("Attempted to store unknown type");
1842 break;
1843 }
1844
1845 /* nir_intrinsic_component(store_intr) encodes the
1846 * destination component start. Source component offset
1847 * adjustment is taken care of in
1848 * install_registers_instr(), when offset_swizzle() is
1849 * called.
1850 */
1851 unsigned src_component = COMPONENT_X;
1852
1853 assert(nr_comp > 0);
1854 for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle); ++i) {
1855 st.swizzle[0][i] = src_component;
1856 if (i >= dst_component && i < dst_component + nr_comp - 1)
1857 src_component++;
1858 }
1859
1860 emit_mir_instruction(ctx, st);
1861 } else {
1862 DBG("Unknown store\n");
1863 assert(0);
1864 }
1865
1866 break;
1867
1868 /* Special case of store_output for lowered blend shaders */
1869 case nir_intrinsic_store_raw_output_pan:
1870 assert (ctx->stage == MESA_SHADER_FRAGMENT);
1871 reg = nir_src_index(ctx, &instr->src[0]);
1872 emit_fragment_store(ctx, reg, ~0, ~0, ctx->blend_rt);
1873 break;
1874
1875 case nir_intrinsic_store_global:
1876 case nir_intrinsic_store_shared:
1877 reg = nir_src_index(ctx, &instr->src[0]);
1878 emit_explicit_constant(ctx, reg, reg);
1879
1880 emit_global(ctx, &instr->instr, false, reg, &instr->src[1], instr->intrinsic == nir_intrinsic_store_shared);
1881 break;
1882
1883 case nir_intrinsic_load_ssbo_address:
1884 emit_sysval_read(ctx, &instr->instr, 1, 0);
1885 break;
1886
1887 case nir_intrinsic_get_buffer_size:
1888 emit_sysval_read(ctx, &instr->instr, 1, 8);
1889 break;
1890
1891 case nir_intrinsic_load_viewport_scale:
1892 case nir_intrinsic_load_viewport_offset:
1893 case nir_intrinsic_load_num_work_groups:
1894 case nir_intrinsic_load_sampler_lod_parameters_pan:
1895 emit_sysval_read(ctx, &instr->instr, 3, 0);
1896 break;
1897
1898 case nir_intrinsic_load_work_group_id:
1899 case nir_intrinsic_load_local_invocation_id:
1900 emit_compute_builtin(ctx, instr);
1901 break;
1902
1903 case nir_intrinsic_load_vertex_id:
1904 case nir_intrinsic_load_instance_id:
1905 emit_vertex_builtin(ctx, instr);
1906 break;
1907
1908 case nir_intrinsic_memory_barrier_buffer:
1909 case nir_intrinsic_memory_barrier_shared:
1910 break;
1911
1912 case nir_intrinsic_control_barrier:
1913 schedule_barrier(ctx);
1914 emit_control_barrier(ctx);
1915 schedule_barrier(ctx);
1916 break;
1917
1918 default:
1919 fprintf(stderr, "Unhandled intrinsic %s\n", nir_intrinsic_infos[instr->intrinsic].name);
1920 assert(0);
1921 break;
1922 }
1923 }
1924
1925 static unsigned
1926 midgard_tex_format(enum glsl_sampler_dim dim)
1927 {
1928 switch (dim) {
1929 case GLSL_SAMPLER_DIM_1D:
1930 case GLSL_SAMPLER_DIM_BUF:
1931 return MALI_TEX_1D;
1932
1933 case GLSL_SAMPLER_DIM_2D:
1934 case GLSL_SAMPLER_DIM_MS:
1935 case GLSL_SAMPLER_DIM_EXTERNAL:
1936 case GLSL_SAMPLER_DIM_RECT:
1937 return MALI_TEX_2D;
1938
1939 case GLSL_SAMPLER_DIM_3D:
1940 return MALI_TEX_3D;
1941
1942 case GLSL_SAMPLER_DIM_CUBE:
1943 return MALI_TEX_CUBE;
1944
1945 default:
1946 DBG("Unknown sampler dim type\n");
1947 assert(0);
1948 return 0;
1949 }
1950 }
1951
1952 /* Tries to attach an explicit LOD or bias as a constant. Returns whether this
1953 * was successful */
1954
1955 static bool
1956 pan_attach_constant_bias(
1957 compiler_context *ctx,
1958 nir_src lod,
1959 midgard_texture_word *word)
1960 {
1961 /* To attach as constant, it has to *be* constant */
1962
1963 if (!nir_src_is_const(lod))
1964 return false;
1965
1966 float f = nir_src_as_float(lod);
1967
1968 /* Break into fixed-point */
1969 signed lod_int = f;
1970 float lod_frac = f - lod_int;
1971
1972 /* Carry over negative fractions */
1973 if (lod_frac < 0.0) {
1974 lod_int--;
1975 lod_frac += 1.0;
1976 }
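/* Worked example: lod = -0.5 truncates to lod_int = 0 with
 * lod_frac = -0.5; the carry yields lod_int = -1 and
 * lod_frac = 0.5, encoded as bias_int = -1 with bias of
 * roughly 0x80 in u8 fixed point. */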
1977
1978 /* Encode */
1979 word->bias = float_to_ubyte(lod_frac);
1980 word->bias_int = lod_int;
1981
1982 return true;
1983 }
1984
1985 static void
1986 emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
1987 unsigned midgard_texop)
1988 {
1989 /* TODO */
1990 //assert (!instr->sampler);
1991
1992 int texture_index = instr->texture_index;
1993 int sampler_index = texture_index;
1994
1995 nir_alu_type dest_base = nir_alu_type_get_base_type(instr->dest_type);
1996 nir_alu_type dest_type = dest_base | nir_dest_bit_size(instr->dest);
1997
1998 midgard_instruction ins = {
1999 .type = TAG_TEXTURE_4,
2000 .mask = 0xF,
2001 .dest = nir_dest_index(&instr->dest),
2002 .src = { ~0, ~0, ~0, ~0 },
2003 .dest_type = dest_type,
2004 .swizzle = SWIZZLE_IDENTITY_4,
2005 .texture = {
2006 .op = midgard_texop,
2007 .format = midgard_tex_format(instr->sampler_dim),
2008 .texture_handle = texture_index,
2009 .sampler_handle = sampler_index,
2010 .shadow = instr->is_shadow,
2011 }
2012 };
2013
2014 if (instr->is_shadow && !instr->is_new_style_shadow)
2015 for (int i = 0; i < 4; ++i)
2016 ins.swizzle[0][i] = COMPONENT_X;
2017
2018 /* We may need a temporary for the coordinate */
2019
2020 bool needs_temp_coord =
2021 (midgard_texop == TEXTURE_OP_TEXEL_FETCH) ||
2022 (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) ||
2023 (instr->is_shadow);
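/* (Texel fetch must zero its unused lanes, cubemaps write
 * the face-select result into a fresh register, and shadow
 * needs a lane for the comparator; see the handling of each
 * case below.) */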
2024
2025 unsigned coords = needs_temp_coord ? make_compiler_temp_reg(ctx) : 0;
2026
2027 for (unsigned i = 0; i < instr->num_srcs; ++i) {
2028 int index = nir_src_index(ctx, &instr->src[i].src);
2029 unsigned nr_components = nir_src_num_components(instr->src[i].src);
2030 unsigned sz = nir_src_bit_size(instr->src[i].src);
2031 nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
2032
2033 switch (instr->src[i].src_type) {
2034 case nir_tex_src_coord: {
2035 emit_explicit_constant(ctx, index, index);
2036
2037 unsigned coord_mask = mask_of(instr->coord_components);
2038
2039 bool flip_zw = (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) && (coord_mask & (1 << COMPONENT_Z));
2040
2041 if (flip_zw)
2042 coord_mask ^= ((1 << COMPONENT_Z) | (1 << COMPONENT_W));
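/* For 2D arrays, NIR supplies (x, y, layer) but the layer
 * belongs in w, so the mask bit moves from z to w and the
 * temp-coord move below copies z into w. */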
2043
2044 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
2045 /* texelFetch is undefined on samplerCube */
2046 assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
2047
2048 /* For cubemaps, we use a special ld/st op to
2049 * select the face and copy the xy into the
2050 * texture register */
2051
2052 midgard_instruction ld = m_ld_cubemap_coords(coords, 0);
2053 ld.src[1] = index;
2054 ld.src_types[1] = T;
2055 ld.mask = 0x3; /* xy */
2056 ld.load_store.arg_1 = 0x20;
2057 ld.swizzle[1][3] = COMPONENT_X;
2058 emit_mir_instruction(ctx, ld);
2059
2060 /* xyzw -> xyxx */
2061 ins.swizzle[1][2] = instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
2062 ins.swizzle[1][3] = COMPONENT_X;
2063 } else if (needs_temp_coord) {
2064 /* mov coord_temp, coords */
2065 midgard_instruction mov = v_mov(index, coords);
2066 mov.mask = coord_mask;
2067
2068 if (flip_zw)
2069 mov.swizzle[1][COMPONENT_W] = COMPONENT_Z;
2070
2071 emit_mir_instruction(ctx, mov);
2072 } else {
2073 coords = index;
2074 }
2075
2076 ins.src[1] = coords;
2077 ins.src_types[1] = T;
2078
2079 /* texelFetch coordinates use all four elements
2080 * (xyz/index) regardless of texture dimensionality,
2081 * which means it's necessary to zero the unused
2082 * components to keep everything happy */
2083
2084 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
2085 /* mov index.zw, #0, or generalized */
2086 midgard_instruction mov =
2087 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), coords);
2088 mov.has_constants = true;
2089 mov.mask = coord_mask ^ 0xF;
2090 emit_mir_instruction(ctx, mov);
2091 }
2092
2093 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
2094 /* Array component in w but NIR wants it in z;
2095 * if we have a temp coord we already fixed
2096 * that up */
2097
2098 if (nr_components == 3) {
2099 ins.swizzle[1][2] = COMPONENT_Z;
2100 ins.swizzle[1][3] = needs_temp_coord ? COMPONENT_W : COMPONENT_Z;
2101 } else if (nr_components == 2) {
2102 ins.swizzle[1][2] =
2103 instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
2104 ins.swizzle[1][3] = COMPONENT_X;
2105 } else
2106 unreachable("Invalid texture 2D components");
2107 }
2108
2109 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
2110 /* We zeroed */
2111 ins.swizzle[1][2] = COMPONENT_Z;
2112 ins.swizzle[1][3] = COMPONENT_W;
2113 }
2114
2115 break;
2116 }
2117
2118 case nir_tex_src_bias:
2119 case nir_tex_src_lod: {
2120 /* Try as a constant if we can */
2121
2122 bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
2123 if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2124 break;
2125
2126 ins.texture.lod_register = true;
2127 ins.src[2] = index;
2128 ins.src_types[2] = T;
2129
2130 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2131 ins.swizzle[2][c] = COMPONENT_X;
2132
2133 emit_explicit_constant(ctx, index, index);
2134
2135 break;
2136 }
2137
2138 case nir_tex_src_offset: {
2139 ins.texture.offset_register = true;
2140 ins.src[3] = index;
2141 ins.src_types[3] = T;
2142
2143 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2144 ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
2145
2146 emit_explicit_constant(ctx, index, index);
2147 break;
2148 }
2149
2150 case nir_tex_src_comparator:
2151 case nir_tex_src_ms_index: {
2152 unsigned comp = COMPONENT_Z;
2153
2154 /* mov coord_temp.foo, coords */
2155 midgard_instruction mov = v_mov(index, coords);
2156 mov.mask = 1 << comp;
2157
2158 for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i)
2159 mov.swizzle[1][i] = COMPONENT_X;
2160
2161 emit_mir_instruction(ctx, mov);
2162 break;
2163 }
2164
2165 default: {
2166 fprintf(stderr, "Unknown texture source type: %d\n", instr->src[i].src_type);
2167 assert(0);
2168 }
2169 }
2170 }
2171
2172 emit_mir_instruction(ctx, ins);
2173 }
2174
2175 static void
2176 emit_tex(compiler_context *ctx, nir_tex_instr *instr)
2177 {
2178 switch (instr->op) {
2179 case nir_texop_tex:
2180 case nir_texop_txb:
2181 emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
2182 break;
2183 case nir_texop_txl:
2184 emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
2185 break;
2186 case nir_texop_txf:
2187 case nir_texop_txf_ms:
2188 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
2189 break;
2190 case nir_texop_txs:
2191 emit_sysval_read(ctx, &instr->instr, 4, 0);
2192 break;
2193 default: {
2194 fprintf(stderr, "Unhandled texture op: %d\n", instr->op);
2195 assert(0);
2196 }
2197 }
2198 }
2199
2200 static void
2201 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
2202 {
2203 switch (instr->type) {
2204 case nir_jump_break: {
2205 /* Emit a branch out of the loop */
2206 struct midgard_instruction br = v_branch(false, false);
2207 br.branch.target_type = TARGET_BREAK;
2208 br.branch.target_break = ctx->current_loop_depth;
2209 emit_mir_instruction(ctx, br);
2210 break;
2211 }
2212
2213 default:
2214 DBG("Unknown jump type %d\n", instr->type);
2215 break;
2216 }
2217 }
2218
2219 static void
2220 emit_instr(compiler_context *ctx, struct nir_instr *instr)
2221 {
2222 switch (instr->type) {
2223 case nir_instr_type_load_const:
2224 emit_load_const(ctx, nir_instr_as_load_const(instr));
2225 break;
2226
2227 case nir_instr_type_intrinsic:
2228 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2229 break;
2230
2231 case nir_instr_type_alu:
2232 emit_alu(ctx, nir_instr_as_alu(instr));
2233 break;
2234
2235 case nir_instr_type_tex:
2236 emit_tex(ctx, nir_instr_as_tex(instr));
2237 break;
2238
2239 case nir_instr_type_jump:
2240 emit_jump(ctx, nir_instr_as_jump(instr));
2241 break;
2242
2243 case nir_instr_type_ssa_undef:
2244 /* Spurious */
2245 break;
2246
2247 default:
2248 DBG("Unhandled instruction type\n");
2249 break;
2250 }
2251 }
2252
2253
2254 /* ALU instructions can inline or embed constants, which decreases register
2255 * pressure and saves space. */
2256
2257 #define CONDITIONAL_ATTACH(idx) { \
2258 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
2259 \
2260 if (entry) { \
2261 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2262 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
2263 } \
2264 }
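/* Note the off-by-one: constants are keyed by (SSA index + 1)
 * throughout, presumably so that index 0 never maps to an
 * empty hash table key. */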
2265
2266 static void
2267 inline_alu_constants(compiler_context *ctx, midgard_block *block)
2268 {
2269 mir_foreach_instr_in_block(block, alu) {
2270 /* Other instructions cannot inline constants */
2271 if (alu->type != TAG_ALU_4) continue;
2272 if (alu->compact_branch) continue;
2273
2274 /* If there is already a constant here, we can do nothing */
2275 if (alu->has_constants) continue;
2276
2277 CONDITIONAL_ATTACH(0);
2278
2279 if (!alu->has_constants) {
2280 CONDITIONAL_ATTACH(1)
2281 } else if (!alu->inline_constant) {
2282 /* Corner case: _two_ vec4 constants, for instance with a
2283 * csel. For this case, we can only use a constant
2284 * register for one; we'll have to emit a move for the
2285 * other. */
2286
2287 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2288 unsigned scratch = make_compiler_temp(ctx);
2289
2290 if (entry) {
2291 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
2292 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2293
2294 /* Set the source */
2295 alu->src[1] = scratch;
2296
2297 /* Inject us -before- the last instruction which set r31 */
2298 mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
2299 }
2300 }
2301 }
2302 }
2303
2304 /* Midgard supports two types of constants, embedded constants (128-bit) and
2305 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2306 * constants can be demoted to inline constants, for space savings and
2307 * sometimes a performance boost */
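/* Sketch: a scalar op reading the embedded constant 1.0f can
 * instead encode 0x3C00 (1.0 in fp16) inline, provided the
 * round-trip through fp16 is exact, as checked below. */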
2308
2309 static void
2310 embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
2311 {
2312 mir_foreach_instr_in_block(block, ins) {
2313 if (!ins->has_constants) continue;
2314 if (ins->has_inline_constant) continue;
2315
2316 /* Blend constants must not be inlined by definition */
2317 if (ins->has_blend_constant) continue;
2318
2319 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
2320 bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
2321 bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
2322
2323 if (!(is_16 || is_32))
2324 continue;
2325
2326 /* src1 cannot be an inline constant due to encoding
2327 * restrictions. So, if possible we try to flip the arguments
2328 * in that case */
2329
2330 int op = ins->alu.op;
2331
2332 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2333 alu_opcode_props[op].props & OP_COMMUTES) {
2334 mir_flip(ins);
2335 }
2336
2337 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2338 /* Component is from the swizzle. Take a nonzero component */
2339 assert(ins->mask);
2340 unsigned first_comp = ffs(ins->mask) - 1;
2341 unsigned component = ins->swizzle[1][first_comp];
2342
2343 /* Scale constant appropriately, if we can legally */
2344 int16_t scaled_constant = 0;
2345
2346 if (is_16) {
2347 scaled_constant = ins->constants.u16[component];
2348 } else if (midgard_is_integer_op(op)) {
2349 scaled_constant = ins->constants.u32[component];
2350
2351 /* Constant overflow after resize */
2352 if (scaled_constant != ins->constants.u32[component])
2353 continue;
2354 } else {
2355 float original = ins->constants.f32[component];
2356 scaled_constant = _mesa_float_to_half(original);
2357
2358 /* Check for loss of precision. If this is
2359 * mediump, we don't care, but for a highp
2360 * shader, we need to pay attention. NIR
2361 * doesn't yet tell us which mode we're in!
2362 * Practically this prevents most constants
2363 * from being inlined, sadly. */
2364
2365 float fp32 = _mesa_half_to_float(scaled_constant);
2366
2367 if (fp32 != original)
2368 continue;
2369 }
2370
2371 /* Should've been const folded */
2372 if (ins->src_abs[1] || ins->src_neg[1])
2373 continue;
2374
2375 /* Make sure that the constant is not itself a vector
2376 * by checking if all accessed values are the same. */
2377
2378 const midgard_constants *cons = &ins->constants;
2379 uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
2380
2381 bool is_vector = false;
2382 unsigned mask = effective_writemask(&ins->alu, ins->mask);
2383
2384 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
2385 /* We only care if this component is actually used */
2386 if (!(mask & (1 << c)))
2387 continue;
2388
2389 uint32_t test = is_16 ?
2390 cons->u16[ins->swizzle[1][c]] :
2391 cons->u32[ins->swizzle[1][c]];
2392
2393 if (test != value) {
2394 is_vector = true;
2395 break;
2396 }
2397 }
2398
2399 if (is_vector)
2400 continue;
2401
2402 /* Get rid of the embedded constant */
2403 ins->has_constants = false;
2404 ins->src[1] = ~0;
2405 ins->has_inline_constant = true;
2406 ins->inline_constant = scaled_constant;
2407 }
2408 }
2409 }
2410
2411 /* Dead code elimination for branches at the end of a block - only one branch
2412 * per block is legal semantically */
2413
2414 static void
2415 midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2416 {
2417 bool branched = false;
2418
2419 mir_foreach_instr_in_block_safe(block, ins) {
2420 if (!midgard_is_branch_unit(ins->unit)) continue;
2421
2422 if (branched)
2423 mir_remove_instruction(ins);
2424
2425 branched = true;
2426 }
2427 }
2428
2429 /* We want to force the invert on AND/OR to the second slot to legalize into
2430 * iandnot/iornot. The relevant patterns are for AND (and OR respectively)
2431 *
2432 * ~a & #b = ~a & ~(#~b)
2433 * ~a & b = b & ~a
2434 */
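/* Numeric sketch of the constant case, assuming a 16-bit
 * inline constant: ~a & 0x00ff becomes ~a & ~(0xff00),
 * pushing the invert onto the second slot. */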
2435
2436 static void
2437 midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
2438 {
2439 mir_foreach_instr_in_block(block, ins) {
2440 if (ins->type != TAG_ALU_4) continue;
2441
2442 if (ins->alu.op != midgard_alu_op_iand &&
2443 ins->alu.op != midgard_alu_op_ior) continue;
2444
2445 if (ins->src_invert[1] || !ins->src_invert[0]) continue;
2446
2447 if (ins->has_inline_constant) {
2448 /* ~(#~a) = ~(~#a) = a, so valid, and forces both
2449 * inverts on */
2450 ins->inline_constant = ~ins->inline_constant;
2451 ins->src_invert[1] = true;
2452 } else {
2453 /* Flip to the right invert order. Note
2454 * has_inline_constant false by assumption on the
2455 * branch, so flipping makes sense. */
2456 mir_flip(ins);
2457 }
2458 }
2459 }
2460
2461 static unsigned
2462 emit_fragment_epilogue(compiler_context *ctx, unsigned rt)
2463 {
2464 /* Loop to ourselves */
2465 midgard_instruction *br = ctx->writeout_branch[rt];
2466 struct midgard_instruction ins = v_branch(false, false);
2467 ins.writeout = br->writeout;
2468 ins.branch.target_block = ctx->block_count - 1;
2469 ins.constants.u32[0] = br->constants.u32[0];
2470 memcpy(&ins.src_types, &br->src_types, sizeof(ins.src_types));
2471 emit_mir_instruction(ctx, ins);
2472
2473 ctx->current_block->epilogue = true;
2474 schedule_barrier(ctx);
2475 return ins.branch.target_block;
2476 }
2477
2478 static midgard_block *
2479 emit_block(compiler_context *ctx, nir_block *block)
2480 {
2481 midgard_block *this_block = ctx->after_block;
2482 ctx->after_block = NULL;
2483
2484 if (!this_block)
2485 this_block = create_empty_block(ctx);
2486
2487 list_addtail(&this_block->base.link, &ctx->blocks);
2488
2489 this_block->scheduled = false;
2490 ++ctx->block_count;
2491
2492 /* Set up current block */
2493 list_inithead(&this_block->base.instructions);
2494 ctx->current_block = this_block;
2495
2496 nir_foreach_instr(instr, block) {
2497 emit_instr(ctx, instr);
2498 ++ctx->instruction_count;
2499 }
2500
2501 return this_block;
2502 }
2503
2504 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2505
2506 static void
2507 emit_if(struct compiler_context *ctx, nir_if *nif)
2508 {
2509 midgard_block *before_block = ctx->current_block;
2510
2511 /* Speculatively emit the branch, but we can't fill it in until later */
2512 bool inv = false;
2513 EMIT(branch, true, true);
2514 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2515 then_branch->src[0] = mir_get_branch_cond(&nif->condition, &inv);
2516 then_branch->src_types[0] = nir_type_uint32;
2517 then_branch->branch.invert_conditional = !inv;
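/* The branch is taken when the condition fails, jumping
 * over the then-block; hence the inverted sense. inv
 * accounts for a condition mir_get_branch_cond already
 * inverted, so we don't invert twice. */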
2518
2519 /* Emit the two subblocks. */
2520 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2521 midgard_block *end_then_block = ctx->current_block;
2522
2523 /* Emit a jump from the end of the then block to the end of the else */
2524 EMIT(branch, false, false);
2525 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2526
2527 /* Emit second block, and check if it's empty */
2528
2529 int else_idx = ctx->block_count;
2530 int count_in = ctx->instruction_count;
2531 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2532 midgard_block *end_else_block = ctx->current_block;
2533 int after_else_idx = ctx->block_count;
2534
2535 /* Now that we have the subblocks emitted, fix up the branches */
2536
2537 assert(then_block);
2538 assert(else_block);
2539
2540 if (ctx->instruction_count == count_in) {
2541 /* The else block is empty, so don't emit an exit jump */
2542 mir_remove_instruction(then_exit);
2543 then_branch->branch.target_block = after_else_idx;
2544 } else {
2545 then_branch->branch.target_block = else_idx;
2546 then_exit->branch.target_block = after_else_idx;
2547 }
2548
2549 /* Wire up the successors */
2550
2551 ctx->after_block = create_empty_block(ctx);
2552
2553 pan_block_add_successor(&before_block->base, &then_block->base);
2554 pan_block_add_successor(&before_block->base, &else_block->base);
2555
2556 pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
2557 pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
2558 }
2559
2560 static void
2561 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2562 {
2563 /* Remember where we are */
2564 midgard_block *start_block = ctx->current_block;
2565
2566 /* Allocate a loop number, growing the current inner loop depth */
2567 int loop_idx = ++ctx->current_loop_depth;
2568
2569 /* Get index from before the body so we can loop back later */
2570 int start_idx = ctx->block_count;
2571
2572 /* Emit the body itself */
2573 midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
2574
2575 /* Branch back to the start of the loop */
2576 struct midgard_instruction br_back = v_branch(false, false);
2577 br_back.branch.target_block = start_idx;
2578 emit_mir_instruction(ctx, br_back);
2579
2580 /* Mark down that branch in the graph. */
2581 pan_block_add_successor(&start_block->base, &loop_block->base);
2582 pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
2583
2584 /* Find the index of the block about to follow us (note: we don't add
2585 * one; blocks are 0-indexed so we get a fencepost problem) */
2586 int break_block_idx = ctx->block_count;
2587
2588 /* Fix up the break statements we emitted to point to the right place,
2589 * now that we can allocate a block number for them */
2590 ctx->after_block = create_empty_block(ctx);
2591
2592 mir_foreach_block_from(ctx, start_block, _block) {
2593 mir_foreach_instr_in_block(((midgard_block *) _block), ins) {
2594 if (ins->type != TAG_ALU_4) continue;
2595 if (!ins->compact_branch) continue;
2596
2597 /* We found a branch -- check the type to see if we need to do anything */
2598 if (ins->branch.target_type != TARGET_BREAK) continue;
2599
2600 /* It's a break! Check if it's our break */
2601 if (ins->branch.target_break != loop_idx) continue;
2602
2603 /* Okay, cool, we're breaking out of this loop.
2604 * Rewrite from a break to a goto */
2605
2606 ins->branch.target_type = TARGET_GOTO;
2607 ins->branch.target_block = break_block_idx;
2608
2609 pan_block_add_successor(_block, &ctx->after_block->base);
2610 }
2611 }
2612
2613 /* Now that we've finished emitting the loop, free up the depth again
2614 * so we play nice with recursion amid nested loops */
2615 --ctx->current_loop_depth;
2616
2617 /* Dump loop stats */
2618 ++ctx->loop_count;
2619 }
2620
2621 static midgard_block *
2622 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2623 {
2624 midgard_block *start_block = NULL;
2625
2626 foreach_list_typed(nir_cf_node, node, node, list) {
2627 switch (node->type) {
2628 case nir_cf_node_block: {
2629 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2630
2631 if (!start_block)
2632 start_block = block;
2633
2634 break;
2635 }
2636
2637 case nir_cf_node_if:
2638 emit_if(ctx, nir_cf_node_as_if(node));
2639 break;
2640
2641 case nir_cf_node_loop:
2642 emit_loop(ctx, nir_cf_node_as_loop(node));
2643 break;
2644
2645 case nir_cf_node_function:
2646 assert(0);
2647 break;
2648 }
2649 }
2650
2651 return start_block;
2652 }
2653
2654 /* Due to lookahead, we need to report the first tag executed in the command
2655 * stream and in branch targets. An initial block might be empty, so iterate
2656 * until we find one that 'works' */
2657
2658 static unsigned
2659 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2660 {
2661 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2662
2663 mir_foreach_block_from(ctx, initial_block, _v) {
2664 midgard_block *v = (midgard_block *) _v;
2665 if (v->quadword_count) {
2666 midgard_bundle *initial_bundle =
2667 util_dynarray_element(&v->bundles, midgard_bundle, 0);
2668
2669 return initial_bundle->tag;
2670 }
2671 }
2672
2673 /* Default to tag 1, which breaks from the shader, in case we jump
2674 * to the exit block (i.e. `return` in a compute shader) */
2675
2676 return 1;
2677 }
2678
2679 /* For each fragment writeout instruction, generate a writeout loop to
2680 * associate with it */
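/* Shape of the result: each writeout branch is retargeted at
 * a fresh epilogue block that branches back to itself (see
 * emit_fragment_epilogue), with an unconditional hop back
 * into the original flow when more render targets follow. */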
2681
2682 static void
2683 mir_add_writeout_loops(compiler_context *ctx)
2684 {
2685 for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
2686 midgard_instruction *br = ctx->writeout_branch[rt];
2687 if (!br) continue;
2688
2689 unsigned popped = br->branch.target_block;
2690 pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base), &ctx->current_block->base);
2691 br->branch.target_block = emit_fragment_epilogue(ctx, rt);
2692 br->branch.target_type = TARGET_GOTO;
2693
2694 /* If we have more RTs, we'll need to restore back after our
2695 * loop terminates */
2696
2697 if ((rt + 1) < ARRAY_SIZE(ctx->writeout_branch) && ctx->writeout_branch[rt + 1]) {
2698 midgard_instruction uncond = v_branch(false, false);
2699 uncond.branch.target_block = popped;
2700 uncond.branch.target_type = TARGET_GOTO;
2701 emit_mir_instruction(ctx, uncond);
2702 pan_block_add_successor(&ctx->current_block->base, &(mir_get_block(ctx, popped)->base));
2703 schedule_barrier(ctx);
2704 } else {
2705 /* We're last, so we can terminate here */
2706 br->last_writeout = true;
2707 }
2708 }
2709 }
2710
2711 int
2712 midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id, bool shaderdb)
2713 {
2714 struct util_dynarray *compiled = &program->compiled;
2715
2716 midgard_debug = debug_get_option_midgard_debug();
2717
2718 /* TODO: Bound against what? */
2719 compiler_context *ctx = rzalloc(NULL, compiler_context);
2720
2721 ctx->nir = nir;
2722 ctx->stage = nir->info.stage;
2723 ctx->is_blend = is_blend;
2724 ctx->alpha_ref = program->alpha_ref;
2725 ctx->blend_rt = MIDGARD_COLOR_RT0 + blend_rt;
2726 ctx->blend_input = ~0;
2727 ctx->quirks = midgard_get_quirks(gpu_id);
2728
2729 /* Start off with a safe cutoff, allowing usage of all 16 work
2730 * registers. Later, we'll promote uniform reads to uniform registers
2731 * if we determine it is beneficial to do so */
2732 ctx->uniform_cutoff = 8;
2733
2734 /* Initialize hash tables at a global (not per-block) level */
2735
2736 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2737 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2738
2739 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2740 * (so we don't accidentally duplicate the epilogue since mesa/st has
2741 * messed with our I/O quite a bit already) */
2742
2743 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2744
2745 if (ctx->stage == MESA_SHADER_VERTEX) {
2746 NIR_PASS_V(nir, nir_lower_viewport_transform);
2747 NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
2748 }
2749
2750 NIR_PASS_V(nir, nir_lower_var_copies);
2751 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2752 NIR_PASS_V(nir, nir_split_var_copies);
2753 NIR_PASS_V(nir, nir_lower_var_copies);
2754 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2755 NIR_PASS_V(nir, nir_lower_var_copies);
2756 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2757
2758 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
2759 glsl_type_size, 0);
2760 NIR_PASS_V(nir, nir_lower_ssbo);
2761 NIR_PASS_V(nir, midgard_nir_lower_zs_store);
2762
2763 /* Optimisation passes */
2764
2765 optimise_nir(nir, ctx->quirks, is_blend);
2766
2767 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2768 nir_print_shader(nir, stdout);
2769 }
2770
2771 /* Assign sysvals and counts, now that we're sure
2772 * (post-optimisation) */
2773
2774 panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
2775 program->sysval_count = ctx->sysvals.sysval_count;
2776 memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
2777
2778 nir_foreach_function(func, nir) {
2779 if (!func->impl)
2780 continue;
2781
2782 list_inithead(&ctx->blocks);
2783 ctx->block_count = 0;
2784 ctx->func = func;
2785 ctx->already_emitted = calloc(BITSET_WORDS(func->impl->ssa_alloc), sizeof(BITSET_WORD));
2786
2787 emit_cf_list(ctx, &func->impl->body);
2788 free(ctx->already_emitted);
2789 break; /* TODO: Multi-function shaders */
2790 }
2791
2792 util_dynarray_init(compiled, NULL);
2793
2794 /* Per-block lowering before opts */
2795
2796 mir_foreach_block(ctx, _block) {
2797 midgard_block *block = (midgard_block *) _block;
2798 inline_alu_constants(ctx, block);
2799 embedded_to_inline_constant(ctx, block);
2800 }
2801 /* MIR-level optimizations */
2802
2803 bool progress = false;
2804
2805 do {
2806 progress = false;
2807 progress |= midgard_opt_dead_code_eliminate(ctx);
2808
2809 mir_foreach_block(ctx, _block) {
2810 midgard_block *block = (midgard_block *) _block;
2811 progress |= midgard_opt_copy_prop(ctx, block);
2812 progress |= midgard_opt_combine_projection(ctx, block);
2813 progress |= midgard_opt_varying_projection(ctx, block);
2814 }
2815 } while (progress);
2816
2817 mir_foreach_block(ctx, _block) {
2818 midgard_block *block = (midgard_block *) _block;
2819 midgard_lower_derivatives(ctx, block);
2820 midgard_legalize_invert(ctx, block);
2821 midgard_cull_dead_branch(ctx, block);
2822 }
2823
2824 if (ctx->stage == MESA_SHADER_FRAGMENT)
2825 mir_add_writeout_loops(ctx);
2826
2827 /* Analyze now that the code is known but before scheduling creates
2828 * pipeline registers which are harder to track */
2829 mir_analyze_helper_terminate(ctx);
2830 mir_analyze_helper_requirements(ctx);
2831
2832 /* Schedule! */
2833 midgard_schedule_program(ctx);
2834 mir_ra(ctx);
2835
2836 /* Now that all the bundles are scheduled and we can calculate block
2837 * sizes, emit actual branch instructions rather than placeholders */
2838
2839 int br_block_idx = 0;
2840
2841 mir_foreach_block(ctx, _block) {
2842 midgard_block *block = (midgard_block *) _block;
2843 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2844 for (int c = 0; c < bundle->instruction_count; ++c) {
2845 midgard_instruction *ins = bundle->instructions[c];
2846
2847 if (!midgard_is_branch_unit(ins->unit)) continue;
2848
2849 /* Parse some basic branch info */
2850 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2851 bool is_conditional = ins->branch.conditional;
2852 bool is_inverted = ins->branch.invert_conditional;
2853 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2854 bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
2855 bool is_special = is_discard || is_tilebuf_wait;
2856 bool is_writeout = ins->writeout;
2857
2858 /* Determine the block we're jumping to */
2859 int target_number = ins->branch.target_block;
2860
2861 /* Report the destination tag */
2862 int dest_tag = is_discard ? 0 :
2863 is_tilebuf_wait ? bundle->tag :
2864 midgard_get_first_tag_from_block(ctx, target_number);
2865
2866 /* Count up the number of quadwords we're
2867 * jumping over, i.e. the number of quadwords
2868 * between br_block_idx and target_number */
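/* e.g. a forward jump from block 2 to block 5 skips blocks
 * 3 and 4, so the offset is the sum of their quadword
 * counts, per the loops below. */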
2869
2870 int quadword_offset = 0;
2871
2872 if (is_discard) {
2873 /* Ignored */
2874 } else if (is_tilebuf_wait) {
2875 quadword_offset = -1;
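/* i.e. branch back to this very bundle, presumably spinning
 * here until the tile buffer is ready. */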
2876 } else if (target_number > br_block_idx) {
2877 /* Jump forward */
2878
2879 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2880 midgard_block *blk = mir_get_block(ctx, idx);
2881 assert(blk);
2882
2883 quadword_offset += blk->quadword_count;
2884 }
2885 } else {
2886 /* Jump backwards */
2887
2888 for (int idx = br_block_idx; idx >= target_number; --idx) {
2889 midgard_block *blk = mir_get_block(ctx, idx);
2890 assert(blk);
2891
2892 quadword_offset -= blk->quadword_count;
2893 }
2894 }
2895
2896 /* Unconditional extended branches (far jumps)
2897 * have issues, so we always use a conditional
2898 * branch, setting the condition to always for
2899 * unconditional. For compact unconditional
2900 * branches, cond isn't used so it doesn't
2901 * matter what we pick. */
2902
2903 midgard_condition cond =
2904 !is_conditional ? midgard_condition_always :
2905 is_inverted ? midgard_condition_false :
2906 midgard_condition_true;
2907
2908 midgard_jmp_writeout_op op =
2909 is_discard ? midgard_jmp_writeout_op_discard :
2910 is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending :
2911 is_writeout ? midgard_jmp_writeout_op_writeout :
2912 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2913 midgard_jmp_writeout_op_branch_cond;
2914
2915 if (!is_compact) {
2916 midgard_branch_extended branch =
2917 midgard_create_branch_extended(
2918 cond, op,
2919 dest_tag,
2920 quadword_offset);
2921
2922 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2923 } else if (is_conditional || is_special) {
2924 midgard_branch_cond branch = {
2925 .op = op,
2926 .dest_tag = dest_tag,
2927 .offset = quadword_offset,
2928 .cond = cond
2929 };
2930
2931 assert(branch.offset == quadword_offset);
2932
2933 memcpy(&ins->br_compact, &branch, sizeof(branch));
2934 } else {
2935 assert(op == midgard_jmp_writeout_op_branch_uncond);
2936
2937 midgard_branch_uncond branch = {
2938 .op = op,
2939 .dest_tag = dest_tag,
2940 .offset = quadword_offset,
2941 .unknown = 1
2942 };
2943
2944 assert(branch.offset == quadword_offset);
2945
2946 memcpy(&ins->br_compact, &branch, sizeof(branch));
2947 }
2948 }
2949 }
2950
2951 ++br_block_idx;
2952 }
2953
2954 /* Emit flat binary from the instruction arrays. Iterate each block in
2955 * sequence. Save instruction boundaries such that lookahead tags can
2956 * be assigned easily */
2957
2958 /* Cache _all_ bundles in source order for lookahead across failed branches */
2959
2960 int bundle_count = 0;
2961 mir_foreach_block(ctx, _block) {
2962 midgard_block *block = (midgard_block *) _block;
2963 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2964 }
2965 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2966 int bundle_idx = 0;
2967 mir_foreach_block(ctx, _block) {
2968 midgard_block *block = (midgard_block *) _block;
2969 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2970 source_order_bundles[bundle_idx++] = bundle;
2971 }
2972 }
2973
2974 int current_bundle = 0;
2975
2976 /* Midgard prefetches instruction types, so during emission we
2977 * need to look ahead. Unless this is the last instruction, in
2978 * which case we return 1. */
2979
2980 mir_foreach_block(ctx, _block) {
2981 midgard_block *block = (midgard_block *) _block;
2982 mir_foreach_bundle_in_block(block, bundle) {
2983 int lookahead = 1;
2984
2985 if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
2986 lookahead = source_order_bundles[current_bundle + 1]->tag;
2987
2988 emit_binary_bundle(ctx, block, bundle, compiled, lookahead);
2989 ++current_bundle;
2990 }
2991
2992 /* TODO: Free deeper */
2993 //util_dynarray_fini(&block->instructions);
2994 }
2995
2996 free(source_order_bundles);
2997
2998 /* Report the very first tag executed */
2999 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
3000
3001 /* Deal with off-by-one related to the fencepost problem */
3002 program->work_register_count = ctx->work_registers + 1;
3003 program->uniform_cutoff = ctx->uniform_cutoff;
3004
3005 program->blend_patch_offset = ctx->blend_constant_offset;
3006 program->tls_size = ctx->tls_size;
3007
3008 if (midgard_debug & MIDGARD_DBG_SHADERS)
3009 disassemble_midgard(stdout, program->compiled.data, program->compiled.size, gpu_id, ctx->stage);
3010
3011 if (midgard_debug & MIDGARD_DBG_SHADERDB || shaderdb) {
3012 unsigned nr_bundles = 0, nr_ins = 0;
3013
3014 /* Count instructions and bundles */
3015
3016 mir_foreach_block(ctx, _block) {
3017 midgard_block *block = (midgard_block *) _block;
3018 nr_bundles += util_dynarray_num_elements(
3019 &block->bundles, midgard_bundle);
3020
3021 mir_foreach_bundle_in_block(block, bun)
3022 nr_ins += bun->instruction_count;
3023 }
3024
3025 /* Calculate thread count. There are certain cutoffs by
3026 * register count for thread count */
3027
3028 unsigned nr_registers = program->work_register_count;
3029
3030 unsigned nr_threads =
3031 (nr_registers <= 4) ? 4 :
3032 (nr_registers <= 8) ? 2 :
3033 1;
3034
3035 /* Dump stats */
3036
3037 fprintf(stderr, "shader%d - %s shader: "
3038 "%u inst, %u bundles, %u quadwords, "
3039 "%u registers, %u threads, %u loops, "
3040 "%u:%u spills:fills\n",
3041 SHADER_DB_COUNT++,
3042 ctx->is_blend ? "PAN_SHADER_BLEND" :
3043 gl_shader_stage_name(ctx->stage),
3044 nr_ins, nr_bundles, ctx->quadword_count,
3045 nr_registers, nr_threads,
3046 ctx->loop_count,
3047 ctx->spills, ctx->fills);
3048 }
3049
3050 ralloc_free(ctx);
3051
3052 return 0;
3053 }