1 /*
2 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <sys/mman.h>
27 #include <fcntl.h>
28 #include <stdint.h>
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <err.h>
32
33 #include "main/mtypes.h"
34 #include "compiler/glsl/glsl_to_nir.h"
35 #include "compiler/nir_types.h"
36 #include "main/imports.h"
37 #include "compiler/nir/nir_builder.h"
38 #include "util/half_float.h"
39 #include "util/u_math.h"
40 #include "util/u_debug.h"
41 #include "util/u_dynarray.h"
42 #include "util/list.h"
44
45 #include "midgard.h"
46 #include "midgard_nir.h"
47 #include "midgard_compile.h"
48 #include "midgard_ops.h"
49 #include "helpers.h"
50 #include "compiler.h"
51 #include "midgard_quirks.h"
52
53 #include "disassemble.h"
54
55 static const struct debug_named_value debug_options[] = {
56 {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
57 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
58 {"shaderdb", MIDGARD_DBG_SHADERDB, "Prints shader-db statistics"},
59 DEBUG_NAMED_VALUE_END
60 };
61
62 DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
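/* The flags are read once from the MIDGARD_MESA_DEBUG environment variable;
 * multiple flags are comma-separated per the u_debug conventions, e.g.
 * MIDGARD_MESA_DEBUG=msgs,shaders (editor's example). */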
63
64 unsigned SHADER_DB_COUNT = 0;
65
66 int midgard_debug = 0;
67
68 #define DBG(fmt, ...) \
69 do { if (midgard_debug & MIDGARD_DBG_MSGS) \
70 fprintf(stderr, "%s:%d: "fmt, \
71 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

72 static midgard_block *
73 create_empty_block(compiler_context *ctx)
74 {
75 midgard_block *blk = rzalloc(ctx, midgard_block);
76
77 blk->predecessors = _mesa_set_create(blk,
78 _mesa_hash_pointer,
79 _mesa_key_pointer_equal);
80
81 blk->source_id = ctx->block_source_count++;
82
83 return blk;
84 }
85
86 static void
87 midgard_block_add_successor(midgard_block *block, midgard_block *successor)
88 {
89 assert(block);
90 assert(successor);
91
92 /* Deduplicate */
93 for (unsigned i = 0; i < block->nr_successors; ++i) {
94 if (block->successors[i] == successor)
95 return;
96 }
97
98 block->successors[block->nr_successors++] = successor;
99 assert(block->nr_successors <= ARRAY_SIZE(block->successors));
100
101 /* Note the predecessor in the other direction */
102 _mesa_set_add(successor->predecessors, block);
103 }
104
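/* Close out the current block and continue emission in a fresh one
 * (editor's note). Callers use this around instructions that nothing may be
 * scheduled past within a block, e.g. writeout branches and discards. */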
105 static void
106 schedule_barrier(compiler_context *ctx)
107 {
108 midgard_block *temp = ctx->after_block;
109 ctx->after_block = create_empty_block(ctx);
110 ctx->block_count++;
111 list_addtail(&ctx->after_block->link, &ctx->blocks);
112 list_inithead(&ctx->after_block->instructions);
113 midgard_block_add_successor(ctx->current_block, ctx->after_block);
114 ctx->current_block = ctx->after_block;
115 ctx->after_block = temp;
116 }
117
118 /* Helpers to generate midgard_instructions using macro magic, since every
119 * driver seems to do it that way */
120
121 #define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
122
123 #define M_LOAD_STORE(name, store) \
124 static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
125 midgard_instruction i = { \
126 .type = TAG_LOAD_STORE_4, \
127 .mask = 0xF, \
128 .dest = ~0, \
129 .src = { ~0, ~0, ~0, ~0 }, \
130 .swizzle = SWIZZLE_IDENTITY_4, \
131 .load_store = { \
132 .op = midgard_op_##name, \
133 .address = address \
134 } \
135 }; \
136 \
137 if (store) \
138 i.src[0] = ssa; \
139 else \
140 i.dest = ssa; \
141 \
142 return i; \
143 }
144
145 #define M_LOAD(name) M_LOAD_STORE(name, false)
146 #define M_STORE(name) M_LOAD_STORE(name, true)
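/* For illustration (editor's note), M_LOAD(ld_attr_32) expands to a helper
 * along these lines once the compile-time store flag is folded away (shown
 * in a comment so the macro stays the single definition):
 *
 *     static midgard_instruction m_ld_attr_32(unsigned ssa, unsigned address) {
 *             midgard_instruction i = {
 *                     .type = TAG_LOAD_STORE_4,
 *                     .mask = 0xF,
 *                     .dest = ssa,
 *                     .src = { ~0, ~0, ~0, ~0 },
 *                     .swizzle = SWIZZLE_IDENTITY_4,
 *                     .load_store = {
 *                             .op = midgard_op_ld_attr_32,
 *                             .address = address
 *                     }
 *             };
 *
 *             return i;
 *     }
 */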
147
148 /* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
149 * the corresponding Midgard source */
150
151 static midgard_vector_alu_src
152 vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
153 bool half, bool sext)
154 {
155 /* Figure out how many components there are so we can adjust.
156 * Specifically we want to broadcast the last channel so things like
157 * ball2/3 work.
158 */
159
160 if (broadcast_count && src) {
161 uint8_t last_component = src->swizzle[broadcast_count - 1];
162
163 for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
164 src->swizzle[c] = last_component;
165 }
166 }
167
168 midgard_vector_alu_src alu_src = {
169 .rep_low = 0,
170 .rep_high = 0,
171 .half = half
172 };
173
174 if (is_int) {
175 alu_src.mod = midgard_int_normal;
176
177 /* Sign/zero-extend if needed */
178
179 if (half) {
180 alu_src.mod = sext ?
181 midgard_int_sign_extend
182 : midgard_int_zero_extend;
183 }
184
185 /* These should have been lowered away */
186 if (src)
187 assert(!(src->abs || src->negate));
188 } else {
189 if (src)
190 alu_src.mod = (src->abs << 0) | (src->negate << 1);
191 }
192
193 return alu_src;
194 }
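/* Worked example (editor's note): for a ball2-type op with
 * broadcast_count = 2, a source swizzled .xy becomes .xyyy..., so every
 * lane of the vec4 hardware op reads a defined channel. */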
195
196 /* load/store instructions have both 32-bit and 16-bit variants, depending on
197 * whether we are using vectors composed of highp or mediump. At the moment, we
198 * don't support half-floats -- this requires changes in other parts of the
199 * compiler -- therefore the 16-bit versions are commented out. */
200
201 //M_LOAD(ld_attr_16);
202 M_LOAD(ld_attr_32);
203 //M_LOAD(ld_vary_16);
204 M_LOAD(ld_vary_32);
205 M_LOAD(ld_ubo_int4);
206 M_LOAD(ld_int4);
207 M_STORE(st_int4);
208 M_LOAD(ld_color_buffer_8);
209 //M_STORE(st_vary_16);
210 M_STORE(st_vary_32);
211 M_LOAD(ld_cubemap_coords);
212 M_LOAD(ld_compute_id);
213
214 static midgard_instruction
215 v_branch(bool conditional, bool invert)
216 {
217 midgard_instruction ins = {
218 .type = TAG_ALU_4,
219 .unit = ALU_ENAB_BRANCH,
220 .compact_branch = true,
221 .branch = {
222 .conditional = conditional,
223 .invert_conditional = invert
224 },
225 .dest = ~0,
226 .src = { ~0, ~0, ~0, ~0 },
227 };
228
229 return ins;
230 }
231
232 static midgard_branch_extended
233 midgard_create_branch_extended(midgard_condition cond,
234 midgard_jmp_writeout_op op,
235 unsigned dest_tag,
236 signed quadword_offset)
237 {
238 /* The condition code is actually a LUT describing a function to
239 * combine multiple condition codes. However, we only support a single
240 * condition code at the moment, so we just duplicate over a bunch of
241 * times. */
242
243 uint16_t duplicated_cond =
244 (cond << 14) |
245 (cond << 12) |
246 (cond << 10) |
247 (cond << 8) |
248 (cond << 6) |
249 (cond << 4) |
250 (cond << 2) |
251 (cond << 0);
252
253 midgard_branch_extended branch = {
254 .op = op,
255 .dest_tag = dest_tag,
256 .offset = quadword_offset,
257 .cond = duplicated_cond
258 };
259
260 return branch;
261 }
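/* Editor's sketch: the shift chain above is just the 2-bit condition
 * replicated into all eight 2-bit slots of the 16-bit LUT. An equivalent
 * loop form, for clarity (hypothetical helper, not used elsewhere): */

static inline uint16_t
mir_replicate_cond_sketch(midgard_condition cond)
{
        uint16_t lut = 0;

        /* Fill slots 0, 2, 4, ..., 14 with the same 2-bit condition */
        for (unsigned i = 0; i < 16; i += 2)
                lut |= (uint16_t) (cond << i);

        return lut;
}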
262
263 static void
264 attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
265 {
266 ins->has_constants = true;
267 memcpy(&ins->constants, constants, 16);
268 }
269
270 static int
271 glsl_type_size(const struct glsl_type *type, bool bindless)
272 {
273 return glsl_count_attribute_slots(type, false);
274 }
275
276 /* Lower fdot2 to a vector multiplication followed by channel addition */
277 static void
278 midgard_nir_lower_fdot2_body(nir_builder *b, nir_alu_instr *alu)
279 {
280 if (alu->op != nir_op_fdot2)
281 return;
282
283 b->cursor = nir_before_instr(&alu->instr);
284
285 nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
286 nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
287
288 nir_ssa_def *product = nir_fmul(b, src0, src1);
289
290 nir_ssa_def *sum = nir_fadd(b,
291 nir_channel(b, product, 0),
292 nir_channel(b, product, 1));
293
294 /* Replace the fdot2 with this sum */
295 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
296 }
297
298 static int
299 midgard_sysval_for_ssbo(nir_intrinsic_instr *instr)
300 {
301 /* This is way too meta */
302 bool is_store = instr->intrinsic == nir_intrinsic_store_ssbo;
303 unsigned idx_idx = is_store ? 1 : 0;
304
305 nir_src index = instr->src[idx_idx];
306 assert(nir_src_is_const(index));
307 uint32_t uindex = nir_src_as_uint(index);
308
309 return PAN_SYSVAL(SSBO, uindex);
310 }
311
312 static int
313 midgard_sysval_for_sampler(nir_intrinsic_instr *instr)
314 {
315 /* TODO: indirect samplers !!! */
316 nir_src index = instr->src[0];
317 assert(nir_src_is_const(index));
318 uint32_t uindex = nir_src_as_uint(index);
319
320 return PAN_SYSVAL(SAMPLER, uindex);
321 }
322
323 static int
324 midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
325 {
326 switch (instr->intrinsic) {
327 case nir_intrinsic_load_viewport_scale:
328 return PAN_SYSVAL_VIEWPORT_SCALE;
329 case nir_intrinsic_load_viewport_offset:
330 return PAN_SYSVAL_VIEWPORT_OFFSET;
331 case nir_intrinsic_load_num_work_groups:
332 return PAN_SYSVAL_NUM_WORK_GROUPS;
333 case nir_intrinsic_load_ssbo:
334 case nir_intrinsic_store_ssbo:
335 return midgard_sysval_for_ssbo(instr);
336 case nir_intrinsic_load_sampler_lod_parameters_pan:
337 return midgard_sysval_for_sampler(instr);
338 default:
339 return ~0;
340 }
341 }
342
343 static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
344 unsigned *dest)
345 {
346 nir_intrinsic_instr *intr;
347 nir_dest *dst = NULL;
348 nir_tex_instr *tex;
349 int sysval = -1;
350
351 bool is_store = false;
352
353 switch (instr->type) {
354 case nir_instr_type_intrinsic:
355 intr = nir_instr_as_intrinsic(instr);
356 sysval = midgard_nir_sysval_for_intrinsic(intr);
357 dst = &intr->dest;
358 is_store |= intr->intrinsic == nir_intrinsic_store_ssbo;
359 break;
360 case nir_instr_type_tex:
361 tex = nir_instr_as_tex(instr);
362 if (tex->op != nir_texop_txs)
363 break;
364
365 sysval = PAN_SYSVAL(TEXTURE_SIZE,
366 PAN_TXS_SYSVAL_ID(tex->texture_index,
367 nir_tex_instr_dest_size(tex) -
368 (tex->is_array ? 1 : 0),
369 tex->is_array));
370 dst = &tex->dest;
371 break;
372 default:
373 break;
374 }
375
376 if (dest && dst && !is_store)
377 *dest = nir_dest_index(ctx, dst);
378
379 return sysval;
380 }
381
382 static void
383 midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
384 {
385 int sysval;
386
387 sysval = sysval_for_instr(ctx, instr, NULL);
388 if (sysval < 0)
389 return;
390
391 /* We have a sysval load; check if it's already been assigned */
392
393 if (_mesa_hash_table_u64_search(ctx->sysval_to_id, sysval))
394 return;
395
396 /* It hasn't -- so assign it now! */
397
398 unsigned id = ctx->sysval_count++;
399 _mesa_hash_table_u64_insert(ctx->sysval_to_id, sysval, (void *) ((uintptr_t) id + 1));
400 ctx->sysvals[id] = sysval;
401 }
402
403 static void
404 midgard_nir_assign_sysvals(compiler_context *ctx, nir_shader *shader)
405 {
406 ctx->sysval_count = 0;
407
408 nir_foreach_function(function, shader) {
409 if (!function->impl) continue;
410
411 nir_foreach_block(block, function->impl) {
412 nir_foreach_instr_safe(instr, block) {
413 midgard_nir_assign_sysval_body(ctx, instr);
414 }
415 }
416 }
417 }
418
419 static bool
420 midgard_nir_lower_fdot2(nir_shader *shader)
421 {
422 bool progress = false;
423
424 nir_foreach_function(function, shader) {
425 if (!function->impl) continue;
426
427 nir_builder _b;
428 nir_builder *b = &_b;
429 nir_builder_init(b, function->impl);
430
431 nir_foreach_block(block, function->impl) {
432 nir_foreach_instr_safe(instr, block) {
433 if (instr->type != nir_instr_type_alu) continue;
434
435 nir_alu_instr *alu = nir_instr_as_alu(instr);
436 midgard_nir_lower_fdot2_body(b, alu);
437
438 progress |= alu->op == nir_op_fdot2;
439 }
440 }
441
442 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
443
444 }
445
446 return progress;
447 }
448
449 /* Optimization loop; among other passes, this flushes undefined values to zero (nir_undef_to_zero) */
450
451 static void
452 optimise_nir(nir_shader *nir, unsigned quirks)
453 {
454 bool progress;
455 unsigned lower_flrp =
456 (nir->options->lower_flrp16 ? 16 : 0) |
457 (nir->options->lower_flrp32 ? 32 : 0) |
458 (nir->options->lower_flrp64 ? 64 : 0);
459
460 NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
461 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);
462
463 nir_lower_tex_options lower_tex_options = {
464 .lower_txs_lod = true,
465 .lower_txp = ~0,
466 .lower_tex_without_implicit_lod =
467 (quirks & MIDGARD_EXPLICIT_LOD),
468
469 /* TODO: we have native gradient.. */
470 .lower_txd = true,
471 };
472
473 NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
474
475 /* Must lower fdot2 after tex is lowered */
476 NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
477
478 /* T720 is broken. */
479
480 if (quirks & MIDGARD_BROKEN_LOD)
481 NIR_PASS_V(nir, midgard_nir_lod_errata);
482
483 do {
484 progress = false;
485
486 NIR_PASS(progress, nir, nir_lower_var_copies);
487 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
488
489 NIR_PASS(progress, nir, nir_copy_prop);
490 NIR_PASS(progress, nir, nir_opt_dce);
491 NIR_PASS(progress, nir, nir_opt_dead_cf);
492 NIR_PASS(progress, nir, nir_opt_cse);
493 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
494 NIR_PASS(progress, nir, nir_opt_algebraic);
495 NIR_PASS(progress, nir, nir_opt_constant_folding);
496
497 if (lower_flrp != 0) {
498 bool lower_flrp_progress = false;
499 NIR_PASS(lower_flrp_progress,
500 nir,
501 nir_lower_flrp,
502 lower_flrp,
503 false /* always_precise */,
504 nir->options->lower_ffma);
505 if (lower_flrp_progress) {
506 NIR_PASS(progress, nir,
507 nir_opt_constant_folding);
508 progress = true;
509 }
510
511 /* Nothing should rematerialize any flrps, so we only
512 * need to do this lowering once.
513 */
514 lower_flrp = 0;
515 }
516
517 NIR_PASS(progress, nir, nir_opt_undef);
518 NIR_PASS(progress, nir, nir_undef_to_zero);
519
520 NIR_PASS(progress, nir, nir_opt_loop_unroll,
521 nir_var_shader_in |
522 nir_var_shader_out |
523 nir_var_function_temp);
524
525 NIR_PASS(progress, nir, nir_opt_vectorize);
526 } while (progress);
527
528 /* Must be run at the end to prevent creation of fsin/fcos ops */
529 NIR_PASS(progress, nir, midgard_nir_scale_trig);
530
531 do {
532 progress = false;
533
534 NIR_PASS(progress, nir, nir_opt_dce);
535 NIR_PASS(progress, nir, nir_opt_algebraic);
536 NIR_PASS(progress, nir, nir_opt_constant_folding);
537 NIR_PASS(progress, nir, nir_copy_prop);
538 } while (progress);
539
540 NIR_PASS(progress, nir, nir_opt_algebraic_late);
541
542 /* We implement booleans as 32-bit 0/~0 */
543 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
544
545 /* Now that booleans are lowered, we can run out late opts */
546 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
547
548 /* Lower mods for float ops only. Integer ops don't support modifiers
549 * (saturate doesn't make sense on integers, neg/abs require dedicated
550 * instructions) */
551
552 NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
553 NIR_PASS(progress, nir, nir_copy_prop);
554 NIR_PASS(progress, nir, nir_opt_dce);
555
556 /* Take us out of SSA */
557 NIR_PASS(progress, nir, nir_lower_locals_to_regs);
558 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
559
560 /* We are a vector architecture; write combine where possible */
561 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
562 NIR_PASS(progress, nir, nir_lower_vec_to_movs);
563
564 NIR_PASS(progress, nir, nir_opt_dce);
565 }
566
567 /* Do not actually emit a load; instead, cache the constant for inlining */
568
569 static void
570 emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
571 {
572 nir_ssa_def def = instr->def;
573
574 float *v = rzalloc_array(NULL, float, 4);
575 nir_const_value_to_array(v, instr->value, instr->def.num_components, f32);
576
577 /* Shifted for SSA, +1 for off-by-one */
578 _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, v);
579 }
580
581 /* Normally constants are embedded implicitly, but for I/O and such we have to
582 * explicitly emit a move with the constant source */
583
584 static void
585 emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
586 {
587 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
588
589 if (constant_value) {
590 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
591 attach_constants(ctx, &ins, constant_value, node + 1);
592 emit_mir_instruction(ctx, ins);
593 }
594 }
595
596 static bool
597 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
598 {
599 unsigned comp = src->swizzle[0];
600
601 for (unsigned c = 1; c < nr_components; ++c) {
602 if (src->swizzle[c] != comp)
603 return true;
604 }
605
606 return false;
607 }
608
609 #define ALU_CASE(nir, _op) \
610 case nir_op_##nir: \
611 op = midgard_alu_op_##_op; \
612 assert(src_bitsize == dst_bitsize); \
613 break;
614
615 #define ALU_CASE_BCAST(nir, _op, count) \
616 case nir_op_##nir: \
617 op = midgard_alu_op_##_op; \
618 broadcast_swizzle = count; \
619 assert(src_bitsize == dst_bitsize); \
620 break;
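/* For illustration (editor's note), ALU_CASE(fadd, fadd) expands to:
 *
 *     case nir_op_fadd:
 *             op = midgard_alu_op_fadd;
 *             assert(src_bitsize == dst_bitsize);
 *             break;
 */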
621 static bool
622 nir_is_fzero_constant(nir_src src)
623 {
624 if (!nir_src_is_const(src))
625 return false;
626
627 for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
628 if (nir_src_comp_as_float(src, c) != 0.0)
629 return false;
630 }
631
632 return true;
633 }
634
635 /* Analyze the sizes of the inputs to determine which reg mode. Ops needing
636 * special treatment override this anyway. */
637
638 static midgard_reg_mode
639 reg_mode_for_nir(nir_alu_instr *instr)
640 {
641 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
642
643 switch (src_bitsize) {
644 case 8:
645 return midgard_reg_mode_8;
646 case 16:
647 return midgard_reg_mode_16;
648 case 32:
649 return midgard_reg_mode_32;
650 case 64:
651 return midgard_reg_mode_64;
652 default:
653 unreachable("Invalid bit size");
654 }
655 }
656
657 static void
658 emit_alu(compiler_context *ctx, nir_alu_instr *instr)
659 {
660 /* Derivatives end up emitted on the texture pipe, not the ALUs. This
661 * is handled elsewhere */
662
663 if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
664 midgard_emit_derivatives(ctx, instr);
665 return;
666 }
667
668 bool is_ssa = instr->dest.dest.is_ssa;
669
670 unsigned dest = nir_dest_index(ctx, &instr->dest.dest);
671 unsigned nr_components = nir_dest_num_components(instr->dest.dest);
672 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
673
674 /* Most Midgard ALU ops have a 1:1 correspondence to NIR ops; these are
675 * supported. A few do not and are commented out for now. Also, there are a
676 * number of NIR ops which Midgard does not support and need to be
677 * lowered, also TODO. This switch block emits the opcode and calling
678 * convention of the Midgard instruction; actual packing is done in
679 * emit_alu below */
680
681 unsigned op;
682
683 /* Number of components valid to check for the instruction (the rest
684 * will be forced to the last), or 0 to use as-is. Relevant as
685 * ball-type instructions have a channel count in NIR but are all vec4
686 * in Midgard */
687
688 unsigned broadcast_swizzle = 0;
689
690 /* What register mode should we operate in? */
691 midgard_reg_mode reg_mode =
692 reg_mode_for_nir(instr);
693
694 /* Do we need a destination override? Used for inline
695 * type conversion */
696
697 midgard_dest_override dest_override =
698 midgard_dest_override_none;
699
700 /* Should we use a smaller respective source and sign-extend? */
701
702 bool half_1 = false, sext_1 = false;
703 bool half_2 = false, sext_2 = false;
704
705 unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
706 unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
707
708 switch (instr->op) {
709 ALU_CASE(fadd, fadd);
710 ALU_CASE(fmul, fmul);
711 ALU_CASE(fmin, fmin);
712 ALU_CASE(fmax, fmax);
713 ALU_CASE(imin, imin);
714 ALU_CASE(imax, imax);
715 ALU_CASE(umin, umin);
716 ALU_CASE(umax, umax);
717 ALU_CASE(ffloor, ffloor);
718 ALU_CASE(fround_even, froundeven);
719 ALU_CASE(ftrunc, ftrunc);
720 ALU_CASE(fceil, fceil);
721 ALU_CASE(fdot3, fdot3);
722 ALU_CASE(fdot4, fdot4);
723 ALU_CASE(iadd, iadd);
724 ALU_CASE(isub, isub);
725 ALU_CASE(imul, imul);
726
727 /* Zero shoved as second-arg */
728 ALU_CASE(iabs, iabsdiff);
729
730 ALU_CASE(mov, imov);
731
732 ALU_CASE(feq32, feq);
733 ALU_CASE(fne32, fne);
734 ALU_CASE(flt32, flt);
735 ALU_CASE(ieq32, ieq);
736 ALU_CASE(ine32, ine);
737 ALU_CASE(ilt32, ilt);
738 ALU_CASE(ult32, ult);
739
740 /* We don't have a native b2f32 instruction. Instead, like many
741 * GPUs, we exploit booleans as 0/~0 for false/true, and
742 * correspondingly AND with 1.0 to do the type conversion. For
743 * the moment, prime us
744 * to emit:
745 *
746 * iand [whatever], #0
747 *
748 * At the end of emit_alu (as MIR), we'll fix up the constant.
749 */
750
751 ALU_CASE(b2f32, iand);
752 ALU_CASE(b2i32, iand);
753
754 /* Likewise, we don't have a dedicated f2b32 instruction, but
755 * we can do a "not equal to 0.0" test. */
756
757 ALU_CASE(f2b32, fne);
758 ALU_CASE(i2b32, ine);
759
760 ALU_CASE(frcp, frcp);
761 ALU_CASE(frsq, frsqrt);
762 ALU_CASE(fsqrt, fsqrt);
763 ALU_CASE(fexp2, fexp2);
764 ALU_CASE(flog2, flog2);
765
766 ALU_CASE(f2i32, f2i_rtz);
767 ALU_CASE(f2u32, f2u_rtz);
768 ALU_CASE(i2f32, i2f_rtz);
769 ALU_CASE(u2f32, u2f_rtz);
770
771 ALU_CASE(f2i16, f2i_rtz);
772 ALU_CASE(f2u16, f2u_rtz);
773 ALU_CASE(i2f16, i2f_rtz);
774 ALU_CASE(u2f16, u2f_rtz);
775
776 ALU_CASE(fsin, fsin);
777 ALU_CASE(fcos, fcos);
778
779 /* We'll set invert */
780 ALU_CASE(inot, imov);
781 ALU_CASE(iand, iand);
782 ALU_CASE(ior, ior);
783 ALU_CASE(ixor, ixor);
784 ALU_CASE(ishl, ishl);
785 ALU_CASE(ishr, iasr);
786 ALU_CASE(ushr, ilsr);
787
788 ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
789 ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
790 ALU_CASE(b32all_fequal4, fball_eq);
791
792 ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
793 ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
794 ALU_CASE(b32any_fnequal4, fbany_neq);
795
796 ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
797 ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
798 ALU_CASE(b32all_iequal4, iball_eq);
799
800 ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
801 ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
802 ALU_CASE(b32any_inequal4, ibany_neq);
803
804 /* Source mods will be shoved in later */
805 ALU_CASE(fabs, fmov);
806 ALU_CASE(fneg, fmov);
807 ALU_CASE(fsat, fmov);
808
809 /* For size conversion, we use a move. Ideally though we would squash
810 * these ops together; maybe that has to happen after in NIR as part of
811 * propagation...? An earlier algebraic pass ensured we step down by
812 * only / exactly one size. If stepping down, we use a dest override to
813 * reduce the size; if stepping up, we use a larger-sized move with a
814 * half source and a sign/zero-extension modifier */
815
816 case nir_op_i2i8:
817 case nir_op_i2i16:
818 case nir_op_i2i32:
819 case nir_op_i2i64:
820 /* If we end up upscale, we'll need a sign-extend on the
821 * operand (the second argument) */
822
823 sext_2 = true;
824 /* fallthrough */
825 case nir_op_u2u8:
826 case nir_op_u2u16:
827 case nir_op_u2u32:
828 case nir_op_u2u64: {
829 op = midgard_alu_op_imov;
830
831 if (dst_bitsize == (src_bitsize * 2)) {
832 /* Converting up */
833 half_2 = true;
834
835 /* Use a greater register mode */
836 reg_mode++;
837 } else if (src_bitsize == (dst_bitsize * 2)) {
838 /* Converting down */
839 dest_override = midgard_dest_override_lower;
840 }
841
842 break;
843 }
844
845 case nir_op_f2f16: {
846 assert(src_bitsize == 32);
847
848 op = midgard_alu_op_fmov;
849 dest_override = midgard_dest_override_lower;
850 break;
851 }
852
853 case nir_op_f2f32: {
854 assert(src_bitsize == 16);
855
856 op = midgard_alu_op_fmov;
857 half_2 = true;
858 reg_mode++;
859 break;
860 }
861
862
863 /* For greater-or-equal, we lower to less-or-equal and flip the
864 * arguments */
865
866 case nir_op_fge:
867 case nir_op_fge32:
868 case nir_op_ige32:
869 case nir_op_uge32: {
870 op =
871 instr->op == nir_op_fge ? midgard_alu_op_fle :
872 instr->op == nir_op_fge32 ? midgard_alu_op_fle :
873 instr->op == nir_op_ige32 ? midgard_alu_op_ile :
874 instr->op == nir_op_uge32 ? midgard_alu_op_ule :
875 0;
876
877 /* Swap via temporary */
878 nir_alu_src temp = instr->src[1];
879 instr->src[1] = instr->src[0];
880 instr->src[0] = temp;
881
882 break;
883 }
884
885 case nir_op_b32csel: {
886 /* Midgard features both fcsel and icsel, depending on
887 * the type of the arguments/output. However, as long
888 * as we're careful we can _always_ use icsel and
889 * _never_ need fcsel, since the latter does additional
890 * floating-point-specific processing whereas the
891 * former just moves bits on the wire. It's not obvious
892 * why these are separate opcodes, save for the ability
893 * to do things like sat/pos/abs/neg for free */
894
895 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
896 op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
897
898 /* The condition is the first argument; move the other
899 * arguments up one to be a binary instruction for
900 * Midgard with the condition last */
901
902 nir_alu_src temp = instr->src[2];
903
904 instr->src[2] = instr->src[0];
905 instr->src[0] = instr->src[1];
906 instr->src[1] = temp;
907
908 break;
909 }
910
911 default:
912 DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
913 assert(0);
914 return;
915 }
916
917 /* Midgard can perform certain modifiers on output of an ALU op */
918 unsigned outmod;
919
920 if (midgard_is_integer_out_op(op)) {
921 outmod = midgard_outmod_int_wrap;
922 } else {
923 bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
924 outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
925 }
926
927 /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
928
929 if (instr->op == nir_op_fmax) {
930 if (nir_is_fzero_constant(instr->src[0].src)) {
931 op = midgard_alu_op_fmov;
932 nr_inputs = 1;
933 outmod = midgard_outmod_pos;
934 instr->src[0] = instr->src[1];
935 } else if (nir_is_fzero_constant(instr->src[1].src)) {
936 op = midgard_alu_op_fmov;
937 nr_inputs = 1;
938 outmod = midgard_outmod_pos;
939 }
940 }
941
942 /* Fetch unit, quirks, etc information */
943 unsigned opcode_props = alu_opcode_props[op].props;
944 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
945
946 /* src0 will always exist afaik, but src1 will not for 1-argument
947 * instructions. The latter can only be fetched if the instruction
948 * needs it, or else we may segfault. */
949
950 unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
951 unsigned src1 = nr_inputs >= 2 ? nir_alu_src_index(ctx, &instr->src[1]) : ~0;
952 unsigned src2 = nr_inputs == 3 ? nir_alu_src_index(ctx, &instr->src[2]) : ~0;
953 assert(nr_inputs <= 3);
954
955 /* Rather than use the instruction generation helpers, we do it
956 * ourselves here to avoid the mess */
957
958 midgard_instruction ins = {
959 .type = TAG_ALU_4,
960 .src = {
961 quirk_flipped_r24 ? ~0 : src0,
962 quirk_flipped_r24 ? src0 : src1,
963 src2,
964 ~0
965 },
966 .dest = dest,
967 };
968
969 nir_alu_src *nirmods[3] = { NULL };
970
971 if (nr_inputs >= 2) {
972 nirmods[0] = &instr->src[0];
973 nirmods[1] = &instr->src[1];
974 } else if (nr_inputs == 1) {
975 nirmods[quirk_flipped_r24] = &instr->src[0];
976 } else {
977 assert(0);
978 }
979
980 if (nr_inputs == 3)
981 nirmods[2] = &instr->src[2];
982
983 /* These were lowered to a move, so apply the corresponding mod */
984
985 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
986 nir_alu_src *s = nirmods[quirk_flipped_r24];
987
988 if (instr->op == nir_op_fneg)
989 s->negate = !s->negate;
990
991 if (instr->op == nir_op_fabs)
992 s->abs = !s->abs;
993 }
994
995 bool is_int = midgard_is_integer_op(op);
996
997 ins.mask = mask_of(nr_components);
998
999 midgard_vector_alu alu = {
1000 .op = op,
1001 .reg_mode = reg_mode,
1002 .dest_override = dest_override,
1003 .outmod = outmod,
1004
1005 .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
1006 .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
1007 };
1008
1009 /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
1010
1011 if (!is_ssa)
1012 ins.mask &= instr->dest.write_mask;
1013
1014 for (unsigned m = 0; m < 3; ++m) {
1015 if (!nirmods[m])
1016 continue;
1017
1018 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c)
1019 ins.swizzle[m][c] = nirmods[m]->swizzle[c];
1020
1021 /* Replicate. TODO: remove when vec16 lands */
1022 for (unsigned c = NIR_MAX_VEC_COMPONENTS; c < MIR_VEC_COMPONENTS; ++c)
1023 ins.swizzle[m][c] = nirmods[m]->swizzle[NIR_MAX_VEC_COMPONENTS - 1];
1024 }
1025
1026 if (nr_inputs == 3) {
1027 /* Conditions can't have mods */
1028 assert(!nirmods[2]->abs);
1029 assert(!nirmods[2]->negate);
1030 }
1031
1032 ins.alu = alu;
1033
1034 /* Late fixup for emulated instructions */
1035
1036 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
1037 /* Presently, our second argument is an inline #0 constant.
1038 * Switch over to an embedded 1.0 constant (that can't fit
1039 * inline, since we're 32-bit, not 16-bit like the inline
1040 * constants) */
1041
1042 ins.has_inline_constant = false;
1043 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1044 ins.has_constants = true;
1045
1046 if (instr->op == nir_op_b2f32) {
1047 float f = 1.0f;
1048 memcpy(&ins.constants, &f, sizeof(float));
1049 } else {
1050 ins.constants[0] = 1;
1051 }
1052
1053
1054 for (unsigned c = 0; c < 16; ++c)
1055 ins.swizzle[1][c] = 0;
1056 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
1057 /* Lots of instructions need a 0 plonked in */
1058 ins.has_inline_constant = false;
1059 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1060 ins.has_constants = true;
1061 ins.constants[0] = 0;
1062
1063 for (unsigned c = 0; c < 16; ++c)
1064 ins.swizzle[1][c] = 0;
1065 } else if (instr->op == nir_op_inot) {
1066 ins.invert = true;
1067 }
1068
1069 if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
1070 /* To avoid duplicating the lookup tables (probably), true LUT
1071 * instructions can only operate as if they were scalars. Lower
1072 * them here by changing the component. */
1073
1074 unsigned orig_mask = ins.mask;
1075
1076 for (int i = 0; i < nr_components; ++i) {
1077 /* Mask the associated component, dropping the
1078 * instruction if needed */
1079
1080 ins.mask = 1 << i;
1081 ins.mask &= orig_mask;
1082
1083 if (!ins.mask)
1084 continue;
1085
1086 for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
1087 ins.swizzle[0][j] = nirmods[0]->swizzle[i]; /* Pull from the correct component */
1088
1089 emit_mir_instruction(ctx, ins);
1090 }
1091 } else {
1092 emit_mir_instruction(ctx, ins);
1093 }
1094 }
1095
1096 #undef ALU_CASE
1097
1098 static void
1099 mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
1100 {
1101 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1102 unsigned nir_mask = 0;
1103 unsigned dsize = 0;
1104
1105 if (is_read) {
1106 nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1107 dsize = nir_dest_bit_size(intr->dest);
1108 } else {
1109 nir_mask = nir_intrinsic_write_mask(intr);
1110 dsize = 32;
1111 }
1112
1113 /* Once we have the NIR mask, we need to normalize to work in 32-bit space */
1114 unsigned bytemask = mir_to_bytemask(mir_mode_for_destsize(dsize), nir_mask);
1115 mir_set_bytemask(ins, bytemask);
1116
1117 if (dsize == 64)
1118 ins->load_64 = true;
1119 }
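/* Editor's note: e.g. a two-component 64-bit load has nir_mask = 0x3 in
 * 64-bit components; as a bytemask that covers all 16 bytes of the 128-bit
 * register, which mir_set_bytemask then folds back into the instruction's
 * native mask granularity. */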
1120
1121 /* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1122 * optimized) versions of UBO #0 */
1123
1124 midgard_instruction *
1125 emit_ubo_read(
1126 compiler_context *ctx,
1127 nir_instr *instr,
1128 unsigned dest,
1129 unsigned offset,
1130 nir_src *indirect_offset,
1131 unsigned index)
1132 {
1133 /* TODO: half-floats */
1134
1135 midgard_instruction ins = m_ld_ubo_int4(dest, 0);
1136 ins.constants[0] = offset;
1137
1138 if (instr->type == nir_instr_type_intrinsic)
1139 mir_set_intr_mask(instr, &ins, true);
1140
1141 if (indirect_offset) {
1142 ins.src[2] = nir_src_index(ctx, indirect_offset);
1143 ins.load_store.arg_2 = 0x80;
1144 } else {
1145 ins.load_store.arg_2 = 0x1E;
1146 }
1147
1148 ins.load_store.arg_1 = index;
1149
1150 return emit_mir_instruction(ctx, ins);
1151 }
1152
1153 /* SSBO reads are like UBO reads if you squint */
1154
1155 static void
1156 emit_ssbo_access(
1157 compiler_context *ctx,
1158 nir_instr *instr,
1159 bool is_read,
1160 unsigned srcdest,
1161 unsigned offset,
1162 nir_src *indirect_offset,
1163 unsigned index)
1164 {
1165 /* TODO: types */
1166
1167 midgard_instruction ins;
1168
1169 if (is_read)
1170 ins = m_ld_int4(srcdest, offset);
1171 else
1172 ins = m_st_int4(srcdest, offset);
1173
1174 /* SSBO reads use a generic memory read interface, so we need the
1175 * address of the SSBO as the first argument. This is a sysval. */
1176
1177 unsigned addr = make_compiler_temp(ctx);
1178 emit_sysval_read(ctx, instr, addr, 2);
1179
1180 /* The source array:
1181 *
1182 * src[0] = store ? value : unused
1183 * src[1] = arg_1
1184 * src[2] = arg_2
1185 *
1186 * We would like arg_1 = the address and
1187 * arg_2 = the offset.
1188 */
1189
1190 ins.src[1] = addr;
1191
1192 /* TODO: What is this? It looks superficially like a shift << 5, but
1193 * arg_1 doesn't take a shift. Should it be E0 or A0? We also need the
1194 * indirect offset. */
1195
1196 if (indirect_offset) {
1197 ins.load_store.arg_1 |= 0xE0;
1198 ins.src[2] = nir_src_index(ctx, indirect_offset);
1199 } else {
1200 ins.load_store.arg_2 = 0x7E;
1201 }
1202
1203 /* TODO: Bounds check */
1204
1205 /* Finally, we emit the direct offset */
1206
1207 ins.load_store.varying_parameters = (offset & 0x1FF) << 1;
1208 ins.load_store.address = (offset >> 9);
1209 mir_set_intr_mask(instr, &ins, is_read);
1210
1211 emit_mir_instruction(ctx, ins);
1212 }
1213
1214 static void
1215 emit_varying_read(
1216 compiler_context *ctx,
1217 unsigned dest, unsigned offset,
1218 unsigned nr_comp, unsigned component,
1219 nir_src *indirect_offset, nir_alu_type type, bool flat)
1220 {
1221 /* XXX: Half-floats? */
1222 /* TODO: swizzle, mask */
1223
1224 midgard_instruction ins = m_ld_vary_32(dest, offset);
1225 ins.mask = mask_of(nr_comp);
1226
1227 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1228 ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
1229
1230 midgard_varying_parameter p = {
1231 .is_varying = 1,
1232 .interpolation = midgard_interp_default,
1233 .flat = flat,
1234 };
1235
1236 unsigned u;
1237 memcpy(&u, &p, sizeof(p));
1238 ins.load_store.varying_parameters = u;
1239
1240 if (indirect_offset)
1241 ins.src[2] = nir_src_index(ctx, indirect_offset);
1242 else
1243 ins.load_store.arg_2 = 0x1E;
1244
1245 ins.load_store.arg_1 = 0x9E;
1246
1247 /* Use the type appropriate load */
1248 switch (type) {
1249 case nir_type_uint:
1250 case nir_type_bool:
1251 ins.load_store.op = midgard_op_ld_vary_32u;
1252 break;
1253 case nir_type_int:
1254 ins.load_store.op = midgard_op_ld_vary_32i;
1255 break;
1256 case nir_type_float:
1257 ins.load_store.op = midgard_op_ld_vary_32;
1258 break;
1259 default:
1260 unreachable("Attempted to load unknown type");
1261 break;
1262 }
1263
1264 emit_mir_instruction(ctx, ins);
1265 }
1266
1267 static void
1268 emit_attr_read(
1269 compiler_context *ctx,
1270 unsigned dest, unsigned offset,
1271 unsigned nr_comp, nir_alu_type t)
1272 {
1273 midgard_instruction ins = m_ld_attr_32(dest, offset);
1274 ins.load_store.arg_1 = 0x1E;
1275 ins.load_store.arg_2 = 0x1E;
1276 ins.mask = mask_of(nr_comp);
1277
1278 /* Use the type appropriate load */
1279 switch (t) {
1280 case nir_type_uint:
1281 case nir_type_bool:
1282 ins.load_store.op = midgard_op_ld_attr_32u;
1283 break;
1284 case nir_type_int:
1285 ins.load_store.op = midgard_op_ld_attr_32i;
1286 break;
1287 case nir_type_float:
1288 ins.load_store.op = midgard_op_ld_attr_32;
1289 break;
1290 default:
1291 unreachable("Attempted to load unknown type");
1292 break;
1293 }
1294
1295 emit_mir_instruction(ctx, ins);
1296 }
1297
1298 void
1299 emit_sysval_read(compiler_context *ctx, nir_instr *instr, signed dest_override,
1300 unsigned nr_components)
1301 {
1302 unsigned dest = 0;
1303
1304 /* Figure out which uniform this is */
1305 int sysval = sysval_for_instr(ctx, instr, &dest);
1306 void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
1307
1308 if (dest_override >= 0)
1309 dest = dest_override;
1310
1311 /* Sysvals are prefix uniforms */
1312 unsigned uniform = ((uintptr_t) val) - 1;
1313
1314 /* Emit the read itself -- this is never indirect */
1315 midgard_instruction *ins =
1316 emit_ubo_read(ctx, instr, dest, uniform * 16, NULL, 0);
1317
1318 ins->mask = mask_of(nr_components);
1319 }
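/* Editor's note: because sysvals are prefix uniforms in UBO #0, sysval id N
 * is read as 16 bytes at byte offset N * 16 (e.g. id 2 reads at offset 32). */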
1320
1321 static unsigned
1322 compute_builtin_arg(nir_intrinsic_op op)
1323 {
1324 switch (op) {
1325 case nir_intrinsic_load_work_group_id:
1326 return 0x14;
1327 case nir_intrinsic_load_local_invocation_id:
1328 return 0x10;
1329 default:
1330 unreachable("Invalid compute paramater loaded");
1331 }
1332 }
1333
1334 static void
1335 emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
1336 {
1337 emit_explicit_constant(ctx, src, src);
1338
1339 struct midgard_instruction ins =
1340 v_branch(false, false);
1341
1342 ins.writeout = true;
1343
1344 /* Add dependencies */
1345 ins.src[0] = src;
1346 ins.constants[0] = rt * 0x100;
1347
1348 /* Emit the branch */
1349 midgard_instruction *br = emit_mir_instruction(ctx, ins);
1350 schedule_barrier(ctx);
1351
1352 assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1353 assert(!ctx->writeout_branch[rt]);
1354 ctx->writeout_branch[rt] = br;
1355
1356 /* Push our current location = current block count - 1 = where we'll
1357 * jump to. Maybe a bit too clever for my own good */
1358
1359 br->branch.target_block = ctx->block_count - 1;
1360 }
1361
1362 static void
1363 emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1364 {
1365 unsigned reg = nir_dest_index(ctx, &instr->dest);
1366 midgard_instruction ins = m_ld_compute_id(reg, 0);
1367 ins.mask = mask_of(3);
1368 ins.load_store.arg_1 = compute_builtin_arg(instr->intrinsic);
1369 emit_mir_instruction(ctx, ins);
1370 }
1371
1372 static unsigned
1373 vertex_builtin_arg(nir_intrinsic_op op)
1374 {
1375 switch (op) {
1376 case nir_intrinsic_load_vertex_id:
1377 return PAN_VERTEX_ID;
1378 case nir_intrinsic_load_instance_id:
1379 return PAN_INSTANCE_ID;
1380 default:
1381 unreachable("Invalid vertex builtin");
1382 }
1383 }
1384
1385 static void
1386 emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1387 {
1388 unsigned reg = nir_dest_index(ctx, &instr->dest);
1389 emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
1390 }
1391
1392 static void
1393 emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1394 {
1395 unsigned offset = 0, reg;
1396
1397 switch (instr->intrinsic) {
1398 case nir_intrinsic_discard_if:
1399 case nir_intrinsic_discard: {
1400 bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1401 struct midgard_instruction discard = v_branch(conditional, false);
1402 discard.branch.target_type = TARGET_DISCARD;
1403
1404 if (conditional)
1405 discard.src[0] = nir_src_index(ctx, &instr->src[0]);
1406
1407 emit_mir_instruction(ctx, discard);
1408 schedule_barrier(ctx);
1409
1410 break;
1411 }
1412
1413 case nir_intrinsic_load_uniform:
1414 case nir_intrinsic_load_ubo:
1415 case nir_intrinsic_load_ssbo:
1416 case nir_intrinsic_load_input:
1417 case nir_intrinsic_load_interpolated_input: {
1418 bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
1419 bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
1420 bool is_ssbo = instr->intrinsic == nir_intrinsic_load_ssbo;
1421 bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1422 bool is_interp = instr->intrinsic == nir_intrinsic_load_interpolated_input;
1423
1424 /* Get the base type of the intrinsic */
1425 /* TODO: Infer type? Does it matter? */
1426 nir_alu_type t =
1427 (is_ubo || is_ssbo) ? nir_type_uint :
1428 (is_interp) ? nir_type_float :
1429 nir_intrinsic_type(instr);
1430
1431 t = nir_alu_type_get_base_type(t);
1432
1433 if (!(is_ubo || is_ssbo)) {
1434 offset = nir_intrinsic_base(instr);
1435 }
1436
1437 unsigned nr_comp = nir_intrinsic_dest_components(instr);
1438
1439 nir_src *src_offset = nir_get_io_offset_src(instr);
1440
1441 bool direct = nir_src_is_const(*src_offset);
1442 nir_src *indirect_offset = direct ? NULL : src_offset;
1443
1444 if (direct)
1445 offset += nir_src_as_uint(*src_offset);
1446
1447 /* We may need to apply a fractional offset */
1448 int component = (is_flat || is_interp) ?
1449 nir_intrinsic_component(instr) : 0;
1450 reg = nir_dest_index(ctx, &instr->dest);
1451
1452 if (is_uniform && !ctx->is_blend) {
1453 emit_ubo_read(ctx, &instr->instr, reg, (ctx->sysval_count + offset) * 16, indirect_offset, 0);
1454 } else if (is_ubo) {
1455 nir_src index = instr->src[0];
1456
1457 /* We don't yet support indirect UBOs. For indirect
1458 * block numbers (if that's possible), we don't know
1459 * enough about the hardware yet. For indirect sources,
1460 * we know what we need but we need to add some NIR
1461 * support for lowering correctly with respect to
1462 * 128-bit reads */
1463
1464 assert(nir_src_is_const(index));
1465 assert(nir_src_is_const(*src_offset));
1466
1467 uint32_t uindex = nir_src_as_uint(index) + 1;
1468 emit_ubo_read(ctx, &instr->instr, reg, offset, NULL, uindex);
1469 } else if (is_ssbo) {
1470 nir_src index = instr->src[0];
1471 assert(nir_src_is_const(index));
1472 uint32_t uindex = nir_src_as_uint(index);
1473
1474 emit_ssbo_access(ctx, &instr->instr, true, reg, offset, indirect_offset, uindex);
1475 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
1476 emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t, is_flat);
1477 } else if (ctx->is_blend) {
1478 /* For blend shaders, load the input color, which is
1479 * preloaded to r0 */
1480
1481 midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), reg);
1482 emit_mir_instruction(ctx, move);
1483 schedule_barrier(ctx);
1484 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1485 emit_attr_read(ctx, reg, offset, nr_comp, t);
1486 } else {
1487 DBG("Unknown load\n");
1488 assert(0);
1489 }
1490
1491 break;
1492 }
1493
1494 /* Artefact of load_interpolated_input. TODO: other barycentric modes */
1495 case nir_intrinsic_load_barycentric_pixel:
1496 break;
1497
1498 /* Reads 128-bit value raw off the tilebuffer during blending, tasty */
1499
1500 case nir_intrinsic_load_raw_output_pan:
1501 case nir_intrinsic_load_output_u8_as_fp16_pan:
1502 reg = nir_dest_index(ctx, &instr->dest);
1503 assert(ctx->is_blend);
1504
1505 /* T720 and below use different blend opcodes with slightly
1506 * different semantics than T760 and up */
1507
1508 midgard_instruction ld = m_ld_color_buffer_8(reg, 0);
1509 bool old_blend = ctx->quirks & MIDGARD_OLD_BLEND;
1510
1511 if (instr->intrinsic == nir_intrinsic_load_output_u8_as_fp16_pan) {
1512 ld.load_store.op = old_blend ?
1513 midgard_op_ld_color_buffer_u8_as_fp16_old :
1514 midgard_op_ld_color_buffer_u8_as_fp16;
1515
1516 if (old_blend) {
1517 ld.load_store.address = 1;
1518 ld.load_store.arg_2 = 0x1E;
1519 }
1520
1521 for (unsigned c = 2; c < 16; ++c)
1522 ld.swizzle[0][c] = 0;
1523 }
1524
1525 emit_mir_instruction(ctx, ld);
1526 break;
1527
1528 case nir_intrinsic_load_blend_const_color_rgba: {
1529 assert(ctx->is_blend);
1530 reg = nir_dest_index(ctx, &instr->dest);
1531
1532 /* Blend constants are embedded directly in the shader and
1533 * patched in, so we use some magic routing */
1534
1535 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
1536 ins.has_constants = true;
1537 ins.has_blend_constant = true;
1538 emit_mir_instruction(ctx, ins);
1539 break;
1540 }
1541
1542 case nir_intrinsic_store_output:
1543 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1544
1545 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
1546
1547 reg = nir_src_index(ctx, &instr->src[0]);
1548
1549 if (ctx->stage == MESA_SHADER_FRAGMENT) {
1550 /* Determine number of render targets */
1551 emit_fragment_store(ctx, reg, offset);
1552 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1553 /* We should have been vectorized, though we don't
1554 * currently check that st_vary is emitted only once
1555 * per slot (this is relevant, since there's not a mask
1556 * parameter available on the store [set to 0 by the
1557 * blob]). We do respect the component by adjusting the
1558 * swizzle. If this is a constant source, we'll need to
1559 * emit that explicitly. */
1560
1561 emit_explicit_constant(ctx, reg, reg);
1562
1563 unsigned component = nir_intrinsic_component(instr);
1564 unsigned nr_comp = nir_src_num_components(instr->src[0]);
1565
1566 midgard_instruction st = m_st_vary_32(reg, offset);
1567 st.load_store.arg_1 = 0x9E;
1568 st.load_store.arg_2 = 0x1E;
1569
1570 switch (nir_alu_type_get_base_type(nir_intrinsic_type(instr))) {
1571 case nir_type_uint:
1572 case nir_type_bool:
1573 st.load_store.op = midgard_op_st_vary_32u;
1574 break;
1575 case nir_type_int:
1576 st.load_store.op = midgard_op_st_vary_32i;
1577 break;
1578 case nir_type_float:
1579 st.load_store.op = midgard_op_st_vary_32;
1580 break;
1581 default:
1582 unreachable("Attempted to store unknown type");
1583 break;
1584 }
1585
1586 for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle[0]); ++i)
1587 st.swizzle[0][i] = MIN2(i + component, nr_comp);
1588
1589 emit_mir_instruction(ctx, st);
1590 } else {
1591 DBG("Unknown store\n");
1592 assert(0);
1593 }
1594
1595 break;
1596
1597 /* Special case of store_output for lowered blend shaders */
1598 case nir_intrinsic_store_raw_output_pan:
1599 assert(ctx->stage == MESA_SHADER_FRAGMENT);
1600 reg = nir_src_index(ctx, &instr->src[0]);
1601
1602 if (ctx->quirks & MIDGARD_OLD_BLEND) {
1603 /* Suppose reg = qr0.xyzw. That means 4 8-bit ---> 1 32-bit. So
1604 * reg = r0.x. We want to splatter. So we can do a 32-bit move
1605 * of:
1606 *
1607 * imov r0.xyzw, r0.xxxx
1608 */
1609
1610 unsigned expanded = make_compiler_temp(ctx);
1611
1612 midgard_instruction splatter = v_mov(reg, expanded);
1613
1614 for (unsigned c = 0; c < 16; ++c)
1615 splatter.swizzle[1][c] = 0;
1616
1617 emit_mir_instruction(ctx, splatter);
1618 emit_fragment_store(ctx, expanded, ctx->blend_rt);
1619 } else
1620 emit_fragment_store(ctx, reg, ctx->blend_rt);
1621
1622 break;
1623
1624 case nir_intrinsic_store_ssbo:
1625 assert(nir_src_is_const(instr->src[1]));
1626
1627 bool direct_offset = nir_src_is_const(instr->src[2]);
1628 offset = direct_offset ? nir_src_as_uint(instr->src[2]) : 0;
1629 nir_src *indirect_offset = direct_offset ? NULL : &instr->src[2];
1630 reg = nir_src_index(ctx, &instr->src[0]);
1631
1632 uint32_t uindex = nir_src_as_uint(instr->src[1]);
1633
1634 emit_explicit_constant(ctx, reg, reg);
1635 emit_ssbo_access(ctx, &instr->instr, false, reg, offset, indirect_offset, uindex);
1636 break;
1637
1638 case nir_intrinsic_load_viewport_scale:
1639 case nir_intrinsic_load_viewport_offset:
1640 case nir_intrinsic_load_num_work_groups:
1641 case nir_intrinsic_load_sampler_lod_parameters_pan:
1642 emit_sysval_read(ctx, &instr->instr, ~0, 3);
1643 break;
1644
1645 case nir_intrinsic_load_work_group_id:
1646 case nir_intrinsic_load_local_invocation_id:
1647 emit_compute_builtin(ctx, instr);
1648 break;
1649
1650 case nir_intrinsic_load_vertex_id:
1651 case nir_intrinsic_load_instance_id:
1652 emit_vertex_builtin(ctx, instr);
1653 break;
1654
1655 default:
1656 printf("Unhandled intrinsic\n");
1657 assert(0);
1658 break;
1659 }
1660 }
1661
1662 static unsigned
1663 midgard_tex_format(enum glsl_sampler_dim dim)
1664 {
1665 switch (dim) {
1666 case GLSL_SAMPLER_DIM_1D:
1667 case GLSL_SAMPLER_DIM_BUF:
1668 return MALI_TEX_1D;
1669
1670 case GLSL_SAMPLER_DIM_2D:
1671 case GLSL_SAMPLER_DIM_EXTERNAL:
1672 case GLSL_SAMPLER_DIM_RECT:
1673 return MALI_TEX_2D;
1674
1675 case GLSL_SAMPLER_DIM_3D:
1676 return MALI_TEX_3D;
1677
1678 case GLSL_SAMPLER_DIM_CUBE:
1679 return MALI_TEX_CUBE;
1680
1681 default:
1682 DBG("Unknown sampler dim type\n");
1683 assert(0);
1684 return 0;
1685 }
1686 }
1687
1688 /* Tries to attach an explicit LOD / bias as a constant. Returns whether this
1689 * was successful */
1690
1691 static bool
1692 pan_attach_constant_bias(
1693 compiler_context *ctx,
1694 nir_src lod,
1695 midgard_texture_word *word)
1696 {
1697 /* To attach as constant, it has to *be* constant */
1698
1699 if (!nir_src_is_const(lod))
1700 return false;
1701
1702 float f = nir_src_as_float(lod);
1703
1704 /* Break into fixed-point */
1705 signed lod_int = f;
1706 float lod_frac = f - lod_int;
1707
1708 /* Carry over negative fractions */
1709 if (lod_frac < 0.0) {
1710 lod_int--;
1711 lod_frac += 1.0;
1712 }
1713
1714 /* Encode */
1715 word->bias = float_to_ubyte(lod_frac);
1716 word->bias_int = lod_int;
1717
1718 return true;
1719 }
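/* Worked example (editor's note): a bias of -0.5 first splits as
 * lod_int = 0, lod_frac = -0.5; the negative fraction carries, giving
 * lod_int = -1, lod_frac = 0.5, so bias_int = -1 and
 * bias = float_to_ubyte(0.5). */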
1720
1721 static enum mali_sampler_type
1722 midgard_sampler_type(nir_alu_type t) {
1723 switch (nir_alu_type_get_base_type(t))
1724 {
1725 case nir_type_float:
1726 return MALI_SAMPLER_FLOAT;
1727 case nir_type_int:
1728 return MALI_SAMPLER_SIGNED;
1729 case nir_type_uint:
1730 return MALI_SAMPLER_UNSIGNED;
1731 default:
1732 unreachable("Unknown sampler type");
1733 }
1734 }
1735
1736 static void
1737 emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
1738 unsigned midgard_texop)
1739 {
1740 /* TODO */
1741 //assert (!instr->sampler);
1742 //assert (!instr->texture_array_size);
1743
1744 int texture_index = instr->texture_index;
1745 int sampler_index = texture_index;
1746
1747 /* No helper to build texture words -- we do it all here */
1748 midgard_instruction ins = {
1749 .type = TAG_TEXTURE_4,
1750 .mask = 0xF,
1751 .dest = nir_dest_index(ctx, &instr->dest),
1752 .src = { ~0, ~0, ~0, ~0 },
1753 .swizzle = SWIZZLE_IDENTITY_4,
1754 .texture = {
1755 .op = midgard_texop,
1756 .format = midgard_tex_format(instr->sampler_dim),
1757 .texture_handle = texture_index,
1758 .sampler_handle = sampler_index,
1759
1760 /* TODO: half */
1761 .in_reg_full = 1,
1762 .out_full = 1,
1763
1764 .sampler_type = midgard_sampler_type(instr->dest_type),
1765 .shadow = instr->is_shadow,
1766 }
1767 };
1768
1769 /* We may need a temporary for the coordinate */
1770
1771 bool needs_temp_coord =
1772 (midgard_texop == TEXTURE_OP_TEXEL_FETCH) ||
1773 (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) ||
1774 (instr->is_shadow);
1775
1776 unsigned coords = needs_temp_coord ? make_compiler_temp_reg(ctx) : 0;
1777
1778 for (unsigned i = 0; i < instr->num_srcs; ++i) {
1779 int index = nir_src_index(ctx, &instr->src[i].src);
1780 unsigned nr_components = nir_src_num_components(instr->src[i].src);
1781
1782 switch (instr->src[i].src_type) {
1783 case nir_tex_src_coord: {
1784 emit_explicit_constant(ctx, index, index);
1785
1786 unsigned coord_mask = mask_of(instr->coord_components);
1787
1788 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
1789 /* texelFetch is undefined on samplerCube */
1790 assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
1791
1792 /* For cubemaps, we use a special ld/st op to
1793 * select the face and copy the xy into the
1794 * texture register */
1795
1796 midgard_instruction ld = m_ld_cubemap_coords(coords, 0);
1797 ld.src[1] = index;
1798 ld.mask = 0x3; /* xy */
1799 ld.load_store.arg_1 = 0x20;
1800 ld.swizzle[1][3] = COMPONENT_X;
1801 emit_mir_instruction(ctx, ld);
1802
1803 /* xyzw -> xyxx */
1804 ins.swizzle[1][2] = instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
1805 ins.swizzle[1][3] = COMPONENT_X;
1806 } else if (needs_temp_coord) {
1807 /* mov coord_temp, coords */
1808 midgard_instruction mov = v_mov(index, coords);
1809 mov.mask = coord_mask;
1810 emit_mir_instruction(ctx, mov);
1811 } else {
1812 coords = index;
1813 }
1814
1815 ins.src[1] = coords;
1816
1817 /* Texelfetch coordinates use all four elements
1818 * (xyz/index) regardless of texture dimensionality,
1819 * which means it's necessary to zero the unused
1820 * components to keep everything happy */
1821
1822 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
1823 /* mov index.zw, #0, or generalized */
1824 midgard_instruction mov =
1825 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), coords);
1826 mov.has_constants = true;
1827 mov.mask = coord_mask ^ 0xF;
1828 emit_mir_instruction(ctx, mov);
1829 }
1830
1831 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
1832 /* Array component in w but NIR wants it in z */
1833 if (nr_components == 3) {
1834 ins.swizzle[1][2] = COMPONENT_Z;
1835 ins.swizzle[1][3] = COMPONENT_Z;
1836 } else if (nr_components == 2) {
1837 ins.swizzle[1][2] =
1838 instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
1839 ins.swizzle[1][3] = COMPONENT_X;
1840 } else
1841 unreachable("Invalid texture 2D components");
1842 }
1843
1844 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
1845 /* We zeroed */
1846 ins.swizzle[1][2] = COMPONENT_Z;
1847 ins.swizzle[1][3] = COMPONENT_W;
1848 }
1849
1850 break;
1851 }
1852
1853 case nir_tex_src_bias:
1854 case nir_tex_src_lod: {
1855 /* Try as a constant if we can */
1856
1857 bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
1858 if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
1859 break;
1860
1861 ins.texture.lod_register = true;
1862 ins.src[2] = index;
1863
1864 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
1865 ins.swizzle[2][c] = COMPONENT_X;
1866
1867 emit_explicit_constant(ctx, index, index);
1868
1869 break;
1870 }
1871
1872 case nir_tex_src_offset: {
1873 ins.texture.offset_register = true;
1874 ins.src[3] = index;
1875
1876 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
1877 ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
1878
1879 emit_explicit_constant(ctx, index, index);
1880 break;
1881 }
1882
1883 case nir_tex_src_comparator: {
1884 unsigned comp = COMPONENT_Z;
1885
1886 /* mov coord_temp.foo, coords */
1887 midgard_instruction mov = v_mov(index, coords);
1888 mov.mask = 1 << comp;
1889
1890 for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i)
1891 mov.swizzle[1][i] = COMPONENT_X;
1892
1893 emit_mir_instruction(ctx, mov);
1894 break;
1895 }
1896
1897 default:
1898 unreachable("Unknown texture source type\n");
1899 }
1900 }
1901
1902 emit_mir_instruction(ctx, ins);
1903
1904 /* Used for .cont and .last hinting */
1905 ctx->texture_op_count++;
1906 }
1907
1908 static void
1909 emit_tex(compiler_context *ctx, nir_tex_instr *instr)
1910 {
1911 switch (instr->op) {
1912 case nir_texop_tex:
1913 case nir_texop_txb:
1914 emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
1915 break;
1916 case nir_texop_txl:
1917 emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
1918 break;
1919 case nir_texop_txf:
1920 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
1921 break;
1922 case nir_texop_txs:
1923 emit_sysval_read(ctx, &instr->instr, ~0, 4);
1924 break;
1925 default:
1926 unreachable("Unhanlded texture op");
1927 }
1928 }
1929
1930 static void
1931 emit_jump(compiler_context *ctx, nir_jump_instr *instr)
1932 {
1933 switch (instr->type) {
1934 case nir_jump_break: {
1935 /* Emit a branch out of the loop */
1936 struct midgard_instruction br = v_branch(false, false);
1937 br.branch.target_type = TARGET_BREAK;
1938 br.branch.target_break = ctx->current_loop_depth;
1939 emit_mir_instruction(ctx, br);
1940 break;
1941 }
1942
1943 default:
1944 DBG("Unknown jump type %d\n", instr->type);
1945 break;
1946 }
1947 }
1948
1949 static void
1950 emit_instr(compiler_context *ctx, struct nir_instr *instr)
1951 {
1952 switch (instr->type) {
1953 case nir_instr_type_load_const:
1954 emit_load_const(ctx, nir_instr_as_load_const(instr));
1955 break;
1956
1957 case nir_instr_type_intrinsic:
1958 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
1959 break;
1960
1961 case nir_instr_type_alu:
1962 emit_alu(ctx, nir_instr_as_alu(instr));
1963 break;
1964
1965 case nir_instr_type_tex:
1966 emit_tex(ctx, nir_instr_as_tex(instr));
1967 break;
1968
1969 case nir_instr_type_jump:
1970 emit_jump(ctx, nir_instr_as_jump(instr));
1971 break;
1972
1973 case nir_instr_type_ssa_undef:
1974 /* Spurious: undefs require no code */
1975 break;
1976
1977 default:
1978 DBG("Unhandled instruction type\n");
1979 break;
1980 }
1981 }
1982
1983
1984 /* ALU instructions can inline or embed constants, which decreases register
1985 * pressure and saves space. */
1986
1987 #define CONDITIONAL_ATTACH(idx) { \
1988 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
1989 \
1990 if (entry) { \
1991 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
1992 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
1993 } \
1994 }
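/* For reference, CONDITIONAL_ATTACH(0) expands to roughly the following
 * (a sketch, not the literal preprocessor output):
 *
 *    void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[0] + 1);
 *
 *    if (entry) {
 *            attach_constants(ctx, alu, entry, alu->src[0] + 1);
 *            alu->src[0] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
 *    }
 *
 * The +1 bias on the SSA index presumably keeps index 0 distinct from a
 * missing hash table entry. */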
1995
1996 static void
1997 inline_alu_constants(compiler_context *ctx, midgard_block *block)
1998 {
1999 mir_foreach_instr_in_block(block, alu) {
2000 /* Other instructions cannot inline constants */
2001 if (alu->type != TAG_ALU_4) continue;
2002 if (alu->compact_branch) continue;
2003
2004 /* If there is already a constant here, we can do nothing */
2005 if (alu->has_constants) continue;
2006
2007 CONDITIONAL_ATTACH(0);
2008
2009 if (!alu->has_constants) {
2010 CONDITIONAL_ATTACH(1);
2011 } else if (!alu->inline_constant) {
2012 /* Corner case: _two_ vec4 constants, for instance with a
2013 * csel. For this case, we can only use a constant
2014 * register for one; we'll have to emit a move for the
2015 * other. Note, if both arguments are constants, then
2016 * necessarily neither argument depends on the value of
2017 * any particular register. As the destination register
2018 * will be wiped, that means we can spill the constant
2019 * to the destination register.
2020 */
2021
2022 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2023 unsigned scratch = alu->dest;
2024
2025 if (entry) {
2026 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
2027 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2028
2029 /* Set the source */
2030 alu->src[1] = scratch;
2031
2032 /* Inject us -before- the last instruction which set r31 */
2033 mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
2034 }
2035 }
2036 }
2037 }
2038
2039 /* Being a little silly with the names, but returns the op that is the bitwise
2040 * inverse of the op with the arguments switched. I.e. (f and g are
2041 * contrapositives):
2042 *
2043 * f(a, b) = ~g(b, a)
2044 *
2045 * Corollary: if g is the contrapositive of f, f is the contrapositive of g:
2046 *
2047 * f(a, b) = ~g(b, a)
2048 * ~f(a, b) = g(b, a)
2049 * ~f(a, b) = ~h(a, b) where h is the contrapositive of g
2050 * f(a, b) = h(a, b)
2051 *
2052 * Thus we define this function in pairs.
2053 */
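/* A worked instance (ignoring NaN behaviour), with f = flt and g = fle:
 *
 *    flt(a, b) = (a < b) = !(b <= a) = ~fle(b, a)
 *
 * so flt/fle form such a pair, as do ilt/ile below. */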
2054
2055 static inline midgard_alu_op
2056 mir_contrapositive(midgard_alu_op op)
2057 {
2058 switch (op) {
2059 case midgard_alu_op_flt:
2060 return midgard_alu_op_fle;
2061 case midgard_alu_op_fle:
2062 return midgard_alu_op_flt;
2063
2064 case midgard_alu_op_ilt:
2065 return midgard_alu_op_ile;
2066 case midgard_alu_op_ile:
2067 return midgard_alu_op_ilt;
2068
2069 default:
2070 unreachable("No known contrapositive");
2071 }
2072 }
2073
2074 /* Midgard supports two types of constants, embedded constants (128-bit) and
2075 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2076 * constants can be demoted to inline constants, for space savings and
2077 * sometimes a performance boost */
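/* As a self-contained sketch of the float demotion criterion applied
 * below (the helper name is illustrative; the pass open-codes the
 * check): a 32-bit float constant may only become an inline constant
 * if it survives a round trip through half precision. */

static inline bool
mir_float_survives_fp16(float x)
{
        /* Same conversion helpers the pass itself uses */
        return _mesa_half_to_float(_mesa_float_to_half(x)) == x;
}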
2078
2079 static void
2080 embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
2081 {
2082 mir_foreach_instr_in_block(block, ins) {
2083 if (!ins->has_constants) continue;
2084 if (ins->has_inline_constant) continue;
2085
2086 /* Blend constants must not be inlined by definition */
2087 if (ins->has_blend_constant) continue;
2088
2089 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
2090 bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16;
2091 bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32;
2092
2093 if (!(is_16 || is_32))
2094 continue;
2095
2096 /* Only the second source may be an inline constant, due to
2097 * encoding restrictions. So if the constant is in the first
2098 * source, we try to flip the arguments */
2099
2100 int op = ins->alu.op;
2101
2102 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2103 bool flip = alu_opcode_props[op].props & OP_COMMUTES;
2104
2105 switch (op) {
2106 /* Conditionals can be inverted */
2107 case midgard_alu_op_flt:
2108 case midgard_alu_op_ilt:
2109 case midgard_alu_op_fle:
2110 case midgard_alu_op_ile:
2111 ins->alu.op = mir_contrapositive(ins->alu.op);
2112 ins->invert = true;
2113 flip = true;
2114 break;
2115
2116 case midgard_alu_op_fcsel:
2117 case midgard_alu_op_icsel:
2118 DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
2119 default:
2120 break;
2121 }
2122
2123 if (flip)
2124 mir_flip(ins);
2125 }
2126
2127 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2128 /* Extract the source information */
2129
2130 midgard_vector_alu_src *src;
2131 int q = ins->alu.src2;
2132 midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q;
2133 src = m;
2134
2135 /* Component is from the swizzle. Take the first component set in the mask */
2136 assert(ins->mask);
2137 unsigned first_comp = ffs(ins->mask) - 1;
2138 unsigned component = ins->swizzle[1][first_comp];
2139
2140 /* Scale constant appropriately, if we can legally */
2141 uint16_t scaled_constant = 0;
2142
2143 if (midgard_is_integer_op(op) || is_16) {
2144 unsigned int *iconstants = (unsigned int *) ins->constants;
2145 scaled_constant = (uint16_t) iconstants[component];
2146
2147 /* Constant overflow after resize */
2148 if (scaled_constant != iconstants[component])
2149 continue;
2150 } else {
2151 float *f = (float *) ins->constants;
2152 float original = f[component];
2153 scaled_constant = _mesa_float_to_half(original);
2154
2155 /* Check for loss of precision. If this is
2156 * mediump, we don't care, but for a highp
2157 * shader, we need to pay attention. NIR
2158 * doesn't yet tell us which mode we're in!
2159 * Practically this prevents most constants
2160 * from being inlined, sadly. */
2161
2162 float fp32 = _mesa_half_to_float(scaled_constant);
2163
2164 if (fp32 != original)
2165 continue;
2166 }
2167
2168 /* We don't know how to handle these with a constant */
2169
2170 if (mir_nontrivial_source2_mod_simple(ins) || src->rep_low || src->rep_high) {
2171 DBG("Bailing inline constant...\n");
2172 continue;
2173 }
2174
2175 /* Make sure that the constant is not itself a vector
2176 * by checking if all accessed values are the same. */
2177
2178 uint32_t *cons = ins->constants;
2179 uint32_t value = cons[component];
2180
2181 bool is_vector = false;
2182 unsigned mask = effective_writemask(&ins->alu, ins->mask);
2183
2184 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
2185 /* We only care if this component is actually used */
2186 if (!(mask & (1 << c)))
2187 continue;
2188
2189 uint32_t test = cons[ins->swizzle[1][c]];
2190
2191 if (test != value) {
2192 is_vector = true;
2193 break;
2194 }
2195 }
2196
2197 if (is_vector)
2198 continue;
2199
2200 /* Get rid of the embedded constant */
2201 ins->has_constants = false;
2202 ins->src[1] = ~0;
2203 ins->has_inline_constant = true;
2204 ins->inline_constant = scaled_constant;
2205 }
2206 }
2207 }
2208
2209 /* Dead code elimination for branches at the end of a block - only one branch
2210 * per block is legal semantically */
2211
2212 static void
2213 midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
2214 {
2215 bool branched = false;
2216
2217 mir_foreach_instr_in_block_safe(block, ins) {
2218 if (!midgard_is_branch_unit(ins->unit)) continue;
2219
2220 if (branched)
2221 mir_remove_instruction(ins);
2222
2223 branched = true;
2224 }
2225 }
2226
2227 /* fmov.pos is an idiom for fpos. Propagate the .pos up to the source, so
2228 * that the move itself can be propagated away entirely */
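/* Illustratively, in pseudo-MIR (t0/t1 being hypothetical temporaries):
 *
 *    fadd      t0, a, b              fadd.pos  t0, a, b
 *    fmov.pos  t1, t0      ==>       fmov      t1, t0
 *
 * leaving a plain move that copy propagation can then remove. */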
2229
2230 static bool
2231 mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
2232 {
2233 /* Nothing to do */
2234 if (comp == midgard_outmod_none)
2235 return true;
2236
2237 if (*outmod == midgard_outmod_none) {
2238 *outmod = comp;
2239 return true;
2240 }
2241
2242 /* TODO: Compose rules */
2243 return false;
2244 }
2245
2246 static bool
2247 midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
2248 {
2249 bool progress = false;
2250
2251 mir_foreach_instr_in_block_safe(block, ins) {
2252 if (ins->type != TAG_ALU_4) continue;
2253 if (ins->alu.op != midgard_alu_op_fmov) continue;
2254 if (ins->alu.outmod != midgard_outmod_pos) continue;
2255
2256 /* TODO: Registers? */
2257 unsigned src = ins->src[1];
2258 if (src & IS_REG) continue;
2259
2260 /* There might be a source modifier, too */
2261 if (mir_nontrivial_source2_mod(ins)) continue;
2262
2263 /* Backpropagate the modifier */
2264 mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
2265 if (v->type != TAG_ALU_4) continue;
2266 if (v->dest != src) continue;
2267
2268 /* Can we even take a float outmod? */
2269 if (midgard_is_integer_out_op(v->alu.op)) continue;
2270
2271 midgard_outmod_float temp = v->alu.outmod;
2272 
2273 /* Throw in the towel if the modifiers cannot compose */
2274 if (!mir_compose_float_outmod(&temp, ins->alu.outmod)) break;
2275 
2276 /* Otherwise, transfer the modifier */
2277 v->alu.outmod = temp;
2278 ins->alu.outmod = midgard_outmod_none;
2279 progress = true;
2280
2281 break;
2282 }
2283 }
2284
2285 return progress;
2286 }
2287
2288 static unsigned
2289 emit_fragment_epilogue(compiler_context *ctx, unsigned rt)
2290 {
2291 /* Loop to ourselves */
2292
2293 struct midgard_instruction ins = v_branch(false, false);
2294 ins.writeout = true;
2295 ins.branch.target_block = ctx->block_count - 1;
2296 ins.constants[0] = rt * 0x100;
2297 emit_mir_instruction(ctx, ins);
2298
2299 ctx->current_block->epilogue = true;
2300 schedule_barrier(ctx);
2301 return ins.branch.target_block;
2302 }
2303
2304 static midgard_block *
2305 emit_block(compiler_context *ctx, nir_block *block)
2306 {
2307 midgard_block *this_block = ctx->after_block;
2308 ctx->after_block = NULL;
2309
2310 if (!this_block)
2311 this_block = create_empty_block(ctx);
2312
2313 list_addtail(&this_block->link, &ctx->blocks);
2314
2315 this_block->is_scheduled = false;
2316 ++ctx->block_count;
2317
2318 /* Set up current block */
2319 list_inithead(&this_block->instructions);
2320 ctx->current_block = this_block;
2321
2322 nir_foreach_instr(instr, block) {
2323 emit_instr(ctx, instr);
2324 ++ctx->instruction_count;
2325 }
2326
2327 return this_block;
2328 }
2329
2330 static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2331
2332 static void
2333 emit_if(struct compiler_context *ctx, nir_if *nif)
2334 {
2335 midgard_block *before_block = ctx->current_block;
2336
2337 /* Speculatively emit the branch, but we can't fill it in until later */
2338 EMIT(branch, true, true);
2339 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
2340 then_branch->src[0] = nir_src_index(ctx, &nif->condition);
2341
2342 /* Emit the two subblocks. */
2343 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
2344 midgard_block *end_then_block = ctx->current_block;
2345
2346 /* Emit a jump from the end of the then block to the end of the else */
2347 EMIT(branch, false, false);
2348 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2349
2350 /* Emit second block, and check if it's empty */
2351
2352 int else_idx = ctx->block_count;
2353 int count_in = ctx->instruction_count;
2354 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
2355 midgard_block *end_else_block = ctx->current_block;
2356 int after_else_idx = ctx->block_count;
2357
2358 /* Now that we have the subblocks emitted, fix up the branches */
2359
2360 assert(then_block);
2361 assert(else_block);
2362
2363 if (ctx->instruction_count == count_in) {
2364 /* The else block is empty, so don't emit an exit jump */
2365 mir_remove_instruction(then_exit);
2366 then_branch->branch.target_block = after_else_idx;
2367 } else {
2368 then_branch->branch.target_block = else_idx;
2369 then_exit->branch.target_block = after_else_idx;
2370 }
2371
2372 /* Wire up the successors */
2373
2374 ctx->after_block = create_empty_block(ctx);
2375
2376 midgard_block_add_successor(before_block, then_block);
2377 midgard_block_add_successor(before_block, else_block);
2378
2379 midgard_block_add_successor(end_then_block, ctx->after_block);
2380 midgard_block_add_successor(end_else_block, ctx->after_block);
2381 }
2382
2383 static void
2384 emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2385 {
2386 /* Remember where we are */
2387 midgard_block *start_block = ctx->current_block;
2388
2389 /* Allocate a loop number, growing the current inner loop depth */
2390 int loop_idx = ++ctx->current_loop_depth;
2391
2392 /* Get index from before the body so we can loop back later */
2393 int start_idx = ctx->block_count;
2394
2395 /* Emit the body itself */
2396 midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
2397
2398 /* Branch back to the top of the loop */
2399 struct midgard_instruction br_back = v_branch(false, false);
2400 br_back.branch.target_block = start_idx;
2401 emit_mir_instruction(ctx, br_back);
2402
2403 /* Mark down that branch in the graph. */
2404 midgard_block_add_successor(start_block, loop_block);
2405 midgard_block_add_successor(ctx->current_block, loop_block);
2406
2407 /* Find the index of the block about to follow us; we don't add one,
2408 * since blocks are 0-indexed and block_count is already the next index */
2409 int break_block_idx = ctx->block_count;
2410
2411 /* Fix up the break statements we emitted to point to the right place,
2412 * now that we can allocate a block number for them */
2413 ctx->after_block = create_empty_block(ctx);
2414
2415 list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
2416 mir_foreach_instr_in_block(block, ins) {
2417 if (ins->type != TAG_ALU_4) continue;
2418 if (!ins->compact_branch) continue;
2419
2420 /* We found a branch -- check the type to see if we need to do anything */
2421 if (ins->branch.target_type != TARGET_BREAK) continue;
2422
2423 /* It's a break! Check if it's our break */
2424 if (ins->branch.target_break != loop_idx) continue;
2425
2426 /* Okay, cool, we're breaking out of this loop.
2427 * Rewrite from a break to a goto */
2428
2429 ins->branch.target_type = TARGET_GOTO;
2430 ins->branch.target_block = break_block_idx;
2431
2432 midgard_block_add_successor(block, ctx->after_block);
2433 }
2434 }
2435
2436 /* Now that we've finished emitting the loop, free up the depth again
2437 * so we play nice with recursion amid nested loops */
2438 --ctx->current_loop_depth;
2439
2440 /* Record the loop for shader-db stats */
2441 ++ctx->loop_count;
2442 }
2443
2444 static midgard_block *
2445 emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2446 {
2447 midgard_block *start_block = NULL;
2448
2449 foreach_list_typed(nir_cf_node, node, node, list) {
2450 switch (node->type) {
2451 case nir_cf_node_block: {
2452 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2453
2454 if (!start_block)
2455 start_block = block;
2456
2457 break;
2458 }
2459
2460 case nir_cf_node_if:
2461 emit_if(ctx, nir_cf_node_as_if(node));
2462 break;
2463
2464 case nir_cf_node_loop:
2465 emit_loop(ctx, nir_cf_node_as_loop(node));
2466 break;
2467
2468 case nir_cf_node_function:
2469 assert(0);
2470 break;
2471 }
2472 }
2473
2474 return start_block;
2475 }
2476
2477 /* Due to lookahead, we need to report the first tag executed in the command
2478 * stream and in branch targets. An initial block might be empty, so iterate
2479 * until we find one that 'works' */
2480
2481 static unsigned
2482 midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2483 {
2484 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2485
2486 unsigned first_tag = 0;
2487
2488 mir_foreach_block_from(ctx, initial_block, v) {
2489 if (v->quadword_count) {
2490 midgard_bundle *initial_bundle =
2491 util_dynarray_element(&v->bundles, midgard_bundle, 0);
2492
2493 first_tag = initial_bundle->tag;
2494 break;
2495 }
2496 }
2497
2498 return first_tag;
2499 }
2500
2501 static unsigned
2502 pan_format_from_nir_base(nir_alu_type base)
2503 {
2504 switch (base) {
2505 case nir_type_int:
2506 return MALI_FORMAT_SINT;
2507 case nir_type_uint:
2508 case nir_type_bool:
2509 return MALI_FORMAT_UINT;
2510 case nir_type_float:
2511 return MALI_CHANNEL_FLOAT;
2512 default:
2513 unreachable("Invalid base");
2514 }
2515 }
2516
2517 static unsigned
2518 pan_format_from_nir_size(nir_alu_type base, unsigned size)
2519 {
2520 if (base == nir_type_float) {
2521 switch (size) {
2522 case 16: return MALI_FORMAT_SINT; /* aliased to fp16 once MALI_CHANNEL_FLOAT is set */
2523 case 32: return MALI_FORMAT_UNORM; /* aliased to fp32 once MALI_CHANNEL_FLOAT is set */
2524 default:
2525 unreachable("Invalid float size for format");
2526 }
2527 } else {
2528 switch (size) {
2529 case 1:
2530 case 8: return MALI_CHANNEL_8;
2531 case 16: return MALI_CHANNEL_16;
2532 case 32: return MALI_CHANNEL_32;
2533 default:
2534 unreachable("Invalid int size for format");
2535 }
2536 }
2537 }
2538
2539 static enum mali_format
2540 pan_format_from_glsl(const struct glsl_type *type)
2541 {
2542 enum glsl_base_type glsl_base = glsl_get_base_type(glsl_without_array(type));
2543 nir_alu_type t = nir_get_nir_type_for_glsl_base_type(glsl_base);
2544
2545 unsigned base = nir_alu_type_get_base_type(t);
2546 unsigned size = nir_alu_type_get_type_size(t);
2547
2548 return pan_format_from_nir_base(base) |
2549 pan_format_from_nir_size(base, size) |
2550 MALI_NR_CHANNELS(4);
2551 }
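/* For example, a float vec4 varying composes as
 *
 *    pan_format_from_nir_base(nir_type_float) |
 *    pan_format_from_nir_size(nir_type_float, 32) |
 *    MALI_NR_CHANNELS(4)
 *
 * Note the channel count is always reported as 4, whatever the GLSL
 * vector width. */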
2552
2553 /* For each fragment writeout instruction, generate a writeout loop to
2554 * associate with it */
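/* A rough sketch of the control flow this builds, assuming a fragment
 * shader writing two render targets:
 *
 *    [body] -> writeout branch (rt 0) -> [epilogue 0: loops to itself]
 *           <- unconditional branch restoring the body
 *           -> writeout branch (rt 1) -> [epilogue 1: last_writeout set]
 */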
2555
2556 static void
2557 mir_add_writeout_loops(compiler_context *ctx)
2558 {
2559 for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
2560 midgard_instruction *br = ctx->writeout_branch[rt];
2561 if (!br) continue;
2562
2563 unsigned popped = br->branch.target_block;
2564 midgard_block_add_successor(mir_get_block(ctx, popped - 1), ctx->current_block);
2565 br->branch.target_block = emit_fragment_epilogue(ctx, rt);
2566
2567 /* If we have more RTs, we'll need to restore back after our
2568 * loop terminates */
2569
2570 if ((rt + 1) < ARRAY_SIZE(ctx->writeout_branch) && ctx->writeout_branch[rt + 1]) {
2571 midgard_instruction uncond = v_branch(false, false);
2572 uncond.branch.target_block = popped;
2573 emit_mir_instruction(ctx, uncond);
2574 midgard_block_add_successor(ctx->current_block, mir_get_block(ctx, popped));
2575 schedule_barrier(ctx);
2576 } else {
2577 /* We're last, so we can terminate here */
2578 br->last_writeout = true;
2579 }
2580 }
2581 }
2582
2583 int
2584 midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend, unsigned blend_rt, unsigned gpu_id, bool shaderdb)
2585 {
2586 struct util_dynarray *compiled = &program->compiled;
2587
2588 midgard_debug = debug_get_option_midgard_debug();
2589
2590 /* TODO: Bound against what? */
2591 compiler_context *ctx = rzalloc(NULL, compiler_context);
2592
2593 ctx->nir = nir;
2594 ctx->stage = nir->info.stage;
2595 ctx->is_blend = is_blend;
2596 ctx->alpha_ref = program->alpha_ref;
2597 ctx->blend_rt = blend_rt;
2598 ctx->quirks = midgard_get_quirks(gpu_id);
2599
2600 /* Start off with a safe cutoff, allowing usage of all 16 work
2601 * registers. Later, we'll promote uniform reads to uniform registers
2602 * if we determine it is beneficial to do so */
2603 ctx->uniform_cutoff = 8;
2604
2605 /* Initialize global (not per-block) hash tables */
2606
2607 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
2608 ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
2609 ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
2610
2611 /* Record the varying mapping for the command stream's bookkeeping */
2612
2613 struct exec_list *varyings =
2614 ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
2615
2616 unsigned max_varying = 0;
2617 nir_foreach_variable(var, varyings) {
2618 unsigned loc = var->data.driver_location;
2619 unsigned sz = glsl_type_size(var->type, FALSE);
2620
2621 for (int c = 0; c < sz; ++c) {
2622 program->varyings[loc + c] = var->data.location + c;
2623 program->varying_type[loc + c] = pan_format_from_glsl(var->type);
2624 max_varying = MAX2(max_varying, loc + c);
2625 }
2626 }
2627
2628 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2629 * (so we don't accidentally duplicate the epilogue since mesa/st has
2630 * messed with our I/O quite a bit already) */
2631
2632 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2633
2634 if (ctx->stage == MESA_SHADER_VERTEX) {
2635 NIR_PASS_V(nir, nir_lower_viewport_transform);
2636 NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
2637 }
2638
2639 NIR_PASS_V(nir, nir_lower_var_copies);
2640 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2641 NIR_PASS_V(nir, nir_split_var_copies);
2642 NIR_PASS_V(nir, nir_lower_var_copies);
2643 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2644 NIR_PASS_V(nir, nir_lower_var_copies);
2645 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2646
2647 NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
2648
2649 /* Optimisation passes */
2650
2651 optimise_nir(nir, ctx->quirks);
2652
2653 if (midgard_debug & MIDGARD_DBG_SHADERS) {
2654 nir_print_shader(nir, stdout);
2655 }
2656
2657 /* Assign sysvals and counts, now that we're sure
2658 * (post-optimisation) */
2659
2660 midgard_nir_assign_sysvals(ctx, nir);
2661
2662 program->uniform_count = nir->num_uniforms;
2663 program->sysval_count = ctx->sysval_count;
2664 memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
2665
2666 nir_foreach_function(func, nir) {
2667 if (!func->impl)
2668 continue;
2669
2670 list_inithead(&ctx->blocks);
2671 ctx->block_count = 0;
2672 ctx->func = func;
2673
2674 emit_cf_list(ctx, &func->impl->body);
2675 break; /* TODO: Multi-function shaders */
2676 }
2677
2678 util_dynarray_init(compiled, NULL);
2679
2680 /* Per-block lowering before opts */
2681
2682 mir_foreach_block(ctx, block) {
2683 inline_alu_constants(ctx, block);
2684 midgard_opt_promote_fmov(ctx, block);
2685 embedded_to_inline_constant(ctx, block);
2686 }
2687 /* MIR-level optimizations */
2688
2689 bool progress = false;
2690
2691 do {
2692 progress = false;
2693
2694 mir_foreach_block(ctx, block) {
2695 progress |= midgard_opt_pos_propagate(ctx, block);
2696 progress |= midgard_opt_copy_prop(ctx, block);
2697 progress |= midgard_opt_dead_code_eliminate(ctx, block);
2698 progress |= midgard_opt_combine_projection(ctx, block);
2699 progress |= midgard_opt_varying_projection(ctx, block);
2700 progress |= midgard_opt_not_propagate(ctx, block);
2701 progress |= midgard_opt_fuse_src_invert(ctx, block);
2702 progress |= midgard_opt_fuse_dest_invert(ctx, block);
2703 progress |= midgard_opt_csel_invert(ctx, block);
2704 progress |= midgard_opt_drop_cmp_invert(ctx, block);
2705 progress |= midgard_opt_invert_branch(ctx, block);
2706 }
2707 } while (progress);
2708
2709 mir_foreach_block(ctx, block) {
2710 midgard_lower_invert(ctx, block);
2711 midgard_lower_derivatives(ctx, block);
2712 }
2713
2714 /* Nested control-flow can result in dead branches at the end of the
2715 * block. This messes with our analysis and is just dead code, so cull
2716 * them */
2717 mir_foreach_block(ctx, block) {
2718 midgard_opt_cull_dead_branch(ctx, block);
2719 }
2720
2721 /* Ensure we were lowered */
2722 mir_foreach_instr_global(ctx, ins) {
2723 assert(!ins->invert);
2724 }
2725
2726 if (ctx->stage == MESA_SHADER_FRAGMENT)
2727 mir_add_writeout_loops(ctx);
2728
2729 /* Schedule! */
2730 schedule_program(ctx);
2731 mir_ra(ctx);
2732
2733 /* Now that all the bundles are scheduled and we can calculate block
2734 * sizes, emit actual branch instructions rather than placeholders */
2735
2736 int br_block_idx = 0;
2737
2738 mir_foreach_block(ctx, block) {
2739 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2740 for (int c = 0; c < bundle->instruction_count; ++c) {
2741 midgard_instruction *ins = bundle->instructions[c];
2742
2743 if (!midgard_is_branch_unit(ins->unit)) continue;
2744
2745 /* Parse some basic branch info */
2746 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
2747 bool is_conditional = ins->branch.conditional;
2748 bool is_inverted = ins->branch.invert_conditional;
2749 bool is_discard = ins->branch.target_type == TARGET_DISCARD;
2750 bool is_writeout = ins->writeout;
2751
2752 /* Determine the block we're jumping to */
2753 int target_number = ins->branch.target_block;
2754
2755 /* Report the destination tag */
2756 int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
2757
2758 /* Count up the number of quadwords we're
2759 * jumping over = number of quadwords until
2760 * (br_block_idx, target_number) */
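/* E.g. a forward jump from block 2 to block 5 adds the quadword
 * counts of blocks 3 and 4, while a backward jump from block 2 to
 * block 1 subtracts those of blocks 2 and 1, since the backward
 * walk below includes both endpoints. */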
2761
2762 int quadword_offset = 0;
2763
2764 if (is_discard) {
2765 /* Ignored */
2766 } else if (target_number > br_block_idx) {
2767 /* Jump forward */
2768
2769 for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
2770 midgard_block *blk = mir_get_block(ctx, idx);
2771 assert(blk);
2772
2773 quadword_offset += blk->quadword_count;
2774 }
2775 } else {
2776 /* Jump backwards */
2777
2778 for (int idx = br_block_idx; idx >= target_number; --idx) {
2779 midgard_block *blk = mir_get_block(ctx, idx);
2780 assert(blk);
2781
2782 quadword_offset -= blk->quadword_count;
2783 }
2784 }
2785
2786 /* Unconditional extended branches (far jumps)
2787 * have issues, so we always use a conditional
2788 * branch, setting the condition to always for
2789 * unconditional. For compact unconditional
2790 * branches, cond isn't used so it doesn't
2791 * matter what we pick. */
2792
2793 midgard_condition cond =
2794 !is_conditional ? midgard_condition_always :
2795 is_inverted ? midgard_condition_false :
2796 midgard_condition_true;
2797
2798 midgard_jmp_writeout_op op =
2799 is_discard ? midgard_jmp_writeout_op_discard :
2800 is_writeout ? midgard_jmp_writeout_op_writeout :
2801 (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
2802 midgard_jmp_writeout_op_branch_cond;
2803
2804 if (!is_compact) {
2805 midgard_branch_extended branch =
2806 midgard_create_branch_extended(
2807 cond, op,
2808 dest_tag,
2809 quadword_offset);
2810
2811 memcpy(&ins->branch_extended, &branch, sizeof(branch));
2812 } else if (is_conditional || is_discard) {
2813 midgard_branch_cond branch = {
2814 .op = op,
2815 .dest_tag = dest_tag,
2816 .offset = quadword_offset,
2817 .cond = cond
2818 };
2819
2820 assert(branch.offset == quadword_offset);
2821
2822 memcpy(&ins->br_compact, &branch, sizeof(branch));
2823 } else {
2824 assert(op == midgard_jmp_writeout_op_branch_uncond);
2825
2826 midgard_branch_uncond branch = {
2827 .op = op,
2828 .dest_tag = dest_tag,
2829 .offset = quadword_offset,
2830 .unknown = 1
2831 };
2832
2833 assert(branch.offset == quadword_offset);
2834
2835 memcpy(&ins->br_compact, &branch, sizeof(branch));
2836 }
2837 }
2838 }
2839
2840 ++br_block_idx;
2841 }
2842
2843 /* Emit flat binary from the instruction arrays. Iterate each block in
2844 * sequence. Save instruction boundaries such that lookahead tags can
2845 * be assigned easily */
2846
2847 /* Cache _all_ bundles in source order for lookahead across failed branches */
2848
2849 int bundle_count = 0;
2850 mir_foreach_block(ctx, block) {
2851 bundle_count += block->bundles.size / sizeof(midgard_bundle);
2852 }
2853 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
2854 int bundle_idx = 0;
2855 mir_foreach_block(ctx, block) {
2856 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
2857 source_order_bundles[bundle_idx++] = bundle;
2858 }
2859 }
2860
2861 int current_bundle = 0;
2862
2863 /* Midgard prefetches instruction types, so during emission we
2864 * need to look ahead to the next bundle's tag. For the final
2865 * bundle, we use a tag of 1 instead. */
2866
2867 mir_foreach_block(ctx, block) {
2868 mir_foreach_bundle_in_block(block, bundle) {
2869 int lookahead = 1;
2870
2871 if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
2872 lookahead = source_order_bundles[current_bundle + 1]->tag;
2873
2874 emit_binary_bundle(ctx, bundle, compiled, lookahead);
2875 ++current_bundle;
2876 }
2877
2878 /* TODO: Free deeper */
2879 //util_dynarray_fini(&block->instructions);
2880 }
2881
2882 free(source_order_bundles);
2883
2884 /* Report the very first tag executed */
2885 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
2886
2887 /* Deal with off-by-one related to the fencepost problem */
2888 program->work_register_count = ctx->work_registers + 1;
2889 program->uniform_cutoff = ctx->uniform_cutoff;
2890
2891 program->blend_patch_offset = ctx->blend_constant_offset;
2892 program->tls_size = ctx->tls_size;
2893
2894 if (midgard_debug & MIDGARD_DBG_SHADERS)
2895 disassemble_midgard(program->compiled.data, program->compiled.size, gpu_id, ctx->stage);
2896
2897 if (midgard_debug & MIDGARD_DBG_SHADERDB || shaderdb) {
2898 unsigned nr_bundles = 0, nr_ins = 0;
2899
2900 /* Count instructions and bundles */
2901
2902 mir_foreach_block(ctx, block) {
2903 nr_bundles += util_dynarray_num_elements(
2904 &block->bundles, midgard_bundle);
2905
2906 mir_foreach_bundle_in_block(block, bun)
2907 nr_ins += bun->instruction_count;
2908 }
2909
2910 /* Calculate thread count. The work register count determines
2911 * the maximum thread count via fixed cutoffs */
2912
2913 unsigned nr_registers = program->work_register_count;
2914
2915 unsigned nr_threads =
2916 (nr_registers <= 4) ? 4 :
2917 (nr_registers <= 8) ? 2 :
2918 1;
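/* E.g. a shader needing 6 work registers runs at most 2 threads;
 * staying at or below 4 registers would allow all 4. */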
2919
2920 /* Dump stats */
2921
2922 fprintf(stderr, "shader%d - %s shader: "
2923 "%u inst, %u bundles, %u quadwords, "
2924 "%u registers, %u threads, %u loops, "
2925 "%u:%u spills:fills\n",
2926 SHADER_DB_COUNT++,
2927 gl_shader_stage_name(ctx->stage),
2928 nr_ins, nr_bundles, ctx->quadword_count,
2929 nr_registers, nr_threads,
2930 ctx->loop_count,
2931 ctx->spills, ctx->fills);
2932 }
2933
2934 ralloc_free(ctx);
2935
2936 return 0;
2937 }