panfrost: add atomic ops infrastructure
src/panfrost/midgard/midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"

static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
        if (!half) {
                assert(!shift);
                /* Sign-extension, really... */
                return scalar ? 0 : midgard_int_normal;
        }

        if (shift)
                return midgard_int_shift;

        if (nir_alu_type_get_base_type(T) == nir_type_int)
                return midgard_int_sign_extend;
        else
                return midgard_int_zero_extend;
}

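/* Packs the source modifier for source i: for integer ops this is a
 * midgard_int_mod choosing shift/extension behaviour, while for float ops
 * bit 0 carries abs and bit 1 carries neg. */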
unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
        bool integer = midgard_is_integer_op(ins->op);
        unsigned base_size = max_bitsize_for_alu(ins);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
        bool half = (sz == (base_size >> 1));

        return integer ?
                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
                ((ins->src_abs[i] << 0) |
                 ((ins->src_neg[i] << 1)));
}

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

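/* A scalar ALU source is a 6-bit field: a 2-bit modifier, a "full" (32-bit)
 * flag, and a component index addressing 16-bit slots, which is why full
 * sources are shifted onto even (32-bit aligned) slots. */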
static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

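        /* The scalar form scatters the constant's bits: writing c for the
         * masked inline constant, imm[1:0] = c[10:9], imm[2] = c[8],
         * imm[5:3] = c[7:5] and imm[11:6] = c[5:0]. */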
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

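/* The hardware writemask is expressed per 16-bit lane of the 128-bit register,
 * so 32-bit channels expand to pairs of mask bits and 64-bit channels to
 * nibbles (see expand_writemask); smaller sizes pass through unchanged. */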
static void
mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = max_bitsize_for_alu(ins);
        signed upper_shift = mir_upper_override(ins, inst_size);

        if (upper_shift >= 0) {
                effective >>= upper_shift;
                alu->dest_override = upper_shift ?
                        midgard_dest_override_upper :
                        midgard_dest_override_lower;
        } else {
                alu->dest_override = midgard_dest_override_none;
        }

        if (inst_size == 32)
                alu->mask = expand_writemask(effective, 2);
        else if (inst_size == 64)
                alu->mask = expand_writemask(effective, 1);
        else
                alu->mask = effective;
}

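/* Packs the 8-bit swizzle field for one vector source, and reports via
 * rep_low/rep_high whether the source should be replicated because it only
 * addresses one half of the 128-bit register. */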
static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                 nir_alu_type T, midgard_reg_mode reg_mode,
                 bool op_channeled, bool *rep_low, bool *rep_high)
{
        unsigned packed = 0;
        unsigned sz = nir_alu_type_get_type_size(T);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *rep_low = lo;
                        } else {
                                *rep_low = hi;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */

                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        ASSERTED bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }

                /* Replicate for now.. should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *rep_low = !upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        *rep_low = upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_32) {
                        *rep_low = upper;
                } else {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}

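/* Packs both vector ALU sources into the src1/src2 fields (swizzle, modifier,
 * half and replicate flags). src2 is skipped when there is an inline
 * constant, since the constant occupies that slot. */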
static void
mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);

        unsigned base_size = max_bitsize_for_alu(ins);

        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                bool rep_lo = false, rep_hi = false;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                bool half = (sz == (base_size >> 1));

                assert((sz == base_size) || half);

                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                ins->src_types[i], reg_mode_for_bitsize(base_size),
                                channeled, &rep_lo, &rep_hi);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .rep_low = rep_lo,
                        .rep_high = rep_hi,
                        .half = half,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        alu->src1 = p;
                else
                        alu->src2 = p;
        }
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Up to 3 { ALU, LDST } bundles can execute in parallel with a texture op.
 * Given a texture op, look ahead to see how many such bundles we can flag for
 * OoO execution */

static bool
mir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
                unsigned dependency)
{
        /* Don't read out of bounds */
        if (bundle >= (midgard_bundle *) ((char *) block->bundles.data + block->bundles.size))
                return false;

        /* Texture ops can't execute with other texture ops */
        if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
                return false;

        /* Ensure there is no read-after-write dependency */

        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == dependency)
                                return false;
                }
        }

        /* Otherwise, we're okay */
        return true;
}

static void
mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins)
{
        unsigned count = 0;

        for (count = 0; count < 3; ++count) {
                if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
                        break;
        }

        ins->texture.out_of_order = count;
}

/* Load store masks are 4-bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs, vec2 is duplicated.
 * TODO: 8-bit?
 */

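/* For example, a 64-bit mask of 0b11 packs to 0b1111, and a duplicated 16-bit
 * mask of 0b11001100 collapses to 0b1010. */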
static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        ASSERTED bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        } else {
                assert(sz == 32);
        }

        ins->load_store.mask = packed;
}

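/* Midgard encodes inverted sources with dedicated opcodes (iandnot, inor,
 * inand, iornot, inxor), so fold the src_invert flags into the opcode here. */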
static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        switch (ins->op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inor;
                else if (inv[1])
                        ins->op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /* a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inand;
                else if (inv[1])
                        ins->op = midgard_alu_op_iornot;

                break;

        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (inv[0] ^ inv[1])
                        ins->op = midgard_alu_op_inxor;

                break;

        default:
                break;
        }
}

/* Opcodes with ROUNDS are the base (rte/0) variant, so we can just add the
 * round mode to select the rounded opcode. */

static void
mir_lower_roundmode(midgard_instruction *ins)
{
        if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
                assert(ins->roundmode <= 0x3);
                ins->op += ins->roundmode;
        }
}

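/* Assembles the 64-bit load/store word for an instruction: the main register
 * (the destination, or the value source for stores), the swizzle, and the
 * arg_1/arg_2 auxiliary register selects taken from src[1] and src[2]. */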
static midgard_load_store_word
load_store_from_instr(midgard_instruction *ins)
{
        midgard_load_store_word ldst = ins->load_store;
        ldst.op = ins->op;

        if (OP_IS_STORE(ldst.op)) {
                ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
        } else {
                ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
        }

        /* Atomic opcode swizzles have a special meaning:
         *   - The first two bits say which component of the implicit register
         *     should be used
         *   - The next two bits say if the implicit register is r26 or r27 */
        if (OP_IS_ATOMIC(ins->op)) {
                ldst.swizzle = 0;
                ldst.swizzle |= ins->swizzle[3][0] & 3;
                ldst.swizzle |= (SSA_REG_FROM_FIXED(ins->src[3]) & 1 ? 1 : 0) << 2;
        }

        if (ins->src[1] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[1]);
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]);
                ldst.arg_1 |= midgard_ldst_reg(src, ins->swizzle[1][0], sz);
        }

        if (ins->src[2] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[2]);
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]);
                ldst.arg_2 |= midgard_ldst_reg(src, ins->swizzle[2][0], sz);
        }

        return ldst;
}

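/* Assembles the texture word: the input/output register selects, the packed
 * bias/LOD register select taken from src[2], and the offset register field
 * taken from src[3] when an explicit offset source is present. */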
static midgard_texture_word
texture_word_from_instr(midgard_instruction *ins)
{
        midgard_texture_word tex = ins->texture;
        tex.op = ins->op;

        unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
        tex.in_reg_select = src1 & 1;

        unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
        tex.out_reg_select = dest & 1;

        if (ins->src[2] != ~0) {
                midgard_tex_register_select sel = {
                        .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
                        .full = 1,
                        .component = ins->swizzle[2][0]
                };
                uint8_t packed;
                memcpy(&packed, &sel, sizeof(packed));
                tex.bias = packed;
        }

        if (ins->src[3] != ~0) {
                unsigned x = ins->swizzle[3][0];
                unsigned y = x + 1;
                unsigned z = x + 2;

                /* Check range, TODO: half-registers */
                assert(z < 4);

                unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
                tex.offset =
                        (1)                   | /* full */
                        (offset_reg & 1) << 1 | /* select */
                        (0 << 2)              | /* upper */
                        (x << 3)              | /* swizzle */
                        (y << 5)              | /* swizzle */
                        (z << 7);               /* swizzle */
        }

        return tex;
}

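/* Builds the generic vector ALU word (op, output modifier, register mode).
 * For the inline constant path, the low 8 bits of the constant land in
 * imm[10:3] and bits 10:8 in imm[2:0], stored shifted up by 2 in src2. */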
static midgard_vector_alu
vector_alu_from_instr(midgard_instruction *ins)
{
        midgard_vector_alu alu = {
                .op = ins->op,
                .outmod = ins->outmod,
                .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins))
        };

        if (ins->has_inline_constant) {
                /* Encode inline 16-bit constant. See disassembler for
                 * where the algorithm is from */

                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                uint16_t imm = ((lower_11 >> 8) & 0x7) |
                               ((lower_11 & 0xFF) << 3);

                alu.src2 = imm << 2;
        }

        return alu;
}

static midgard_branch_extended
midgard_create_branch_extended(midgard_condition cond,
                               midgard_jmp_writeout_op op,
                               unsigned dest_tag,
                               signed quadword_offset)
{
        /* The condition code is actually a LUT describing a function to
         * combine multiple condition codes. However, we only support a single
         * condition code at the moment, so we just duplicate it a bunch of
         * times. */

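        /* For example, a 2-bit condition of 0b01 duplicates to 0x5555. */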
        uint16_t duplicated_cond =
                (cond << 14) |
                (cond << 12) |
                (cond << 10) |
                (cond << 8) |
                (cond << 6) |
                (cond << 4) |
                (cond << 2) |
                (cond << 0);

        midgard_branch_extended branch = {
                .op = op,
                .dest_tag = dest_tag,
                .offset = quadword_offset,
                .cond = duplicated_cond
        };

        return branch;
}

static void
emit_branch(midgard_instruction *ins,
            compiler_context *ctx,
            midgard_block *block,
            midgard_bundle *bundle,
            struct util_dynarray *emission)
{
        /* Parse some basic branch info */
        bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
        bool is_conditional = ins->branch.conditional;
        bool is_inverted = ins->branch.invert_conditional;
        bool is_discard = ins->branch.target_type == TARGET_DISCARD;
        bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
        bool is_special = is_discard || is_tilebuf_wait;
        bool is_writeout = ins->writeout;

        /* Determine the block we're jumping to */
        int target_number = ins->branch.target_block;

        /* Report the destination tag */
        int dest_tag = is_discard ? 0 :
                       is_tilebuf_wait ? bundle->tag :
                       midgard_get_first_tag_from_block(ctx, target_number);

        /* Count up the number of quadwords we're jumping over, i.e. the
         * number of quadwords between this block and the target block */

        int quadword_offset = 0;

        if (is_discard) {
                /* Fixed encoding, not actually an offset */
                quadword_offset = 0x2;
        } else if (is_tilebuf_wait) {
                quadword_offset = -1;
        } else if (target_number > block->base.name) {
                /* Jump forward */

                for (int idx = block->base.name + 1; idx < target_number; ++idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset += blk->quadword_count;
                }
        } else {
                /* Jump backwards */

                for (int idx = block->base.name; idx >= target_number; --idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset -= blk->quadword_count;
                }
        }

        /* Unconditional extended branches (far jumps) have issues, so we
         * always use a conditional branch, setting the condition to always
         * for unconditional. For compact unconditional branches, cond isn't
         * used so it doesn't matter what we pick. */

        midgard_condition cond =
                !is_conditional ? midgard_condition_always :
                is_inverted ? midgard_condition_false :
                midgard_condition_true;

        midgard_jmp_writeout_op op =
                is_discard ? midgard_jmp_writeout_op_discard :
                is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending :
                is_writeout ? midgard_jmp_writeout_op_writeout :
                (is_compact && !is_conditional) ?
                midgard_jmp_writeout_op_branch_uncond :
                midgard_jmp_writeout_op_branch_cond;

        if (is_compact) {
                unsigned size = sizeof(midgard_branch_cond);

                if (is_conditional || is_special) {
                        midgard_branch_cond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .cond = cond
                        };
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                } else {
                        assert(op == midgard_jmp_writeout_op_branch_uncond);
                        midgard_branch_uncond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .unknown = 1
                        };
                        assert(branch.offset == quadword_offset);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                }
        } else { /* `ins->compact_branch`, misnomer */
                unsigned size = sizeof(midgard_branch_extended);

                midgard_branch_extended branch =
                        midgard_create_branch_extended(
                                cond, op,
                                dest_tag,
                                quadword_offset);

                memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
        }
}

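/* An ALU bundle is emitted as a 32-bit control word, then one 16-bit register
 * word per instruction (compact branches have none), then the instruction
 * bodies themselves (vector, scalar, or branch words), then whatever padding
 * the scheduler requested, and finally any embedded constants. */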
static void
emit_alu_bundle(compiler_context *ctx,
                midgard_block *block,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                unsigned src2_reg = REGISTER_UNUSED;
                if (ins->has_inline_constant)
                        src2_reg = ins->inline_constant >> 11;
                else if (ins->src[1] != ~0)
                        src2_reg = SSA_REG_FROM_FIXED(ins->src[1]);

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                midgard_reg_info registers = {
                        .src1_reg = (ins->src[0] == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->src[0])),
                        .src2_reg = src2_reg,
                        .src2_imm = ins->has_inline_constant,
                        .out_reg = (ins->dest == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->dest)),
                };
                memcpy(&reg_word, &registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                if (!ins->compact_branch) {
                        mir_lower_inverts(ins);
                        mir_lower_roundmode(ins);
                }

                if (midgard_is_branch_unit(ins->unit)) {
                        emit_branch(ins, ctx, block, bundle, emission);
                } else if (ins->unit & UNITS_ANY_VECTOR) {
                        midgard_vector_alu source = vector_alu_from_instr(ins);
                        mir_pack_mask_alu(ins, &source);
                        mir_pack_vector_srcs(ins, &source);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                } else {
                        midgard_scalar_alu source = vector_to_scalar_alu(vector_alu_from_instr(ins), ins);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                }
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t)
{
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

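/* The lookahead value carries the next bundle's tag in the upper bits of this
 * bundle's first word; as far as the hardware is understood, this is what
 * lets the GPU fetch the following bundle correctly. */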
void
emit_binary_bundle(compiler_context *ctx,
                   midgard_block *block,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, block, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        /* Atomic ops don't use this swizzle the same way as other ops */
                        if (!OP_IS_ATOMIC(bundle->instructions[i]->op))
                                mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                midgard_load_store_word ldst0 =
                        load_store_from_instr(bundle->instructions[0]);
                memcpy(&current64, &ldst0, sizeof(current64));

                if (bundle->instruction_count == 2) {
                        midgard_load_store_word ldst1 =
                                load_store_from_instr(bundle->instructions[1]);
                        memcpy(&next64, &ldst1, sizeof(next64));
                }

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                /* Nothing else to pack for barriers */
                if (ins->op == TEXTURE_OP_BARRIER) {
                        ins->texture.cont = ins->texture.last = 1;
                        ins->texture.op = ins->op;
                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
                        return;
                }

                signed override = mir_upper_override(ins, 32);

                ins->texture.mask = override > 0 ?
                        ins->mask >> override :
                        ins->mask;

                mir_pack_swizzle_tex(ins);

                if (!(ctx->quirks & MIDGARD_NO_OOO))
                        mir_pack_tex_ooo(block, bundle, ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.out_upper = override > 0;
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
                ins->texture.outmod = ins->outmod;

                if (mir_op_computes_derivatives(ctx->stage, ins->op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                midgard_texture_word texture = texture_word_from_instr(ins);
                util_dynarray_append(emission, midgard_texture_word, texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}