/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"

static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
        if (!half) {
                assert(!shift);

                /* Sign-extension, really... */
                return scalar ? 0 : midgard_int_normal;
        }

        if (shift)
                return midgard_int_shift;

        if (nir_alu_type_get_base_type(T) == nir_type_int)
                return midgard_int_sign_extend;
        else
                return midgard_int_zero_extend;
}
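
/* Integer ops pack an int modifier (sign/zero-extension or shift) via
 * mir_get_imod above; float ops instead pack abs/neg bits. For illustration
 * (a sketch, not upstream-verified): a float source with both abs and neg
 * set packs mod = 0b11, while a half-width signed integer source with no
 * shift packs midgard_int_sign_extend. */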

static unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
        bool integer = midgard_is_integer_op(ins->op);
        unsigned base_size = max_bitsize_for_alu(ins);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
        bool half = (sz == (base_size >> 1));

        return integer ?
                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
                ((ins->src_abs[i] << 0) |
                 ((ins->src_neg[i] << 1)));
}

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o = 0;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}
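
/* Worked example (a sketch, assuming the midgard_scalar_alu_src field
 * layout): a full 32-bit source reading component .y packs component = 1
 * shifted up to 0b010, since full-size components occupy the even slots of
 * the component field and half-size components the odd slots in between. */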

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}
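
/* To illustrate the shuffle above (a sketch, not authoritative): the low
 * bits of the inline constant are redistributed into the scalar src2
 * encoding, e.g. bits [5:0] of lower_11 land in imm bits [11:6], matching
 * the layout the disassembler documents. */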

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}
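
/* For instance (sketching the encoding above): a 64-bit .yx swizzle packs
 * element 1 as (W << 2) | Z in the low nibble and element 0 as
 * (Y << 2) | X in the high nibble, each 64-bit element expanding to a pair
 * of 32-bit component codes. */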

static void
mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = max_bitsize_for_alu(ins);
        signed upper_shift = mir_upper_override(ins, inst_size);

        if (upper_shift >= 0) {
                effective >>= upper_shift;
                alu->dest_override = upper_shift ?
                        midgard_dest_override_upper :
                        midgard_dest_override_lower;
        } else {
                alu->dest_override = midgard_dest_override_none;
        }

        if (inst_size == 32)
                alu->mask = expand_writemask(effective, 2);
        else if (inst_size == 64)
                alu->mask = expand_writemask(effective, 1);
        else
                alu->mask = effective;
}
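
/* expand_writemask blows a compact per-component mask up to the hardware's
 * 8-bit mask; e.g. (a sketch) a 32-bit write to .xy, mask 0b0011, expands
 * to 0b00001111 -- one bit per 16-bit lane of the 128-bit register. */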

static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                 nir_alu_type T, midgard_reg_mode reg_mode,
                 bool op_channeled, bool *rep_low, bool *rep_high)
{
        unsigned packed = 0;
        unsigned sz = nir_alu_type_get_type_size(T);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *rep_low = lo;
                        } else {
                                *rep_low = lo;
                                *rep_high = hi;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */

                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        ASSERTED bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }

                /* Replicate for now.. should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *rep_low = !upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        *rep_low = upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_32) {
                        *rep_low = upper;
                } else {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}
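
/* As a concrete 16-bit example (a sketch): reading components 4-7 (the
 * upper half), every selected component has v > 3, so the source is flagged
 * upper as a whole and only v & 0x3 is packed per 2-bit slot, with the
 * rep_* flags recording which side to replicate from. */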

static void
mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);

        unsigned base_size = max_bitsize_for_alu(ins);

        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                bool rep_lo = false, rep_hi = false;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                bool half = (sz == (base_size >> 1));

                assert((sz == base_size) || half);

                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                ins->src_types[i], reg_mode_for_bitsize(base_size),
                                channeled, &rep_lo, &rep_hi);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .rep_low = rep_lo,
                        .rep_high = rep_hi,
                        .half = half,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        alu->src1 = p;
                else
                        alu->src2 = p;
        }
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Up to 3 { ALU, LDST } bundles can execute in parallel with a texture op.
 * Given a texture op, lookahead to see how many such bundles we can flag for
 * out-of-order execution */

static bool
mir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
                unsigned dependency)
{
        /* Don't read out of bounds */
        if (bundle >= (midgard_bundle *) ((char *) block->bundles.data + block->bundles.size))
                return false;

        /* Texture ops can't execute with other texture ops */
        if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
                return false;

        /* Ensure there is no read-after-write dependency */

        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == dependency)
                                return false;
                }
        }

        /* Otherwise, we're okay */
        return true;
}

static void
mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins)
{
        unsigned count = 0;

        for (count = 0; count < 3; ++count) {
                if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
                        break;
        }

        ins->texture.out_of_order = count;
}
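
/* For example (a sketch): if the two bundles following a texture op are
 * independent ALU work but the third reads the texture result, count stops
 * at 2, flagging that the hardware may run those two bundles ahead of the
 * pending texture. */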

/* Load store masks are 4-bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs, vec2 is duplicated. TODO: 8-bit?
 */
static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        ASSERTED bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        } else {
                assert(sz == 32);
        }

        ins->load_store.mask = packed;
}
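
/* To sketch the 64-bit case above: a vec2 mask of 0b10 (write .y) selects
 * the upper pair of 32-bit slots, packing to 0b1100, while 0b01 packs to
 * 0b0011 -- each 64-bit component owns two bits of the 4-bit ldst mask. */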

static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        switch (ins->op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inor;
                else if (inv[1])
                        ins->op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /* a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->op = midgard_alu_op_inand;
                else if (inv[1])
                        ins->op = midgard_alu_op_iornot;

                break;
        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (inv[0] ^ inv[1])
                        ins->op = midgard_alu_op_inxor;

                break;
        default:
                break;
        }
}

/* Opcodes with ROUNDS are the base (rte/0) type so we can just add */
static void
mir_lower_roundmode(midgard_instruction *ins)
{
        if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
                assert(ins->roundmode <= 0x3);
                ins->op += ins->roundmode;
        }
}
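
/* e.g. (a sketch, assuming the rounding-mode opcode variants are laid out
 * consecutively after the rte base, as the MIDGARD_ROUNDS flag implies): an
 * op with roundmode 1 is rewritten to its round-toward-zero variant simply
 * by adding 1 to the opcode. */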

static midgard_load_store_word
load_store_from_instr(midgard_instruction *ins)
{
        midgard_load_store_word ldst = ins->load_store;
        ldst.op = ins->op;

        if (OP_IS_STORE(ldst.op)) {
                ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
        } else {
                ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
        }

        /* Atomic opcode swizzles have a special meaning:
         *   - The first two bits say which component of the implicit register should be used
         *   - The next two bits say if the implicit register is r26 or r27 */
        if (OP_IS_ATOMIC(ins->op)) {
                ldst.swizzle = 0;
                ldst.swizzle |= ins->swizzle[3][0] & 3;
                ldst.swizzle |= (SSA_REG_FROM_FIXED(ins->src[3]) & 1 ? 1 : 0) << 2;
        }

        if (ins->src[1] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[1]);
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]);
                ldst.arg_1 |= midgard_ldst_reg(src, ins->swizzle[1][0], sz);
        }

        if (ins->src[2] != ~0) {
                unsigned src = SSA_REG_FROM_FIXED(ins->src[2]);
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]);
                ldst.arg_2 |= midgard_ldst_reg(src, ins->swizzle[2][0], sz);
        }

        return ldst;
}

static midgard_texture_word
texture_word_from_instr(midgard_instruction *ins)
{
        midgard_texture_word tex = ins->texture;
        tex.op = ins->op;

        unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
        tex.in_reg_select = src1 & 1;

        unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
        tex.out_reg_select = dest & 1;

        if (ins->src[2] != ~0) {
                midgard_tex_register_select sel = {
                        .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
                        .full = 1,
                        .component = ins->swizzle[2][0]
                };

                uint8_t packed;
                memcpy(&packed, &sel, sizeof(packed));
                tex.bias = packed;
        }

        if (ins->src[3] != ~0) {
                unsigned x = ins->swizzle[3][0];
                unsigned y = x + 1;
                unsigned z = x + 2;

                /* Check range, TODO: half-registers */
                assert(z < 4);

                unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
                tex.offset =
                        (1)                   | /* full */
                        (offset_reg & 1) << 1 | /* select */
                        (0 << 2)              | /* upper */
                        (x << 3)              | /* swizzle */
                        (y << 5)              | /* swizzle */
                        (z << 7);               /* swizzle */
        }

        return tex;
}

static midgard_vector_alu
vector_alu_from_instr(midgard_instruction *ins)
{
        midgard_vector_alu alu = {
                .op = ins->op,
                .outmod = ins->outmod,
                .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins))
        };

        if (ins->has_inline_constant) {
                /* Encode inline 16-bit constant. See disassembler for
                 * where the algorithm is from */

                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                uint16_t imm = ((lower_11 >> 8) & 0x7) |
                               ((lower_11 & 0xFF) << 3);

                alu.src2 = imm << 2;
        }

        return alu;
}

static midgard_branch_extended
midgard_create_branch_extended(midgard_condition cond,
                               midgard_jmp_writeout_op op,
                               unsigned dest_tag,
                               signed quadword_offset)
{
        /* The condition code is actually a LUT describing a function to
         * combine multiple condition codes. However, we only support a single
         * condition code at the moment, so we just duplicate over a bunch of
         * times. */

        uint16_t duplicated_cond =
                (cond << 14) |
                (cond << 12) |
                (cond << 10) |
                (cond << 8) |
                (cond << 6) |
                (cond << 4) |
                (cond << 2) |
                (cond << 0);

        midgard_branch_extended branch = {
                .op = op,
                .dest_tag = dest_tag,
                .offset = quadword_offset,
                .cond = duplicated_cond
        };

        return branch;
}
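
/* Sketch: a 2-bit condition value of 0x1 duplicates to 0x5555 -- the same
 * code repeated across all eight LUT slots, which degenerates the LUT to a
 * single condition. */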

static void
emit_branch(midgard_instruction *ins,
            compiler_context *ctx,
            midgard_block *block,
            midgard_bundle *bundle,
            struct util_dynarray *emission)
{
        /* Parse some basic branch info */
        bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
        bool is_conditional = ins->branch.conditional;
        bool is_inverted = ins->branch.invert_conditional;
        bool is_discard = ins->branch.target_type == TARGET_DISCARD;
        bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT;
        bool is_special = is_discard || is_tilebuf_wait;
        bool is_writeout = ins->writeout;

        /* Determine the block we're jumping to */
        int target_number = ins->branch.target_block;

        /* Report the destination tag */
        int dest_tag = is_discard ? 0 :
                is_tilebuf_wait ? bundle->tag :
                midgard_get_first_tag_from_block(ctx, target_number);

        /* Count up the number of quadwords we're
         * jumping over = number of quadwords until
         * (br_block_idx, target_number) */

        int quadword_offset = 0;

        if (is_discard) {
                /* Fixed encoding, not actually an offset */
                quadword_offset = 0x2;
        } else if (is_tilebuf_wait) {
                quadword_offset = -1;
        } else if (target_number > block->base.name) {
                /* Jump forward */

                for (int idx = block->base.name + 1; idx < target_number; ++idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset += blk->quadword_count;
                }
        } else {
                /* Jump backwards */

                for (int idx = block->base.name; idx >= target_number; --idx) {
                        midgard_block *blk = mir_get_block(ctx, idx);
                        assert(blk);

                        quadword_offset -= blk->quadword_count;
                }
        }

        /* Unconditional extended branches (far jumps)
         * have issues, so we always use a conditional
         * branch, setting the condition to always for
         * unconditional. For compact unconditional
         * branches, cond isn't used so it doesn't
         * matter what we pick. */

        midgard_condition cond =
                !is_conditional ? midgard_condition_always :
                is_inverted ? midgard_condition_false :
                midgard_condition_true;

        midgard_jmp_writeout_op op =
                is_discard ? midgard_jmp_writeout_op_discard :
                is_tilebuf_wait ? midgard_jmp_writeout_op_tilebuffer_pending :
                is_writeout ? midgard_jmp_writeout_op_writeout :
                (is_compact && !is_conditional) ?
                midgard_jmp_writeout_op_branch_uncond :
                midgard_jmp_writeout_op_branch_cond;

        if (is_compact) {
                unsigned size = sizeof(midgard_branch_cond);

                if (is_conditional || is_special) {
                        midgard_branch_cond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .cond = cond
                        };

                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                } else {
                        assert(op == midgard_jmp_writeout_op_branch_uncond);

                        midgard_branch_uncond branch = {
                                .op = op,
                                .dest_tag = dest_tag,
                                .offset = quadword_offset,
                                .unknown = 1
                        };

                        assert(branch.offset == quadword_offset);

                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
                }
        } else { /* `ins->compact_branch`, misnomer */
                unsigned size = sizeof(midgard_branch_extended);

                midgard_branch_extended branch =
                        midgard_create_branch_extended(
                                        cond, op,
                                        dest_tag,
                                        quadword_offset);

                memcpy(util_dynarray_grow_bytes(emission, size, 1), &branch, size);
        }
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_block *block,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                unsigned src2_reg = REGISTER_UNUSED;
                if (ins->has_inline_constant)
                        src2_reg = ins->inline_constant >> 11;
                else if (ins->src[1] != ~0)
                        src2_reg = SSA_REG_FROM_FIXED(ins->src[1]);

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                midgard_reg_info registers = {
                        .src1_reg = (ins->src[0] == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->src[0])),
                        .src2_reg = src2_reg,
                        .src2_imm = ins->has_inline_constant,
                        .out_reg = (ins->dest == ~0 ?
                                        REGISTER_UNUSED :
                                        SSA_REG_FROM_FIXED(ins->dest)),
                };

                memcpy(&reg_word, &registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                if (!ins->compact_branch) {
                        mir_lower_inverts(ins);
                        mir_lower_roundmode(ins);
                }

                if (midgard_is_branch_unit(ins->unit)) {
                        emit_branch(ins, ctx, block, bundle, emission);
                } else if (ins->unit & UNITS_ANY_VECTOR) {
                        midgard_vector_alu source = vector_alu_from_instr(ins);
                        mir_pack_mask_alu(ins, &source);
                        mir_pack_vector_srcs(ins, &source);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                } else {
                        midgard_scalar_alu source = vector_to_scalar_alu(vector_alu_from_instr(ins), ins);
                        unsigned size = sizeof(source);
                        memcpy(util_dynarray_grow_bytes(emission, size, 1), &source, size);
                }
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction elsewhere as well, but it unifies
 * things in the compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}
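
/* Sketch of the use in emit_binary_bundle below: with shift = 3 for UBO
 * reads (1 otherwise), the low (10 - shift) bits of the constant offset are
 * packed into varying_parameters at that shift, and the remainder spills
 * into the address field. */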

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t)
{
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_block *block,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, block, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        /* Atomic ops don't use this swizzle the same way as other ops */
                        if (!OP_IS_ATOMIC(bundle->instructions[i]->op))
                                mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                midgard_load_store_word ldst0 =
                        load_store_from_instr(bundle->instructions[0]);
                memcpy(&current64, &ldst0, sizeof(current64));

                if (bundle->instruction_count == 2) {
                        midgard_load_store_word ldst1 =
                                load_store_from_instr(bundle->instructions[1]);
                        memcpy(&next64, &ldst1, sizeof(next64));
                }

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);
                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                /* Nothing else to pack for barriers */
                if (ins->op == TEXTURE_OP_BARRIER) {
                        ins->texture.cont = ins->texture.last = 1;
                        ins->texture.op = ins->op;
                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
                        return;
                }

                signed override = mir_upper_override(ins, 32);

                ins->texture.mask = override > 0 ?
                        ins->mask >> override :
                        ins->mask;

                mir_pack_swizzle_tex(ins);

                if (!(ctx->quirks & MIDGARD_NO_OOO))
                        mir_pack_tex_ooo(block, bundle, ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.out_upper = override > 0;
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
                ins->texture.outmod = ins->outmod;

                if (mir_op_computes_derivatives(ctx->stage, ins->op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                midgard_texture_word texture = texture_word_from_instr(ins);
                util_dynarray_append(emission, midgard_texture_word, texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}