/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"
#include "midgard_quirks.h"

static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
        if (!half) {
                assert(!shift);

                /* Sign-extension, really... */
                return scalar ? 0 : midgard_int_normal;
        }

        if (shift)
                return midgard_int_shift;

        if (nir_alu_type_get_base_type(T) == nir_type_int)
                return midgard_int_sign_extend;
        else
                return midgard_int_zero_extend;
}
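
/* Illustration: a 16-bit signed source feeding a 32-bit instruction is a
 * "half" source, so with no shift requested it picks
 * midgard_int_sign_extend; the same source with an unsigned type picks
 * midgard_int_zero_extend, and a requested shift wins over both. */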

static unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
        bool integer = midgard_is_integer_op(ins->alu.op);
        unsigned base_size = (8 << ins->alu.reg_mode);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
        bool half = (sz == (base_size >> 1));

        return integer ?
                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
                ((ins->src_abs[i] << 0) |
                 ((ins->src_neg[i] << 1)));
}

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}
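
/* Note: scalar source fields address 16-bit halves, which is why a full
 * (32-bit) component c is pre-shifted to half-slot 2c above; only the low
 * 6 bits of the packed struct survive into the instruction word. */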

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}
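
/* The scramble above reproduces the hardware's inline-constant layout in
 * the scalar src2 field: constant bits 9-10 land in imm[1:0], bit 8 in
 * imm[2], bits 5-7 in imm[5:3], and bits 0-5 in imm[11:6] (so bit 5 ends
 * up encoded twice). */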

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}
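
/* Concretely: each 64-bit component packs one nibble selecting a pair of
 * 32-bit halves, Y:X (0x4) for even sources and W:Z (0xE) for odd ones, so
 * e.g. a .yx swizzle packs as 0x4E. */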

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = 8 << ins->alu.reg_mode;
        signed upper_shift = mir_upper_override(ins, inst_size);

        if (upper_shift >= 0) {
                effective >>= upper_shift;
                ins->alu.dest_override = upper_shift ?
                        midgard_dest_override_upper :
                        midgard_dest_override_lower;
        } else {
                ins->alu.dest_override = midgard_dest_override_none;
        }

        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 2);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 1);
        else
                ins->alu.mask = effective;
}
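
/* The hardware ALU mask is per 16-bit lane (eight lanes), hence the
 * expansion: each 32-bit component covers two lanes and each 64-bit
 * component four, while 8/16-bit masks are already lane-granular. E.g. a
 * 32-bit .xy write (mask 0b0011) becomes lane mask 0b00001111. */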

static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                nir_alu_type T, midgard_reg_mode reg_mode,
                bool op_channeled, bool *rep_low, bool *rep_high)
{
        unsigned packed = 0;
        unsigned sz = nir_alu_type_get_type_size(T);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *rep_low = lo;
                        } else {
                                *rep_low = lo;
                                *rep_high = hi;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */

                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }

                /* Replicate for now.. should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *rep_low = !upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        *rep_low = upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_32) {
                        *rep_low = upper;
                } else {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}
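
/* Worked example: a 16-bit source reading half-register components
 * {5, 5, 7, 6} lives entirely in the upper half, so it packs as if it were
 * {1, 1, 3, 2} (0b10110101) with the rep flags marking the upper side. */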

static void
mir_pack_vector_srcs(midgard_instruction *ins)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);

        midgard_reg_mode mode = ins->alu.reg_mode;
        unsigned base_size = (8 << mode);

        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                bool rep_lo = false, rep_hi = false;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                bool half = (sz == (base_size >> 1));

                assert((sz == base_size) || half);

                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                ins->src_types[i], ins->alu.reg_mode,
                                channeled, &rep_lo, &rep_hi);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .rep_low = rep_lo,
                        .rep_high = rep_hi,
                        .half = half,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        ins->alu.src1 = p;
                else
                        ins->alu.src2 = p;
        }
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Up to 3 { ALU, LDST } bundles can execute in parallel with a texture op.
 * Given a texture op, lookahead to see how many such bundles we can flag for
 * OOO execution */

static bool
mir_can_run_ooo(midgard_block *block, midgard_bundle *bundle,
                unsigned dependency)
{
        /* Don't read out of bounds */
        if (bundle >= (midgard_bundle *) ((char *) block->bundles.data + block->bundles.size))
                return false;

        /* Texture ops can't execute with other texture ops */
        if (!IS_ALU(bundle->tag) && bundle->tag != TAG_LOAD_STORE_4)
                return false;

        /* Ensure there is no read-after-write dependency */

        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                mir_foreach_src(ins, s) {
                        if (ins->src[s] == dependency)
                                return false;
                }
        }

        /* Otherwise, we're okay */
        return true;
}

static void
mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins)
{
        unsigned count = 0;

        for (count = 0; count < 3; ++count) {
                if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest))
                        break;
        }

        ins->texture.out_of_order = count;
}
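
/* E.g. a texture op followed by two independent ALU/LDST bundles and then
 * one reading the texture destination gets out_of_order = 2: the first two
 * bundles may execute before the texture result lands. */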

/* Load store masks are 4-bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs, vec2 is duplicated. TODO: 8-bit?
 */

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        }

        ins->load_store.mask = packed;
}
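
/* Examples: a 64-bit mask 0b10 (second dword only) packs to 0b1100; a
 * 16-bit mask 0b00111100 (components 1 and 2, each duplicated) packs to
 * 0b0110. */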

static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        if (!(inv[0] || inv[1] || inv[2]))
                return;

        switch (ins->alu.op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inor;
                else
                        ins->alu.op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /* a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inand;
                else
                        ins->alu.op = midgard_alu_op_iornot;

                break;
        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (!(inv[0] && inv[1]))
                        ins->alu.op = midgard_alu_op_inxor;

                break;
        default:
                unreachable("Invalid punned invert");
        }
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (!ins->compact_branch)
                        mir_lower_inverts(ins);

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_vector_srcs(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, size, 1), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}
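
/* So the binary layout of an ALU bundle is: a 32-bit control word (with
 * the lookahead tag folded in), one 16-bit register word per non-branch
 * instruction, the packed bodies themselves, zero padding up to the
 * scheduled size, and finally any embedded constants. */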

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t) {
        switch (nir_alu_type_get_base_type(t)) {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_block *block,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        /* Each bundle's control word carries the tag of the *next* bundle
         * (the lookahead), shifted past the current tag's nibble */
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                /* Nothing else to pack for barriers */
                if (ins->texture.op == TEXTURE_OP_BARRIER) {
                        ins->texture.cont = ins->texture.last = 1;
                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
                        return;
                }

                signed override = mir_upper_override(ins, 32);

                ins->texture.mask = override > 0 ?
                        ins->mask >> override :
                        ins->mask;

                mir_pack_swizzle_tex(ins);

                if (!(ctx->quirks & MIDGARD_NO_OOO))
                        mir_pack_tex_ooo(block, bundle, ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.out_upper = override > 0;
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);

                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}