/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */
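
/* Illustrative sketch (not part of the original source): a vector move like
 *
 *    fmov r0.z, r1.yyyy      (mask = 0x4)
 *
 * has a single live component, so it can be re-encoded as a scalar op whose
 * output_component is 2 (z) and whose source reads component y. The helpers
 * below compute exactly that re-encoding. */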

static unsigned
component_from_mask(unsigned mask)
{
        /* Return the index of the first (and, for a scalar op, only) set
         * component */
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

        if (s.full)
                s.component = component << 1;
        else
                s.component = component;

        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

        unsigned o = 0;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}
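
/* Worked example (illustrative): scalar component indices count in 16-bit
 * halves, so for a full 32-bit source, component 2 (z) is encoded as
 * 2 << 1 = 4, while a 16-bit source keeps the raw index 2. */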

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->has_inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}
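
/* For illustration (not in the original source), the scramble above scatters
 * the 12-bit inline constant into the scalar src2 field as
 *
 *    imm[1:0]  = lower_11[10:9]
 *    imm[2]    = lower_11[8]
 *    imm[5:3]  = lower_11[7:5]
 *    imm[11:6] = lower_11[5:0]
 *
 * so, e.g., lower_11 = 0x5A6 packs to imm = 0x9AE. */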

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}
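
/* Worked example (illustrative): the identity swizzle { x, y } selects the
 * even pair for slot 0 and the odd pair for slot 1, packing as
 * ((COMPONENT_Y << 2) | COMPONENT_X) | (((COMPONENT_W << 2) | COMPONENT_Z) << 4)
 * = 0x04 | 0xE0 = 0xE4, the native xyzw ordering the comment above describes. */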

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned upper_shift = mir_upper_override(ins);

        if (upper_shift) {
                effective >>= upper_shift;
                ins->alu.dest_override = midgard_dest_override_upper;
        }

        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 4);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 2);
        else
                ins->alu.mask = effective;
}
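
/* Worked example (illustrative, assuming expand_writemask() widens each
 * component bit to cover its halfwords): with 32-bit components, writing .xy
 * (mask 0b0011) expands to the per-halfword mask 0b00001111; a 16-bit write
 * to the upper half, mask 0b11110000, is instead shifted down to 0b1111 and
 * flagged via midgard_dest_override_upper. */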

static void
mir_pack_swizzle_alu(midgard_instruction *ins)
{
        midgard_vector_alu_src src[] = {
                vector_alu_from_unsigned(ins->alu.src1),
                vector_alu_from_unsigned(ins->alu.src2)
        };

        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                if (ins->alu.reg_mode == midgard_reg_mode_64) {
                        midgard_reg_mode mode = mir_srcsize(ins, i);
                        unsigned components = 16 / mir_bytes_for_mode(mode);

                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);

                        if (mode == midgard_reg_mode_32) {
                                src[i].rep_low |= (ins->swizzle[i][0] >= COMPONENT_Z);
                                src[i].rep_high |= (ins->swizzle[i][1] >= COMPONENT_Z);
                        } else if (mode < midgard_reg_mode_32) {
                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
                        }
                } else {
                        /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                         * the strategy is to check whether the nibble we're on is
                         * upper or lower. We need all components to be on the same
                         * "side"; that much is enforced by the ISA and should have
                         * been lowered. TODO: 8-bit packing. TODO: vec8 */

                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
                        bool upper = ins->swizzle[i][first] > 3;

                        if (upper && ins->mask)
                                assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);

                        for (unsigned c = 0; c < 4; ++c) {
                                unsigned v = ins->swizzle[i][c];

                                bool t_upper = v > 3;

                                /* Ensure we're doing something sane */

                                if (ins->mask & (1 << c)) {
                                        assert(t_upper == upper);
                                        assert(v <= 7);
                                }

                                /* Use the non upper part */
                                v &= 0x3;

                                packed |= v << (2 * c);
                        }

                        src[i].rep_high = upper;
                }

                src[i].swizzle = packed;
        }

        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);

        if (!ins->has_inline_constant)
                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
}
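
/* Worked example (illustrative) of the 16-bit path: reading { 7, 7, 7, 7 },
 * i.e. the top component of a vec8, every index is > 3, so upper is set,
 * each index is masked down by v &= 0x3 to { 3, 3, 3, 3 }, packed becomes
 * 0b11111111, and rep_high records that the upper side was selected. */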

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load store masks are 4 bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs, vec2 is duplicated.
 * TODO: 8-bit? */

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        midgard_reg_mode mode = mir_typesize(ins);
        unsigned packed = ins->mask;

        if (mode == midgard_reg_mode_64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (mode == midgard_reg_mode_16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        }

        ins->load_store.mask = packed;
}
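
/* Worked example (illustrative): a 64-bit access touching only component 0
 * (mask 0b01) packs to 0b0011, covering both 32-bit slots it spans, while a
 * 16-bit access with the duplicated mask 0b00001111 collapses back to the
 * 4-bit mask 0b0011. */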

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_swizzle_alu(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}
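
/* For reference (a summary of the routine above, not a comment from the
 * original): an ALU bundle is laid out as a 32-bit control word, one 16-bit
 * register word per non-branch instruction, the instruction bodies packed
 * back-to-back, zero padding, and finally the embedded constants, if any. */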

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}
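
/* Worked example (illustrative): for a non-UBO op, shift = 1 and
 * upper_shift = 10 - 1 = 9, so a constant offset of 0x2A5 splits into
 * (0x2A5 & 0x1FF) << 1 OR'd into varying_parameters and 0x2A5 >> 9 = 1
 * OR'd into the address field; see the load/store case below. */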

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);
                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;
                mir_pack_swizzle_tex(ins);

                ctx->texture_op_count--;

                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        bool continues = ctx->texture_op_count > 0;

                        /* Control flow complicates helper invocation
                         * lifespans, so for now just keep helper threads
                         * around indefinitely with loops. TODO: Proper
                         * helper invocation lifespan analysis */
                        continues |= ctx->loop_count > 0;

                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }
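
                /* Illustrative note (not from the original): with two
                 * derivative-computing texture ops remaining and no loops,
                 * the first emitted op ends up with cont=1/last=0 and the
                 * final one with cont=0/last=1, keeping helper invocations
                 * alive exactly until the last such op retires. */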

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}