/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */
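
/* Pick the lowest set component of a writemask: e.g. a mask of 0x4 (.z
 * alone) maps to component 2, and if several bits are set, the lowest
 * enabled component wins. */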
static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned masked_component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        unsigned component = (v.swizzle >> (2 * masked_component)) & 3;
        bool upper = false; /* TODO */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

        if (s.full)
                s.component = component << 1;
        else
                s.component = component + (upper << 2);
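
        /* Worked example: with s.full set, component 2 (.z) is encoded as 4,
         * since full 32-bit components appear to be addressed in units of
         * 16-bit halves (hence the half/upper machinery above). */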

        if (!is_int) {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

        /* Pack the payload and mask it down to the 6-bit scalar source field */
        unsigned o = 0;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->ssa_args.inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, comp),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, comp) : 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */

        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

        if (ins->ssa_args.inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);

                /* The scalar encoding scatters the immediate's bits:
                 * lower_11[10:9] land in imm[1:0], lower_11[8] in imm[2],
                 * lower_11[7:5] in imm[5:3], and lower_11[5:0] fill
                 * imm[11:6] */
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);
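
        /* The lookahead bits hold the following bundle's tag (next_tag << 4,
         * computed by emit_binary_bundle below), presumably letting the
         * hardware size its next fetch ahead of time. */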

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch || ins->prepacked_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body, and how big is it? */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
                        if (ins->alu.reg_mode == midgard_reg_mode_32)
                                ins->alu.mask = expand_writemask_32(ins->mask);
                        else
                                ins->alu.mask = ins->mask;

                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        /* Otherwise it's a scalar unit: demote the vector payload */
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants) {
                util_dynarray_append(emission, float, bundle->constants[0]);
                util_dynarray_append(emission, float, bundle->constants[1]);
                util_dynarray_append(emission, float, bundle->constants[2]);
                util_dynarray_append(emission, float, bundle->constants[3]);
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

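        /* A load/store word packs up to two instructions side by side;
         * an unused second slot is filled with a no-op. */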
        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy over the masks first */
                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        bundle->instructions[i]->load_store.mask =
                                bundle->instructions[i]->mask;
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);
                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set the .cont/.last
                 * flags, though. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;

                ctx->texture_op_count--;

                if (mir_op_computes_derivatives(ins->texture.op)) {
                        bool continues = ctx->texture_op_count > 0;

                        /* Control flow complicates helper invocation
                         * lifespans, so for now just keep helper threads
                         * around indefinitely with loops. TODO: Proper
                         * analysis */
                        continues |= ctx->loop_count > 0;

                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}
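
/* A minimal sketch of how a caller might drive this, assuming an array-style
 * bundle list (block->bundles and block->bundle_count are hypothetical names;
 * the real iteration lives in the compiler proper, not in this file):
 *
 *    for (unsigned i = 0; i < block->bundle_count; ++i) {
 *            int next_tag = (i + 1 < block->bundle_count) ?
 *                           block->bundles[i + 1].tag : 0;
 *            emit_binary_bundle(ctx, &block->bundles[i], emission, next_tag);
 *    }
 */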