pan/midgard: Pack load/store masks
[mesa.git] / src / panfrost / midgard / midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

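/* Returns the index of the first enabled component of a writemask, e.g. a
 * mask of 0x4 (.z only) yields component 2. Used when demoting to scalar,
 * where only a single output component can be encoded. */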
static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account: a full (32-bit)
         * component occupies two 16-bit slots, so its index is doubled */

        if (s.full)
                s.component = component << 1;
        else
                s.component = component;

        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

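        /* Repack the struct as raw bits; a scalar ALU source only occupies
         * 6 bits of the encoding, so mask off everything above them */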
        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->has_inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

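        /* The scalar encoding keeps the embedded constant, but with its bits
         * shuffled relative to the vector encoding; the shifts below
         * rearrange the constant's low bits into src2's layout */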
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

static void
mir_pack_swizzle_alu(midgard_instruction *ins)
{
        midgard_vector_alu_src src[] = {
                vector_alu_from_unsigned(ins->alu.src1),
                vector_alu_from_unsigned(ins->alu.src2)
        };

        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit/64-bit packing. TODO: vec8 */

                unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
                bool upper = ins->swizzle[i][first] > 3;

                if (upper && ins->mask)
                        assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (ins->mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non-upper part */
                        v &= 0x3;

                        packed |= v << (2 * c);
                }

                src[i].swizzle = packed;
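                /* The 2-bit swizzle fields only address four components, so
                 * for 16-bit sources the upper/lower half selection is
                 * carried separately via rep_high */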
                src[i].rep_high = upper;
        }

        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);

        if (!ins->has_inline_constant)
                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load/store masks are 4 bits, addressing a vec4 of 32-bit words, so
 * load/store ops pack for that. vec4 (32-bit) is the natural mask width;
 * vec2 (64-bit) masks are duplicated into pairs of bits, and vec8 (16-bit)
 * masks are constrained to come in pairs, which collapse. TODO: 8-bit?
 */

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        midgard_reg_mode mode = mir_typesize(ins);
        unsigned packed = ins->mask;

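        /* 64-bit masks expand (each component covers two 32-bit slots);
         * 16-bit masks collapse (two half-components per slot); 32-bit
         * masks pass through unchanged */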
        if (mode == midgard_reg_mode_64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (mode == midgard_reg_mode_16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        }

        ins->load_store.mask = packed;
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch || ins->prepacked_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body? */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
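                        /* expand_writemask_32 widens each enabled 32-bit
                         * component to a pair of mask bits, since the vector
                         * ALU mask appears to be addressed per 16-bit half */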
                        if (ins->alu.reg_mode == midgard_reg_mode_32)
                                ins->alu.mask = expand_writemask_32(ins->mask);
                        else
                                ins->alu.mask = ins->mask;

                        mir_pack_swizzle_alu(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants) {
                util_dynarray_append(emission, float, bundle->constants[0]);
                util_dynarray_append(emission, float, bundle->constants[1]);
                util_dynarray_append(emission, float, bundle->constants[2]);
                util_dynarray_append(emission, float, bundle->constants[3]);
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;
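        /* The next bundle's tag is packed into the upper nibble of this
         * bundle's first word (the lookahead), presumably so the hardware
         * can see what kind of word to fetch next */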

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Pack masks and swizzles */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

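                /* The second slot defaults to a load/store NOP when this
                 * bundle contains only one instruction */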
                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;
                mir_pack_swizzle_tex(ins);

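                /* This texture op is now being emitted, so one fewer remains
                 * outstanding for the helper-invocation bookkeeping below */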
                ctx->texture_op_count--;

                if (mir_op_computes_derivatives(ins->texture.op)) {
                        bool continues = ctx->texture_op_count > 0;

                        /* Control flow complicates helper invocation
                         * lifespans, so for now just keep helper threads
                         * around indefinitely with loops. TODO: Proper
                         * analysis */
                        continues |= ctx->loop_count > 0;

                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}