pan/midgard: Fix 32/64 mixed swizzle packing
[mesa.git] src/panfrost/midgard/midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

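        /* Scalar component indices address 16-bit halves of the register, so
         * a full (32-bit) component n is selected by pointing at its low
         * half, slot 2n */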
        if (s.full)
                s.component = component << 1;
        else
                s.component = component;

        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

        unsigned o;
        memcpy(&o, &s, sizeof(s));

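        /* A scalar source descriptor is just 6 bits (abs, negate, full and a
         * 3-bit component), so mask off the rest of the word */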
        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->has_inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

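        /* Judging by the shifts below, the scalar encoding scatters the
         * constant's bits: bits 10:9 land in imm[1:0], bit 8 in imm[2], bits
         * 7:5 in imm[5:3] and bits 5:0 in imm[11:6]. Note the mask keeps 12
         * bits despite the lower_11 name. */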
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * select the upper half. For instance, xy would become 64-bit XY, but that's
 * just xyzw native. Likewise, zz would become 64-bit XX with rep* so it would
 * be xyxy with rep. Pretty nifty, huh? */

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

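                /* A 64-bit component occupies two adjacent 32-bit slots, so
                 * selecting component 0 means the (x, y) pair and selecting
                 * component 1 means the (z, w) pair */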
                unsigned a = (swizzle[i] & 1) ?
                             (COMPONENT_W << 2) | COMPONENT_Z :
                             (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned upper_shift = mir_upper_override(ins);

        if (upper_shift) {
                effective >>= upper_shift;
                ins->alu.dest_override = midgard_dest_override_upper;
        }

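        /* Mask bits natively address 16-bit lanes, so wider register modes
         * expand each bit: e.g. a 32-bit mask of 0b0101 widens to
         * 0b00110011 */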
        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 4);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 2);
        else
                ins->alu.mask = effective;
}

static void
mir_pack_swizzle_alu(midgard_instruction *ins)
{
        midgard_vector_alu_src src[] = {
                vector_alu_from_unsigned(ins->alu.src1),
                vector_alu_from_unsigned(ins->alu.src2)
        };

        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                if (ins->alu.reg_mode == midgard_reg_mode_64) {
                        midgard_reg_mode mode = mir_srcsize(ins, i);
                        unsigned components = 16 / mir_bytes_for_mode(mode);

                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);

                        if (mode == midgard_reg_mode_32) {
                                bool lo = ins->swizzle[i][0] >= COMPONENT_Z;
                                bool hi = ins->swizzle[i][1] >= COMPONENT_Z;

                                /* TODO: can we mix halves? */
                                assert(lo == hi);

                                src[i].rep_low |= lo;
                        } else if (mode < midgard_reg_mode_32) {
                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
                        }
                } else {
                        /* For 32-bit, swizzle packing is stupid-simple. For
                         * 16-bit, the strategy is to check whether the nibble
                         * we're on is upper or lower. We need all components
                         * to be on the same "side"; that much is enforced by
                         * the ISA and should have been lowered. TODO: 8-bit
                         * packing. TODO: vec8 */

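                        /* For instance, a 16-bit source swizzled to component
                         * 5 across the board packs as yyyy with rep_high set,
                         * selecting y from the upper half */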
                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
                        bool upper = ins->swizzle[i][first] > 3;

                        if (upper && ins->mask)
                                assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);

                        for (unsigned c = 0; c < 4; ++c) {
                                unsigned v = ins->swizzle[i][c];

                                bool t_upper = v > 3;

                                /* Ensure we're doing something sane */

                                if (ins->mask & (1 << c)) {
                                        assert(t_upper == upper);
                                        assert(v <= 7);
                                }

                                /* Use the non-upper part */
                                v &= 0x3;

                                packed |= v << (2 * c);
                        }

                        src[i].rep_high = upper;
                }

                src[i].swizzle = packed;
        }

        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);

        if (!ins->has_inline_constant)
                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load/store masks are 4 bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs; vec2 is duplicated.
 * TODO: 8-bit? */

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        midgard_reg_mode mode = mir_typesize(ins);
        unsigned packed = ins->mask;

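        /* e.g. a 64-bit mask of 0b01 (just x) covers both of its 32-bit slots
         * and becomes 0b0011, while a 16-bit mask of 0b00001111 collapses its
         * duplicated pairs into 0b0011 */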
        if (mode == midgard_reg_mode_64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (mode == midgard_reg_mode_16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        }

        ins->load_store.mask = packed;
}

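/* An ALU bundle is emitted in the order the hardware expects: the 32-bit
 * control word, then a 16-bit register word per ALU instruction, then the
 * instruction bodies themselves (vector, scalar or branch words), then
 * padding, then any embedded constants */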
static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_swizzle_alu(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;

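        /* The next bundle's tag is stashed in the upper nibble of each
         * bundle's first word, presumably so the hardware can size the next
         * instruction fetch ahead of time */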
        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

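                        /* The offset is split across the instruction: the low
                         * (10 - shift) bits ride in varying_parameters at the
                         * op-specific shift, and the remainder spills into
                         * the address field */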
                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;
                mir_pack_swizzle_tex(ins);

                ctx->texture_op_count--;

                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        bool continues = ctx->texture_op_count > 0;

                        /* Control flow complicates helper invocation
                         * lifespans, so for now just keep helper threads
                         * around indefinitely with loops. TODO: Proper
                         * analysis */
                        continues |= ctx->loop_count > 0;

                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}