pan/midgard: Partially fix 64-bit swizzle alignment
src/panfrost/midgard/midgard_emit.c (mesa.git)
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

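/* A sketch of the demotion below: a 32-bit vector op whose write mask covers
 * a single component, say 0b0100, becomes a scalar op reading each source's
 * swizzle entry for component 2 and writing output component 2. */
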
static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

        if (s.full)
                s.component = component << 1;
        else
                s.component = component;

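        /* So, e.g., a 32-bit source selecting component 2 (z) encodes
         * component = 4 above, since the field indexes 16-bit slots. */
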
        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->has_inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

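        /* Reading off the shifts below: the constant's low bits (despite the
         * variable's name, the mask keeps 12 bits) are scrambled into the
         * scalar src2 field as imm[1:0] = c[10:9], imm[2] = c[8],
         * imm[5:3] = c[7:5], imm[11:6] = c[5:0]. */
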
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

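/* Concretely (as implemented below): a 32-bit source swizzled .zz under a
 * 64-bit instruction packs the 8-bit field as xyxy (64-bit .xx) and sets
 * rep_low to flag that the data lives in the upper (zw) half. */
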
static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

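        /* A worked example: with an 8-bit destination override to the upper
         * half, an effective mask of 0b11110000 shifts down to 0b00001111.
         * The expand_writemask() calls below then widen each component bit to
         * the hardware's 8-bit, 16-bit-granular mask -- e.g. a 32-bit mask of
         * 0b0101 presumably expands to 0b00110011. */
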
        unsigned upper_shift = mir_upper_override(ins);

        if (upper_shift) {
                effective >>= upper_shift;
                ins->alu.dest_override = midgard_dest_override_upper;
        }

        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 4);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 2);
        else
                ins->alu.mask = effective;
}

static void
mir_pack_swizzle_alu(midgard_instruction *ins)
{
        midgard_vector_alu_src src[] = {
                vector_alu_from_unsigned(ins->alu.src1),
                vector_alu_from_unsigned(ins->alu.src2)
        };

        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                if (ins->alu.reg_mode == midgard_reg_mode_64) {
                        midgard_reg_mode mode = mir_srcsize(ins, i);
                        unsigned components = 16 / mir_bytes_for_mode(mode);

                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);

                        if (mode == midgard_reg_mode_32) {
                                bool lo = ins->swizzle[i][0] >= COMPONENT_Z;
                                bool hi = ins->swizzle[i][1] >= COMPONENT_Z;
                                unsigned mask = mir_bytemask(ins);

                                if (mask & 0xFF) {
                                        /* We can't mix halves... */
                                        if (mask & 0xFF00)
                                                assert(lo == hi);

                                        src[i].rep_low |= lo;
                                } else {
                                        src[i].rep_low |= hi;
                                }
                        } else if (mode < midgard_reg_mode_32) {
                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
                        }
                } else {
                        /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                         * the strategy is to check whether the nibble we're on is
                         * upper or lower. We need all components to be on the same
                         * "side"; that much is enforced by the ISA and should have
                         * been lowered. TODO: 8-bit packing. TODO: vec8 */

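                        /* Worked example: a 16-bit swizzle reading components
                         * (5, 5, 4, 4) -- all upper-half -- packs the in-half
                         * indices (1, 1, 0, 0) as 0b00000101 and sets rep_high
                         * to select the upper half. */
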
                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
                        bool upper = ins->swizzle[i][first] > 3;

                        if (upper && ins->mask)
                                assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);

                        for (unsigned c = 0; c < 4; ++c) {
                                unsigned v = ins->swizzle[i][c];

                                bool t_upper = v > 3;

                                /* Ensure we're doing something sane */

                                if (ins->mask & (1 << c)) {
                                        assert(t_upper == upper);
                                        assert(v <= 7);
                                }

                                /* Use the non-upper part */
                                v &= 0x3;

                                packed |= v << (2 * c);
                        }

                        src[i].rep_high = upper;
                }

                src[i].swizzle = packed;
        }

        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);

        if (!ins->has_inline_constant)
                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load store masks are 4-bits. Load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to be in pairs, vec2 is
 * duplicated. TODO: 8-bit? */

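/* E.g. a 64-bit (vec2) mask of 0b10 packs to 0b1100 below, while a 16-bit
 * (vec8) mask of 0b00001111 -- two duplicated pairs -- packs to 0b0011. */
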
static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        midgard_reg_mode mode = mir_typesize(ins);
        unsigned packed = ins->mask;

        if (mode == midgard_reg_mode_64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (mode == midgard_reg_mode_16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        }

        ins->load_store.mask = packed;
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_swizzle_alu(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}
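
/* Worked through the packing in emit_binary_bundle below: with shift = 3
 * (a UBO read), a constant offset of 0x1AB lands as its low 7 bits (0x2B)
 * shifted left by 3 into varying_parameters, with the remaining 0x3 OR'd
 * into the address field. */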

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
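        /* The next bundle's tag is stashed in the high nibble of the control
         * word, presumably so the hardware knows what to fetch next. */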
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;
                mir_pack_swizzle_tex(ins);

                ctx->texture_op_count--;

                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        bool continues = ctx->texture_op_count > 0;

                        /* Control flow complicates helper invocation
                         * lifespans, so for now just keep helper threads
                         * around indefinitely with loops. TODO: Proper
                         * analysis */
                        continues |= ctx->loop_count > 0;

                        ins->texture.cont = continues;
                        ins->texture.last = !continues;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}