pan/mdg: Handle dest up/lower correctly with swizzles
mesa.git: src/panfrost/midgard/midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */
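
/* As a rough illustration of the demotion below (the values are illustrative,
 * not taken from a real shader): a 32-bit vector op whose write mask is 0x2
 * only touches component Y, so component_from_mask(0x2) == 1. The scalar
 * encoding then reads each source at swizzle[src][1] and writes
 * output_component 1 (doubled to 2 for the 32-bit case, see
 * vector_to_scalar_alu). */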

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

static unsigned
vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
                        unsigned component)
{
        midgard_vector_alu_src v;
        memcpy(&v, &u, sizeof(v));

        /* TODO: Integers */

        midgard_scalar_alu_src s = { 0 };

        if (is_full) {
                /* For a 32-bit op, just check the source half flag */
                s.full = !v.half;
        } else if (!v.half) {
                /* For a 16-bit op that's not subdivided, never full */
                s.full = false;
        } else {
                /* We can't do 8-bit scalar, abort! */
                assert(0);
        }

        /* Component indexing takes size into account */

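        /* For example (illustrative values): component 2 of a full 32-bit
         * source becomes s.component == 4, since full components are
         * physically spaced out (see the matching shift applied to
         * output_component in vector_to_scalar_alu below). */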
        if (s.full)
                s.component = component << 1;
        else
                s.component = component;

        if (is_int) {
                /* TODO */
        } else {
                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
        }

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(v.op);
        bool is_full = v.reg_mode == midgard_reg_mode_32;
        bool is_inline_constant = ins->has_inline_constant;

        unsigned comp = component_from_mask(ins->mask);

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

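        /* The shifts below permute the low bits of the constant into the
         * scalar src2 field. Written out in terms of the 12-bit value masked
         * off above, this is:
         *
         *    imm[11:6] = constant[5:0]
         *    imm[5:3]  = constant[7:5]
         *    imm[2]    = constant[8]
         *    imm[1:0]  = constant[10:9]
         *
         * (This just restates what the code does; the hardware rationale for
         * the layout isn't documented here.) */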
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */
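
/* A quick sanity check of the packing below (pure arithmetic, no new
 * assumptions): the 64-bit identity swizzle {0, 1} packs as
 * ((COMPONENT_Y << 2) | COMPONENT_X) | (((COMPONENT_W << 2) | COMPONENT_Z) << 4)
 * = 0xE4, which is exactly the native 32-bit identity swizzle xyzw. */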

static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                             (COMPONENT_W << 2) | COMPONENT_Z :
                             (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */
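
        /* For instance, a 16-bit instruction whose mask is 0xF0 (upper four
         * halves only) would, assuming mir_upper_override() reports a shift
         * of 4 in that case, be encoded with dest_override_upper and an
         * effective mask of 0xF. */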

        unsigned upper_shift = mir_upper_override(ins);

        if (upper_shift) {
                effective >>= upper_shift;
                ins->alu.dest_override = midgard_dest_override_upper;
        }

        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 4);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 2);
        else
                ins->alu.mask = effective;
}

static void
mir_pack_swizzle_alu(midgard_instruction *ins)
{
        midgard_vector_alu_src src[] = {
                vector_alu_from_unsigned(ins->alu.src1),
                vector_alu_from_unsigned(ins->alu.src2)
        };

        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                if (ins->alu.reg_mode == midgard_reg_mode_64) {
                        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                        unsigned components = 64 / sz;

                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);

                        if (sz == 32) {
                                bool lo = ins->swizzle[i][0] >= COMPONENT_Z;
                                bool hi = ins->swizzle[i][1] >= COMPONENT_Z;
                                unsigned mask = mir_bytemask(ins);

                                if (mask & 0xFF) {
                                        /* We can't mix halves... */
                                        if (mask & 0xFF00)
                                                assert(lo == hi);

                                        src[i].rep_low |= lo;
                                } else {
                                        src[i].rep_low |= hi;
                                }
                        } else if (sz < 32) {
                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
                        }
                } else {
                        /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                         * the strategy is to check whether the nibble we're on is
                         * upper or lower. We need all components to be on the same
                         * "side"; that much is enforced by the ISA and should have
                         * been lowered. TODO: 8-bit packing. TODO: vec8 */
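
                        /* Worked example (illustrative values): a 16-bit
                         * source swizzled .yyyy from the low half packs each
                         * lane as 1, i.e. packed == 0x55, and rep_low is set
                         * below since the components come from the low half. */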

                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
                        bool upper = ins->swizzle[i][first] > 3;

                        if (upper && ins->mask)
                                assert(nir_alu_type_get_type_size(ins->src_types[i]) <= 16);

                        bool dest_up =
                                GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? false :
                                (first >= 4);

                        for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                                unsigned v = ins->swizzle[i][c];

                                bool t_upper = v > 3;

                                /* Ensure we're doing something sane */

                                if (ins->mask & (1 << c)) {
                                        assert(t_upper == upper);
                                        assert(v <= 7);
                                }

                                /* Use the non upper part */
                                v &= 0x3;

                                packed |= v << (2 * (c % 4));
                        }

                        /* Replicate for now.. should really pick a side for
                         * dot products */

                        if (ins->alu.reg_mode == midgard_reg_mode_16) {
                                src[i].rep_low = !upper;
                                src[i].rep_high = upper;
                        } else if (ins->alu.reg_mode == midgard_reg_mode_32) {
                                src[i].rep_low = upper;
                        } else {
                                unreachable("Unhandled reg mode");
                        }
                }

                src[i].swizzle = packed;
        }

        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);

        if (!ins->has_inline_constant)
                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load/store masks are 4 bits, so load/store ops pack down to that width.
 * vec4 is the natural mask width; vec8 is constrained to be in pairs, and
 * vec2 is duplicated. TODO: 8-bit?
 */
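
/* For example, a 64-bit store with mask 0x3 (both components) expands to the
 * full 4-bit mask 0xF, while a 16-bit mask of 0x0F (the low four halves,
 * already duplicated in pairs) collapses to 0x3. */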

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        } else {
                assert(sz == 32);
        }

        ins->load_store.mask = packed;
}

static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        switch (ins->alu.op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inor;
                else if (inv[1])
                        ins->alu.op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /* a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inand;
                else if (inv[1])
                        ins->alu.op = midgard_alu_op_iornot;

                break;

        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (inv[0] ^ inv[1])
                        ins->alu.op = midgard_alu_op_inxor;

                break;

        default:
                break;
        }
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (!ins->compact_branch)
                        mir_lower_inverts(ins);

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_swizzle_alu(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, size, 1), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t) {
        switch (nir_alu_type_get_base_type(t))
        {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
        int lookahead = next_tag << 4;
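
        /* The successor's tag, shifted into bits 7:4 here, is OR'd into the
         * ALU control word by emit_alu_bundle(); the load/store and texture
         * paths below record next_tag in their own next_type fields instead. */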

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is no pipelining
                 * nor VLIW to worry about. We may need to set .cont/.last
                 * flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;
                ins->texture.mask = ins->mask;
                mir_pack_swizzle_tex(ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);

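                /* Derivative-computing ops gate .cont/.last on the
                 * helper-invocation flags (presumably so helper threads stay
                 * alive exactly as long as needed); everything else just sets
                 * both flags. */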
                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}