pan/mdg: Eliminate 64-bit swizzle packing division
[mesa.git] / src / panfrost / midgard / midgard_emit.c
/*
 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

static midgard_int_mod
mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
{
        if (!half) {
                assert(!shift);
                /* Sign-extension, really... */
                return scalar ? 0 : midgard_int_normal;
        }

        if (shift)
                return midgard_int_shift;

        if (nir_alu_type_get_base_type(T) == nir_type_int)
                return midgard_int_sign_extend;
        else
                return midgard_int_zero_extend;
}

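/* Pack the source modifier for one operand: float ops get an abs bit and a
 * negate bit, while integer ops reuse the same field for the conversion
 * modifier computed by mir_get_imod above. */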
static unsigned
mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
{
        bool integer = midgard_is_integer_op(ins->alu.op);
        unsigned base_size = (8 << ins->alu.reg_mode);
        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
        bool half = (sz == (base_size >> 1));

        return integer ?
                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
                ((ins->src_abs[i] << 0) |
                 ((ins->src_neg[i] << 1)));
}

/* Midgard IR only knows vector ALU types, but we sometimes need to actually
 * use scalar ALU instructions, for functional or performance reasons. To do
 * this, we just demote vector ALU payloads to scalar. */

static int
component_from_mask(unsigned mask)
{
        for (int c = 0; c < 8; ++c) {
                if (mask & (1 << c))
                        return c;
        }

        assert(0);
        return 0;
}

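/* Scalar ALU sources address the register at 16-bit granularity, so a full
 * (32-bit) component must land on an even slot; hence the extra shift when
 * is_full is set. */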
static unsigned
mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
{
        midgard_scalar_alu_src s = {
                .mod = mod,
                .full = is_full,
                .component = component << (is_full ? 1 : 0)
        };

        unsigned o;
        memcpy(&o, &s, sizeof(s));

        return o & ((1 << 6) - 1);
}

static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;

        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
        unsigned comp = component_from_mask(ins->mask);

        unsigned packed_src[2] = {
                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
        };

        /* The output component is from the mask */
        midgard_scalar_alu s = {
                .op = v.op,
                .src1 = packed_src[0],
                .src2 = packed_src[1],
                .unknown = 0,
                .outmod = v.outmod,
                .output_full = is_full,
                .output_component = comp
        };

        /* Full components are physically spaced out */
        if (is_full) {
                assert(s.output_component < 4);
                s.output_component <<= 1;
        }

        /* Inline constant is passed along rather than trying to extract it
         * from v */

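        /* The scalar encoding stores the inline constant with its fields
         * permuted relative to the vector encoding, so the bits have to be
         * shuffled here: roughly, the low six bits of the constant move to the
         * top of src2 and the upper bits drop down to the bottom. */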
        if (ins->has_inline_constant) {
                uint16_t imm = 0;
                int lower_11 = ins->inline_constant & ((1 << 12) - 1);
                imm |= (lower_11 >> 9) & 3;
                imm |= (lower_11 >> 6) & 4;
                imm |= (lower_11 >> 2) & 0x38;
                imm |= (lower_11 & 63) << 6;

                s.src2 = imm;
        }

        return s;
}

/* 64-bit swizzles are super easy since there are 2 components of 2 components
 * in an 8-bit field ... lots of duplication to go around!
 *
 * Swizzles of 32-bit vectors accessed from 64-bit instructions are a little
 * funny -- pack them *as if* they were native 64-bit, using rep_* flags to
 * flag upper. For instance, xy would become 64-bit XY but that's just xyzw
 * native. Likewise, zz would become 64-bit XX with rep* so it would be xyxy
 * with rep. Pretty nifty, huh? */

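/* Concretely, the native 64-bit component X spans 32-bit lanes (x, y) and Y
 * spans (z, w), so the identity swizzle XY packs to 0xE4 (lanes x, y, z, w)
 * and the swap YX packs to 0x4E. */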
static unsigned
mir_pack_swizzle_64(unsigned *swizzle, unsigned max_component)
{
        unsigned packed = 0;

        for (unsigned i = 0; i < 2; ++i) {
                assert(swizzle[i] <= max_component);

                unsigned a = (swizzle[i] & 1) ?
                        (COMPONENT_W << 2) | COMPONENT_Z :
                        (COMPONENT_Y << 2) | COMPONENT_X;

                packed |= a << (i * 4);
        }

        return packed;
}

static void
mir_pack_mask_alu(midgard_instruction *ins)
{
        unsigned effective = ins->mask;

        /* If we have a destination override, we need to figure out whether to
         * override to the lower or upper half, shifting the effective mask in
         * the latter, so AAAA.... becomes AAAA */

        unsigned inst_size = 8 << ins->alu.reg_mode;
        signed upper_shift = mir_upper_override(ins, inst_size);

        if (upper_shift >= 0) {
                effective >>= upper_shift;
                ins->alu.dest_override = upper_shift ?
                        midgard_dest_override_upper :
                        midgard_dest_override_lower;
        } else {
                ins->alu.dest_override = midgard_dest_override_none;
        }

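        /* The hardware writemask is eight bits at 16-bit granularity, so wider
         * register modes expand each enabled component across several mask
         * bits. */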
        if (ins->alu.reg_mode == midgard_reg_mode_32)
                ins->alu.mask = expand_writemask(effective, 2);
        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                ins->alu.mask = expand_writemask(effective, 1);
        else
                ins->alu.mask = effective;
}

static unsigned
mir_pack_swizzle(unsigned mask, unsigned *swizzle,
                 nir_alu_type T, midgard_reg_mode reg_mode,
                 bool op_channeled, bool *rep_low, bool *rep_high)
{
        unsigned packed = 0;
        unsigned sz = nir_alu_type_get_type_size(T);

        if (reg_mode == midgard_reg_mode_64) {
                assert(sz == 64 || sz == 32);
                unsigned components = (sz == 32) ? 4 : 2;

                packed = mir_pack_swizzle_64(swizzle, components);

                if (sz == 32) {
                        bool lo = swizzle[0] >= COMPONENT_Z;
                        bool hi = swizzle[1] >= COMPONENT_Z;

                        if (mask & 0x1) {
                                /* We can't mix halves... */
                                if (mask & 2)
                                        assert(lo == hi);

                                *rep_low = lo;
                        } else {
                                *rep_low = hi;
                        }
                } else if (sz < 32) {
                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
                }
        } else {
                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
                 * the strategy is to check whether the nibble we're on is
                 * upper or lower. We need all components to be on the same
                 * "side"; that much is enforced by the ISA and should have
                 * been lowered. TODO: 8-bit packing. TODO: vec8 */
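                /* For instance, a 16-bit source swizzled .yyyy out of the
                 * upper half (components 5, 5, 5, 5) packs as 0x55 with
                 * rep_high set. */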

                unsigned first = mask ? ffs(mask) - 1 : 0;
                bool upper = swizzle[first] > 3;

                if (upper && mask)
                        assert(sz <= 16);

                bool dest_up = !op_channeled && (first >= 4);

                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
                        unsigned v = swizzle[c];

                        bool t_upper = v > 3;

                        /* Ensure we're doing something sane */

                        if (mask & (1 << c)) {
                                assert(t_upper == upper);
                                assert(v <= 7);
                        }

                        /* Use the non upper part */
                        v &= 0x3;

                        packed |= v << (2 * (c % 4));
                }

                /* Replicate for now... we should really pick a side for
                 * dot products */

                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
                        *rep_low = !upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
                        *rep_low = upper;
                        *rep_high = upper;
                } else if (reg_mode == midgard_reg_mode_32) {
                        *rep_low = upper;
                } else {
                        unreachable("Unhandled reg mode");
                }
        }

        return packed;
}

static void
mir_pack_vector_srcs(midgard_instruction *ins)
{
        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);

        midgard_reg_mode mode = ins->alu.reg_mode;
        unsigned base_size = (8 << mode);

        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && (i == 1))
                        continue;

                if (ins->src[i] == ~0)
                        continue;

                bool rep_lo = false, rep_hi = false;
                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
                bool half = (sz == (base_size >> 1));

                assert((sz == base_size) || half);

                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
                                ins->src_types[i], ins->alu.reg_mode,
                                channeled, &rep_lo, &rep_hi);

                midgard_vector_alu_src pack = {
                        .mod = mir_pack_mod(ins, i, false),
                        .rep_low = rep_lo,
                        .rep_high = rep_hi,
                        .half = half,
                        .swizzle = swizzle
                };

                unsigned p = vector_alu_srco_unsigned(pack);

                if (i == 0)
                        ins->alu.src1 = p;
                else
                        ins->alu.src2 = p;
        }
}

static void
mir_pack_swizzle_ldst(midgard_instruction *ins)
{
        /* TODO: non-32-bit, non-vec4 */
        for (unsigned c = 0; c < 4; ++c) {
                unsigned v = ins->swizzle[0][c];

                /* Check vec4 */
                assert(v <= 3);

                ins->load_store.swizzle |= v << (2 * c);
        }

        /* TODO: arg_1/2 */
}

static void
mir_pack_swizzle_tex(midgard_instruction *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                unsigned packed = 0;

                for (unsigned c = 0; c < 4; ++c) {
                        unsigned v = ins->swizzle[i][c];

                        /* Check vec4 */
                        assert(v <= 3);

                        packed |= v << (2 * c);
                }

                if (i == 0)
                        ins->texture.swizzle = packed;
                else
                        ins->texture.in_reg_swizzle = packed;
        }

        /* TODO: bias component */
}

/* Load/store masks are 4 bits, and load/store ops pack for that. vec4 is the
 * natural mask width; vec8 is constrained to pairs and vec2 is duplicated.
 * TODO: 8-bit? */
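/* For instance, a 64-bit (vec2) mask of 0x3 expands to 0xF, while a 16-bit
 * (vec8) mask of 0xF collapses its duplicated pairs down to 0x3. */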

static void
mir_pack_ldst_mask(midgard_instruction *ins)
{
        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
        unsigned packed = ins->mask;

        if (sz == 64) {
                packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                         ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
        } else if (sz == 16) {
                packed = 0;

                for (unsigned i = 0; i < 4; ++i) {
                        /* Make sure we're duplicated */
                        bool u = (ins->mask & (1 << (2*i + 0))) != 0;
                        bool v = (ins->mask & (1 << (2*i + 1))) != 0;
                        assert(u == v);

                        packed |= (u << i);
                }
        } else {
                assert(sz == 32);
        }

        ins->load_store.mask = packed;
}

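/* Fold source inverts into the opcode where a fused form exists (iandnot,
 * inor, inand, iornot, inxor), following the identities noted in each case
 * below. */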
static void
mir_lower_inverts(midgard_instruction *ins)
{
        bool inv[3] = {
                ins->src_invert[0],
                ins->src_invert[1],
                ins->src_invert[2]
        };

        switch (ins->alu.op) {
        case midgard_alu_op_iand:
                /* a & ~b = iandnot(a, b) */
                /* ~a & ~b = ~(a | b) = inor(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inor;
                else if (inv[1])
                        ins->alu.op = midgard_alu_op_iandnot;

                break;
        case midgard_alu_op_ior:
                /* a | ~b = iornot(a, b) */
                /* ~a | ~b = ~(a & b) = inand(a, b) */

                if (inv[0] && inv[1])
                        ins->alu.op = midgard_alu_op_inand;
                else if (inv[1])
                        ins->alu.op = midgard_alu_op_iornot;

                break;

        case midgard_alu_op_ixor:
                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
                /* ~a ^ ~b = a ^ b */

                if (inv[0] ^ inv[1])
                        ins->alu.op = midgard_alu_op_inxor;

                break;

        default:
                break;
        }
}

static void
emit_alu_bundle(compiler_context *ctx,
                midgard_bundle *bundle,
                struct util_dynarray *emission,
                unsigned lookahead)
{
        /* Emit the control word */
        util_dynarray_append(emission, uint32_t, bundle->control | lookahead);

        /* Next up, emit register words */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Check if this instruction has registers */
                if (ins->compact_branch) continue;

                /* Otherwise, just emit the registers */
                uint16_t reg_word = 0;
                memcpy(&reg_word, &ins->registers, sizeof(uint16_t));
                util_dynarray_append(emission, uint16_t, reg_word);
        }

        /* Now, we emit the body itself */
        for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                midgard_instruction *ins = bundle->instructions[i];

                /* Where is this body */
                unsigned size = 0;
                void *source = NULL;

                /* In case we demote to a scalar */
                midgard_scalar_alu scalarized;

                if (!ins->compact_branch)
                        mir_lower_inverts(ins);

                if (ins->unit & UNITS_ANY_VECTOR) {
                        mir_pack_mask_alu(ins);
                        mir_pack_vector_srcs(ins);
                        size = sizeof(midgard_vector_alu);
                        source = &ins->alu;
                } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                        size = sizeof(midgard_branch_cond);
                        source = &ins->br_compact;
                } else if (ins->compact_branch) { /* misnomer */
                        size = sizeof(midgard_branch_extended);
                        source = &ins->branch_extended;
                } else {
                        size = sizeof(midgard_scalar_alu);
                        scalarized = vector_to_scalar_alu(ins->alu, ins);
                        source = &scalarized;
                }

                memcpy(util_dynarray_grow_bytes(emission, size, 1), source, size);
        }

        /* Emit padding (all zero) */
        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);

        /* Tack on constants */

        if (bundle->has_embedded_constants)
                util_dynarray_append(emission, midgard_constants, bundle->constants);
}

/* Shift applied to the immediate used as an offset. Probably this is papering
 * over some other semantic distinction as well, but it unifies things in the
 * compiler so I don't mind. */

static unsigned
mir_ldst_imm_shift(midgard_load_store_op op)
{
        if (OP_IS_UBO_READ(op))
                return 3;
        else
                return 1;
}

static enum mali_sampler_type
midgard_sampler_type(nir_alu_type t) {
        switch (nir_alu_type_get_base_type(t))
        {
        case nir_type_float:
                return MALI_SAMPLER_FLOAT;
        case nir_type_int:
                return MALI_SAMPLER_SIGNED;
        case nir_type_uint:
                return MALI_SAMPLER_UNSIGNED;
        default:
                unreachable("Unknown sampler type");
        }
}

/* After everything is scheduled, emit whole bundles at a time */

void
emit_binary_bundle(compiler_context *ctx,
                   midgard_bundle *bundle,
                   struct util_dynarray *emission,
                   int next_tag)
{
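        /* Each bundle encodes the tag of the bundle that follows it, so the
         * hardware knows what kind of word comes next; for ALU bundles this
         * rides in the upper nibble of the control word. */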
        int lookahead = next_tag << 4;

        switch (bundle->tag) {
        case TAG_ALU_4:
        case TAG_ALU_8:
        case TAG_ALU_12:
        case TAG_ALU_16:
        case TAG_ALU_4 + 4:
        case TAG_ALU_8 + 4:
        case TAG_ALU_12 + 4:
        case TAG_ALU_16 + 4:
                emit_alu_bundle(ctx, bundle, emission, lookahead);
                break;

        case TAG_LOAD_STORE_4: {
                /* One or two composing instructions */

                uint64_t current64, next64 = LDST_NOP;

                /* Copy masks */

                for (unsigned i = 0; i < bundle->instruction_count; ++i) {
                        mir_pack_ldst_mask(bundle->instructions[i]);

                        mir_pack_swizzle_ldst(bundle->instructions[i]);

                        /* Apply a constant offset: the bits that fit go into
                         * varying_parameters (above the per-op immediate
                         * shift); the overflow lands in the address field. */
                        unsigned offset = bundle->instructions[i]->constants.u32[0];

                        if (offset) {
                                unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
                                unsigned upper_shift = 10 - shift;

                                bundle->instructions[i]->load_store.varying_parameters |= (offset & ((1 << upper_shift) - 1)) << shift;
                                bundle->instructions[i]->load_store.address |= (offset >> upper_shift);
                        }
                }

                memcpy(&current64, &bundle->instructions[0]->load_store, sizeof(current64));

                if (bundle->instruction_count == 2)
                        memcpy(&next64, &bundle->instructions[1]->load_store, sizeof(next64));

                midgard_load_store instruction = {
                        .type = bundle->tag,
                        .next_type = next_tag,
                        .word1 = current64,
                        .word2 = next64
                };

                util_dynarray_append(emission, midgard_load_store, instruction);

                break;
        }

        case TAG_TEXTURE_4:
        case TAG_TEXTURE_4_VTX:
        case TAG_TEXTURE_4_BARRIER: {
                /* Texture instructions are easy, since there is neither
                 * pipelining nor VLIW to worry about. We may need to set
                 * .cont/.last flags. */

                midgard_instruction *ins = bundle->instructions[0];

                ins->texture.type = bundle->tag;
                ins->texture.next_type = next_tag;

                /* Nothing else to pack for barriers */
                if (ins->texture.op == TEXTURE_OP_BARRIER) {
                        ins->texture.cont = ins->texture.last = 1;
                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
                        return;
                }

                signed override = mir_upper_override(ins, 32);

                ins->texture.mask = override > 0 ?
                        ins->mask >> override :
                        ins->mask;

                mir_pack_swizzle_tex(ins);

                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);

                assert(osz == 32 || osz == 16);
                assert(isz == 32 || isz == 16);

                ins->texture.out_full = (osz == 32);
                ins->texture.out_upper = override > 0;
                ins->texture.in_reg_full = (isz == 32);
                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);

                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
                        ins->texture.cont = !ins->helper_terminate;
                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                } else {
                        ins->texture.cont = ins->texture.last = 1;
                }

                util_dynarray_append(emission, midgard_texture_word, ins->texture);
                break;
        }

        default:
                unreachable("Unknown midgard instruction type\n");
        }
}