pan/midgard: Expose more typesize helpers
[mesa.git] src/panfrost/midgard/mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

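/* Helpers to rewrite SSA/register indices across the MIR. The _single
 * variants patch one instruction; the variants further down walk every
 * instruction in the shader */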
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

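/* Grabs the packed ALU source field for operand 0 or 1 and unpacks it */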
static midgard_vector_alu_src
mir_get_alu_src(midgard_instruction *ins, unsigned idx)
{
        unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;
        return vector_alu_from_unsigned(b);
}

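/* As mir_rewrite_index_src_single, but also composes the given swizzle with
 * the instruction's existing swizzle for the rewritten operand */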
static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

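/* Counts how many instructions read a given index */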
unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

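/* Checks whether a source carries a nontrivial modifier. For integer ops only
 * a shift counts; for float ops any modifier does. mir_nontrivial_mod below
 * additionally considers size conversion (half) and non-identity swizzles on
 * enabled components */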
static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
{
        if (is_int)
                return src.mod == midgard_int_shift;
        else
                return src.mod;
}

static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask, unsigned *swizzle)
{
        if (mir_nontrivial_raw_mod(src, is_int)) return true;

        /* size-conversion */
        if (src.half) return true;

        for (unsigned c = 0; c < 16; ++c) {
                if (!(mask & (1 << c))) continue;
                if (swizzle[c] != c) return true;
        }

        return false;
}

bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_mod(src2, is_int, ins->mask, ins->swizzle[1]);
}

bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        /* Pseudo-outmod */
        if (ins->invert)
                return true;

        /* Type conversion is a sort of outmod */
        if (ins->alu.dest_override != midgard_dest_override_none)
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

/* Checks if an index will be used as a special register -- basically, if we're
 * used as the input to a non-ALU op */

bool
mir_special_index(compiler_context *ctx, unsigned idx)
{
        mir_foreach_instr_global(ctx, ins) {
                bool is_ldst = ins->type == TAG_LOAD_STORE_4;
                bool is_tex = ins->type == TAG_TEXTURE_4;
                bool is_writeout = ins->compact_branch && ins->writeout;

                if (!(is_ldst || is_tex || is_writeout))
                        continue;

                if (mir_has_arg(ins, idx))
                        return true;
        }

        return false;
}

/* Is a node written before a given instruction? */

bool
mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        mir_foreach_instr_global(ctx, q) {
                if (q == ins)
                        break;

                if (q->dest == node)
                        return true;
        }

        return false;
}

/* Grabs the type size. */

midgard_reg_mode
mir_typesize(midgard_instruction *ins)
{
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        /* TODO: Type sizes for texture */
        if (ins->type == TAG_TEXTURE_4)
                return midgard_reg_mode_32;

        if (ins->type == TAG_LOAD_STORE_4)
                return GET_LDST_SIZE(load_store_opcode_props[ins->load_store.op].props);

        if (ins->type == TAG_ALU_4) {
                midgard_reg_mode mode = ins->alu.reg_mode;

                /* If we have an override, step down by half */
                if (ins->alu.dest_override != midgard_dest_override_none) {
                        assert(mode > midgard_reg_mode_8);
                        mode--;
                }

                return mode;
        }

        unreachable("Invalid instruction type");
}

/* Grabs the size of a source */

midgard_reg_mode
mir_srcsize(midgard_instruction *ins, unsigned i)
{
        /* TODO: 16-bit textures/ldst */
        if (ins->type == TAG_TEXTURE_4 || ins->type == TAG_LOAD_STORE_4)
                return midgard_reg_mode_32;

        /* TODO: 16-bit branches */
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        if (i >= 2) {
                /* TODO: 16-bit conditions, ffma */
                assert(i == 2);
                return midgard_reg_mode_32;
        }

        /* Default to type of the instruction */

        midgard_reg_mode mode = ins->alu.reg_mode;

        /* If we have a half modifier, step down by half */

        if ((mir_get_alu_src(ins, i)).half) {
                assert(mode > midgard_reg_mode_8);
                mode--;
        }

        return mode;
}

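/* Maps a destination size in bits to the corresponding register mode */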
midgard_reg_mode
mir_mode_for_destsize(unsigned size)
{
        switch (size) {
        case 8:
                return midgard_reg_mode_8;
        case 16:
                return midgard_reg_mode_16;
        case 32:
                return midgard_reg_mode_32;
        case 64:
                return midgard_reg_mode_64;
        default:
                unreachable("Unknown destination size");
        }
}


/* Converts per-component mask to a byte mask */
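/* For instance, in midgard_reg_mode_32 a component mask of 0x5 (components 0
 * and 2) expands to the byte mask 0x0F0F (bytes 0-3 and 8-11) */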

uint16_t
mir_to_bytemask(midgard_reg_mode mode, unsigned mask)
{
        switch (mode) {
        case midgard_reg_mode_8:
                return mask;

        case midgard_reg_mode_16: {
                unsigned space =
                        ((mask & 0x1) << (0 - 0)) |
                        ((mask & 0x2) << (2 - 1)) |
                        ((mask & 0x4) << (4 - 2)) |
                        ((mask & 0x8) << (6 - 3)) |
                        ((mask & 0x10) << (8 - 4)) |
                        ((mask & 0x20) << (10 - 5)) |
                        ((mask & 0x40) << (12 - 6)) |
                        ((mask & 0x80) << (14 - 7));

                return space | (space << 1);
        }

        case midgard_reg_mode_32: {
                unsigned space =
                        ((mask & 0x1) << (0 - 0)) |
                        ((mask & 0x2) << (4 - 1)) |
                        ((mask & 0x4) << (8 - 2)) |
                        ((mask & 0x8) << (12 - 3));

                return space | (space << 1) | (space << 2) | (space << 3);
        }

        case midgard_reg_mode_64: {
                unsigned A = (mask & 0x1) ? 0xFF : 0x00;
                unsigned B = (mask & 0x2) ? 0xFF : 0x00;
                return A | (B << 8);
        }

        default:
                unreachable("Invalid register mode");
        }
}

/* ...and helpers for the inverse: the byte count per component for a mode, and
 * the conversion from a byte mask back to a per-component mask */

unsigned
mir_bytes_for_mode(midgard_reg_mode mode)
{
        switch (mode) {
        case midgard_reg_mode_8:
                return 1;
        case midgard_reg_mode_16:
                return 2;
        case midgard_reg_mode_32:
                return 4;
        case midgard_reg_mode_64:
                return 8;
        default:
                unreachable("Invalid register mode");
        }
}

uint16_t
mir_from_bytemask(uint16_t bytemask, midgard_reg_mode mode)
{
        unsigned value = 0;
        unsigned count = mir_bytes_for_mode(mode);

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

/* Rounds down a bytemask to fit a given component count. Iterate each
 * component, and check if all bytes in the component are masked on */
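/* For instance, in midgard_reg_mode_16 the byte mask 0x0007 rounds down to
 * 0x0003, since only the low byte of the second component is set */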

uint16_t
mir_round_bytemask_down(uint16_t mask, midgard_reg_mode mode)
{
        unsigned bytes = mir_bytes_for_mode(mode);
        unsigned maxmask = mask_of(bytes);
        unsigned channels = 16 / bytes;

        for (unsigned c = 0; c < channels; ++c) {
                /* Get bytes in component */
                unsigned submask = (mask >> (c * bytes)) & maxmask;

                if (submask != maxmask)
                        mask &= ~(maxmask << (c * bytes));
        }

        return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        return mir_to_bytemask(mir_typesize(ins), ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        ins->mask = mir_from_bytemask(bytemask, mir_typesize(ins));
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 * fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, midgard_reg_mode mode)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return mir_to_bytemask(mode, cmask);
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;

                /* Branch writeout uses all components */
                if (ins->compact_branch && ins->writeout && (i == 0))
                        return 0xFFFF;

                /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
                if (ins->compact_branch && !ins->prepacked_branch && ins->branch.conditional && (i == 0))
                        return 0xF;

                /* ALU ops act componentwise so we need to pay attention to
                 * their mask. Texture/ldst does not so we don't clamp source
                 * readmasks based on the writemask */
                unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;

                /* Handle dot products and things */
                if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                        unsigned props = alu_opcode_props[ins->alu.op].props;

                        unsigned channel_override = GET_CHANNEL_COUNT(props);

                        if (channel_override)
                                qmask = mask_of(channel_override);
                }

                mask |= mir_bytemask_of_read_components_single(ins->swizzle[i], qmask, mir_srcsize(ins, i));
        }

        return mask;
}

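/* Returns the shift (log2 of the access size in bytes) for a UBO load opcode */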
unsigned
mir_ubo_shift(midgard_load_store_op op)
{
        switch (op) {
        case midgard_op_ld_ubo_char:
                return 0;
        case midgard_op_ld_ubo_char2:
                return 1;
        case midgard_op_ld_ubo_char4:
                return 2;
        case midgard_op_ld_ubo_short4:
                return 3;
        case midgard_op_ld_ubo_int4:
                return 4;
        default:
                unreachable("Invalid op");
        }
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

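/* Wraps a single instruction in a one-instruction bundle of the matching tag;
 * on the ALU path only moves are supported, scheduled to the VMUL unit */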
static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

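/* Finds the index of the bundle containing a given scheduled instruction
 * within a block */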
static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += quadword_size(new.tag);
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += quadword_size(new.tag);
}

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}