0289e5e34965749250fec4f9e2d81397e3f2482a
[mesa.git] / src / panfrost / midgard / mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

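/* Rewrites uses of the node "old" to "new". The *_single variants patch a
 * single instruction; the plain variants walk every instruction in the
 * program via mir_foreach_instr_global */
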
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

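/* Grabs the packed vector ALU source descriptor for operand idx
 * (0 = src1, 1 = src2) */
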
static midgard_vector_alu_src
mir_get_alu_src(midgard_instruction *ins, unsigned idx)
{
        unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;
        return vector_alu_from_unsigned(b);
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

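/* Counts how many instructions in the program read the given value */
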
unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

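/* A source modifier is "nontrivial" if it changes the value being read: any
 * modifier for floats, or a shift for integers. mir_nontrivial_mod also
 * considers half-size conversion and non-identity swizzles of enabled
 * components */
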
static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
{
        if (is_int)
                return src.mod == midgard_int_shift;
        else
                return src.mod;
}

static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask, unsigned *swizzle)
{
        if (mir_nontrivial_raw_mod(src, is_int)) return true;

        /* size-conversion */
        if (src.half) return true;

        for (unsigned c = 0; c < 16; ++c) {
                if (!(mask & (1 << c))) continue;
                if (swizzle[c] != c) return true;
        }

        return false;
}

bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_mod(src2, is_int, ins->mask, ins->swizzle[1]);
}

bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        /* Type conversion is a sort of outmod */
        if (ins->alu.dest_override != midgard_dest_override_none)
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

/* Grabs the type size (register mode) of an instruction */

midgard_reg_mode
mir_typesize(midgard_instruction *ins)
{
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        /* TODO: Type sizes for texture */
        if (ins->type == TAG_TEXTURE_4)
                return midgard_reg_mode_32;

        if (ins->type == TAG_LOAD_STORE_4)
                return GET_LDST_SIZE(load_store_opcode_props[ins->load_store.op].props);

        if (ins->type == TAG_ALU_4) {
                midgard_reg_mode mode = ins->alu.reg_mode;

                /* If we have an override, step down by half */
                if (ins->alu.dest_override != midgard_dest_override_none) {
                        assert(mode > midgard_reg_mode_8);
                        mode--;
                }

                return mode;
        }

        unreachable("Invalid instruction type");
}

/* Grabs the size of a source */

midgard_reg_mode
mir_srcsize(midgard_instruction *ins, unsigned i)
{
        if (ins->type == TAG_LOAD_STORE_4) {
                if (OP_HAS_ADDRESS(ins->load_store.op)) {
                        if (i == 1)
                                return midgard_reg_mode_64;
                        else if (i == 2) {
                                bool zext = ins->load_store.arg_1 & 0x80;
                                return zext ? midgard_reg_mode_32 : midgard_reg_mode_64;
                        }
                }
        }

        /* TODO: 16-bit textures/ldst */
        if (ins->type == TAG_TEXTURE_4 || ins->type == TAG_LOAD_STORE_4)
                return midgard_reg_mode_32;

        /* TODO: 16-bit branches */
        if (ins->compact_branch)
                return midgard_reg_mode_32;

        if (i >= 2) {
                /* TODO: 16-bit conditions, ffma */
                return midgard_reg_mode_32;
        }

        /* Default to type of the instruction */

        midgard_reg_mode mode = ins->alu.reg_mode;

        /* If we have a half modifier, step down by half */

        if ((mir_get_alu_src(ins, i)).half) {
                assert(mode > midgard_reg_mode_8);
                mode--;
        }

        return mode;
}

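/* Converts a destination size in bits (8, 16, 32 or 64) to the corresponding
 * register mode */
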
midgard_reg_mode
mir_mode_for_destsize(unsigned size)
{
        switch (size) {
        case 8:
                return midgard_reg_mode_8;
        case 16:
                return midgard_reg_mode_16;
        case 32:
                return midgard_reg_mode_32;
        case 64:
                return midgard_reg_mode_64;
        default:
                unreachable("Unknown destination size");
        }
}

/* ...and the inverse */

unsigned
mir_bytes_for_mode(midgard_reg_mode mode)
{
        switch (mode) {
        case midgard_reg_mode_8:
                return 1;
        case midgard_reg_mode_16:
                return 2;
        case midgard_reg_mode_32:
                return 4;
        case midgard_reg_mode_64:
                return 8;
        default:
                unreachable("Invalid register mode");
        }
}

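/* Converts a per-byte mask into a per-component mask for the given register
 * mode, asserting that every byte of a component agrees. E.g. a bytemask of
 * 0xF0 in 32-bit mode becomes the component mask 0x2 */
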
uint16_t
mir_from_bytemask(uint16_t bytemask, midgard_reg_mode mode)
{
        unsigned value = 0;
        unsigned count = mir_bytes_for_mode(mode);

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}

/* Rounds up a bytemask to fill a given component count. Iterate over each
 * component and check whether any byte in the component is masked on */

uint16_t
mir_round_bytemask_up(uint16_t mask, midgard_reg_mode mode)
{
        unsigned bytes = mir_bytes_for_mode(mode);
        unsigned maxmask = mask_of(bytes);
        unsigned channels = 16 / bytes;

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        return pan_to_bytemask(mir_bytes_for_mode(mir_typesize(ins)) * 8, ins->mask);
}

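/* ...and conversely, writes a per-byte mask back as the instruction's
 * per-component mask */
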
void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        ins->mask = mir_from_bytemask(bytemask, mir_typesize(ins));
}

/* Checks if we should use an upper destination override, rather than the lower
 * one in the IR. Returns zero if no, otherwise returns the number of
 * components to shift over */

unsigned
mir_upper_override(midgard_instruction *ins)
{
        /* If there is no override, there is no upper override, tautology */
        if (ins->alu.dest_override == midgard_dest_override_none)
                return 0;

        /* Make sure we didn't already lower somehow */
        assert(ins->alu.dest_override == midgard_dest_override_lower);

        /* What mode is the mask currently expressed in? */
        midgard_reg_mode type = mir_typesize(ins);

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes */

        unsigned threshold = 8 / mir_bytes_for_mode(type);

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *  fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, midgard_reg_mode mode)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(mir_bytes_for_mode(mode) * 8, cmask);
}

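/* Gets the per-byte mask read from source i, accounting for writeout,
 * conditional branches and channel-count overrides (e.g. dot products) */
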
uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        if (ins->compact_branch && ins->writeout && (i == 0)) {
                /* Non-ZS writeout uses all components */
                if (!ins->writeout_depth && !ins->writeout_stencil)
                        return 0xFFFF;

                /* For ZS-writeout, if both Z and S are written we need two
                 * components, otherwise we only need one.
                 */
                if (ins->writeout_depth && ins->writeout_stencil)
                        return 0xFF;
                else
                        return 0xF;
        }

        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise, so we need to pay attention to their
         * mask. Texture/ldst does not, so we don't clamp source readmasks
         * based on the writemask */
        unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->alu.op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask, mir_srcsize(ins, i));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

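/* Wraps a single instruction in a fresh one-instruction bundle so it can be
 * spliced into a scheduled block. ALU bundles are expected to be moves (as
 * emitted when spilling) and are scheduled to VMUL */
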
static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

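/* Finds the index of the bundle containing a given scheduled instruction
 * within its block */
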
static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* Move all the bundles after that one ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}