[mesa.git] src/panfrost/midgard/mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

unsigned
mir_get_swizzle(midgard_instruction *ins, unsigned idx)
{
        if (ins->type == TAG_ALU_4) {
                if (idx == 2)
                        return ins->csel_swizzle;

                unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;

                midgard_vector_alu_src s =
                        vector_alu_from_unsigned(b);

                return s.swizzle;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                /* Main swizzle of a load is on the destination */
                if (!OP_IS_STORE(ins->load_store.op))
                        idx++;

                switch (idx) {
                case 0:
                        return ins->load_store.swizzle;
                case 1:
                case 2: {
                        uint8_t raw =
                                (idx == 2) ? ins->load_store.arg_2 : ins->load_store.arg_1;

                        return component_to_swizzle(midgard_ldst_select(raw).component);
                }
                default:
                        unreachable("Unknown load/store source");
                }
        } else if (ins->type == TAG_TEXTURE_4) {
                switch (idx) {
                case 0:
                        return ins->texture.in_reg_swizzle;
                case 1:
                        /* Swizzle on bias doesn't make sense */
                        return 0;
                default:
                        unreachable("Unknown texture source");
                }
        } else {
                unreachable("Unknown type");
        }
}

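/* Worked example (informal sketch): ALU swizzles pack two bits per output
 * lane, so the replication .yyyy packs as 0b01010101 = 0x55 and the identity
 * .xyzw as 0b11100100 = 0xE4, matching the (swizzle >> (2*c)) & 3 decode
 * used elsewhere in this file. */
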
void
mir_set_swizzle(midgard_instruction *ins, unsigned idx, unsigned new)
{
        if (ins->type == TAG_ALU_4) {
                unsigned b = (idx == 0) ? ins->alu.src1 : ins->alu.src2;

                midgard_vector_alu_src s =
                        vector_alu_from_unsigned(b);

                s.swizzle = new;
                unsigned pack = vector_alu_srco_unsigned(s);

                if (idx == 0)
                        ins->alu.src1 = pack;
                else
                        ins->alu.src2 = pack;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                /* Main swizzle of a load is on the destination */
                if (!OP_IS_STORE(ins->load_store.op))
                        idx++;

                switch (idx) {
                case 0:
                        ins->load_store.swizzle = new;
                        break;
                case 1:
                case 2: {
                        uint8_t raw =
                                (idx == 2) ? ins->load_store.arg_2 : ins->load_store.arg_1;

                        midgard_ldst_register_select sel
                                = midgard_ldst_select(raw);
                        sel.component = swizzle_to_component(new);
                        uint8_t packed = midgard_ldst_pack(sel);

                        if (idx == 2)
                                ins->load_store.arg_2 = packed;
                        else
                                ins->load_store.arg_1 = packed;

                        break;
                }
                default:
                        assert(new == 0);
                        break;
                }
        } else if (ins->type == TAG_TEXTURE_4) {
                switch (idx) {
                case 0:
                        ins->texture.in_reg_swizzle = new;
                        break;
                default:
                        assert(new == 0);
                        break;
                }
        } else {
                unreachable("Unknown type");
        }
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;

                mir_set_swizzle(ins, i,
                                pan_compose_swizzle(mir_get_swizzle(ins, i), swizzle));
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_src_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag)
{
        mir_foreach_instr_global(ctx, ins) {
                if (ins->type != tag)
                        continue;

                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index_dst_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag)
{
        mir_foreach_instr_global(ctx, ins) {
                if (ins->type != tag)
                        continue;

                if (ins->dest == old)
                        ins->dest = new;
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}

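/* Hypothetical usage sketch (names assumed, not from this file): a
 * copy-propagation pass might only fold a move when its destination has a
 * single use, e.g.
 *
 *    if (OP_IS_MOVE(ins->alu.op) && mir_single_use(ctx, ins->dest))
 *            mir_rewrite_index_src(ctx, ins->dest, ins->src[1]);
 */
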
static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
{
        if (is_int)
                return src.mod == midgard_int_shift;
        else
                return src.mod;
}

bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
{
        if (mir_nontrivial_raw_mod(src, is_int)) return true;

        /* size-conversion */
        if (src.half) return true;

        /* swizzle */
        for (unsigned c = 0; c < 4; ++c) {
                if (!(mask & (1 << c))) continue;
                if (((src.swizzle >> (2 * c)) & 3) != c) return true;
        }

        return false;
}

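/* For instance, under a write mask of .x, the swizzles .xyzw and .xzzz are
 * both trivial (lane 0 reads component 0 either way), while .yyyy is a
 * nontrivial mod, since lane 0 would read component 1. */
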
bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_mod(src2, is_int, ins->mask);
}

bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        /* Pseudo-outmod */
        if (ins->invert)
                return true;

        /* Type conversion is a sort of outmod */
        if (ins->alu.dest_override != midgard_dest_override_none)
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

/* Checks if an index will be used as a special register -- basically, if
 * it's used as the input to a non-ALU op */

bool
mir_special_index(compiler_context *ctx, unsigned idx)
{
        mir_foreach_instr_global(ctx, ins) {
                bool is_ldst = ins->type == TAG_LOAD_STORE_4;
                bool is_tex = ins->type == TAG_TEXTURE_4;
                bool is_writeout = ins->compact_branch && ins->writeout;

                if (!(is_ldst || is_tex || is_writeout))
                        continue;

                if (mir_has_arg(ins, idx))
                        return true;
        }

        return false;
}

/* Is a node written before a given instruction? */

bool
mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node)
{
        if (node >= SSA_FIXED_MINIMUM)
                return true;

        mir_foreach_instr_global(ctx, q) {
                if (q == ins)
                        break;

                if (q->dest == node)
                        return true;
        }

        return false;
}

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *    fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static unsigned
mir_mask_of_read_components_single(unsigned swizzle, unsigned outmask)
{
        unsigned mask = 0;

        for (unsigned c = 0; c < 4; ++c) {
                if (!(outmask & (1 << c))) continue;

                unsigned comp = (swizzle >> (2 * c)) & 3;
                mask |= (1 << comp);
        }

        return mask;
}

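/* Working through the example above (a sketch following this file's decode):
 * r2's swizzle .zwyx packs as 0b00011110 and the write mask .xz is 0b0101,
 * so lane 0 reads component 2 (Z) and lane 2 reads component 1 (Y), giving
 * a read mask of 0b0110 = Y | Z. */
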
static unsigned
mir_source_count(midgard_instruction *ins)
{
        if (ins->type == TAG_ALU_4) {
                /* ALU is always binary, except csel */
                return OP_IS_CSEL(ins->alu.op) ? 3 : 2;
        } else if (ins->type == TAG_LOAD_STORE_4) {
                bool load = !OP_IS_STORE(ins->load_store.op);
                return (load ? 2 : 3);
        } else if (ins->type == TAG_TEXTURE_4) {
                /* Coords, bias.. TODO: Offsets? */
                return 2;
        } else {
                unreachable("Invalid instruction type");
        }
}

static unsigned
mir_component_count_implicit(midgard_instruction *ins, unsigned i)
{
        if (ins->type == TAG_LOAD_STORE_4) {
                switch (ins->load_store.op) {
                /* Address implicitly 64-bit */
                case midgard_op_ld_int4:
                        return (i == 0) ? 1 : 0;

                case midgard_op_st_int4:
                        return (i == 1) ? 1 : 0;

                default:
                        return 0;
                }
        }

        return 0;
}

unsigned
mir_mask_of_read_components(midgard_instruction *ins, unsigned node)
{
        unsigned mask = 0;

        for (unsigned i = 0; i < mir_source_count(ins); ++i) {
                if (ins->src[i] != node) continue;

                /* Branch writeout uses all components */
                if (ins->compact_branch && ins->writeout && (i == 0))
                        return 0xF;

                unsigned swizzle = mir_get_swizzle(ins, i);
                unsigned m = mir_mask_of_read_components_single(swizzle, ins->mask);

                /* Some arguments (e.g. 64-bit addresses) implicitly span
                 * extra components */
                unsigned implicit = mir_component_count_implicit(ins, i);
                assert(implicit < 2);

                /* Extend the mask */
                if (implicit == 1) {
                        /* Ensure it's a single bit currently */
                        assert((m >> __builtin_ctz(m)) == 0x1);

                        /* Set the next bit to extend by one component */
                        m |= (m << 1);
                }

                /* Handle dot products and things */
                if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                        unsigned channel_override =
                                GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);

                        if (channel_override)
                                m = mask_of(channel_override);
                }

                mask |= m;
        }

        return mask;
}

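/* Informal example of the implicit widening above: a midgard_op_ld_int4
 * whose 64-bit address node is read with swizzle .x yields m = 0b0001,
 * which the (m << 1) extension grows to 0b0011 so that both address
 * components count as read. */
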
unsigned
mir_ubo_shift(midgard_load_store_op op)
{
        switch (op) {
        case midgard_op_ld_ubo_char:
                return 0;
        case midgard_op_ld_ubo_char2:
                return 1;
        case midgard_op_ld_ubo_char4:
                return 2;
        case midgard_op_ld_ubo_short4:
                return 3;
        case midgard_op_ld_ubo_int4:
                return 4;
        default:
                unreachable("Invalid op");
        }
}

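/* The shift is log2 of the access size in bytes (char = 1 byte up to
 * int4 = 16 bytes), so a caller can presumably turn a byte offset into a
 * native UBO index with, e.g., offset >> mir_ubo_shift(midgard_op_ld_ubo_int4). */
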
/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned after = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle_1 = bundles + after + 2;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_addtail(&new.instructions[0]->link, &after_bundle_1->instructions[0]->link);
}