pan/mdg: Use type to determine triviality of a move
[mesa.git] / src/panfrost/midgard/mir.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}
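
/* For illustration only (hypothetical caller, not part of this file): an
 * optimization pass that folds a move into its sole user would gate on this
 * heuristic, since replicating the work for several users could regress:
 *
 *    if (mir_single_use(ctx, ins->dest))
 *            try_fuse_into_user(ctx, ins);    <-- hypothetical helper
 */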

static bool
mir_nontrivial_raw_mod(midgard_vector_alu_src src, bool is_int)
{
        if (is_int)
                return src.mod == midgard_int_shift;
        else
                return src.mod;
}

static bool
mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask, unsigned *swizzle)
{
        if (mir_nontrivial_raw_mod(src, is_int)) return true;

        /* size-conversion */
        if (src.half) return true;

        for (unsigned c = 0; c < 16; ++c) {
                if (!(mask & (1 << c))) continue;
                if (swizzle[c] != c) return true;
        }

        return false;
}
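
/* For illustration (example, not from the original source): with a writemask
 * covering x/y, "mov r0.xy, r1.xyzw" has a trivial source (identity swizzle,
 * no raw modifier, no half-size conversion) while "mov r0.xy, r1.yxzw" does
 * not, since swizzle[0] = 1 != 0. Only the former kind of move can be
 * copy-propagated away safely. */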

bool
mir_nontrivial_source2_mod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_mod(src2, is_int, ins->mask, ins->swizzle[1]);
}

bool
mir_nontrivial_source2_mod_simple(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);

        midgard_vector_alu_src src2 =
                vector_alu_from_unsigned(ins->alu.src2);

        return mir_nontrivial_raw_mod(src2, is_int) || src2.half;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->alu.op);
        unsigned mod = ins->alu.outmod;

        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* Every byte of the component must agree with its first byte
                 * (the bound is c + count, so all components get checked) */
                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}
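
/* Worked example (not from the original source): with bits = 32, each
 * component spans four bytes, so bytemask 0x00F0 (bytes 4-7 on) yields the
 * component mask 0x2: only component 1 is written. */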

/* Rounds up a bytemask to fill out whole components of a given size. Iterate
 * each component, and check if any bytes in the component are masked on */

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = 16 / bytes;

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}
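
/* Worked example (not from the original source): with bits = 16, components
 * span two bytes each, so the partial mask 0x0004 (just the low byte of
 * component 1) rounds up to 0x000C, covering that component entirely. */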

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

/* Checks if we should use an upper destination override, rather than the lower
 * one in the IR. Returns zero if no, otherwise returns the number of
 * components to shift over */

unsigned
mir_upper_override(midgard_instruction *ins)
{
        /* If there is no override, there is no upper override, tautology */
        if (ins->alu.dest_override == midgard_dest_override_none)
                return 0;

        /* Make sure we didn't already lower somehow */
        assert(ins->alu.dest_override == midgard_dest_override_lower);

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        unsigned threshold = 64 / type_size;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}
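
/* Worked example (not from the original source): a 16-bit destination has
 * eight components per 128-bit vector, so threshold = 64 / 16 = 4. A
 * writemask of 0xF0 gives ctz = 4 >= 4, selecting the upper override
 * (shifted over by 4 components); a mask of 0x0F returns 0 (lower). */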

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *  fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}
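
/* Continuing the example in the comment above (not from the original
 * source): r2 is read under writemask xz, so components 0 and 2 of the
 * swizzle zwyx are live, giving cmask = (1 << 2) | (1 << 1) = 0x6. For a
 * 32-bit source that expands to bytemask 0x0FF0. */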

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        if (ins->compact_branch && ins->writeout && (i == 0)) {
                /* Non-ZS writeout uses all components */
                if (!ins->writeout_depth && !ins->writeout_stencil)
                        return 0xFFFF;

                /* For ZS-writeout, if both Z and S are written we need two
                 * components, otherwise we only need one.
                 */
                if (ins->writeout_depth && ins->writeout_stencil)
                        return 0xFF;
                else
                        return 0xF;
        }

        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise so we need to pay attention to
         * their mask. Texture/ldst does not so we don't clamp source
         * readmasks based on the writemask */
        unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->alu.op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                        nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}
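
/* For illustration (not from the original source): tracking reads per byte
 * rather than per component lets analyses such as liveness keep only the
 * bytes of a node that are actually consumed, instead of pessimistically
 * treating every read as touching the whole 16-byte vector. */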

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So just semantically we need to insert
 * the instruction into a new bundle before/after the bundle of the instruction
 * in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->alu.op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}
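
/* Worked example for the padding math above (not from the original source):
 * ~(bytes_emitted - 1) & 0xF rounds the bundle up to a 16-byte boundary. If
 * bytes_emitted were 10, padding = ~9 & 0xF = 6 and 10 + 6 = 16; if
 * bytes_emitted were already 16, padding = ~15 & 0xF = 0. */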

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}
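
/* For illustration only (hypothetical sketch, not part of this file): spill
 * handling in the register allocator would use these helpers roughly as
 *
 *    midgard_instruction st = v_mov(spilled, scratch);    <-- hypothetical
 *    mir_insert_instruction_after_scheduled(ctx, block, def, st);
 *
 * wrapping the new move in its own single-instruction bundle so the
 * existing schedule stays intact. */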

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        assert(ins->type == TAG_ALU_4);

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}
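
/* For illustration (hypothetical use, not part of this file): flipping is
 * handy for canonicalizing commutative ops, e.g. so an inline constant ends
 * up in src2 where the hardware encoding expects it:
 *
 *    if (commutative && constant_is_src1)    <-- hypothetical condition
 *            mir_flip(ins);
 */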

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}