/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler.h"
#include "midgard_ops.h"

void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] == old)
                        ins->src[i] = new;
        }
}

void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new)
{
        if (ins->dest == old)
                ins->dest = new;
}

static void
mir_rewrite_index_src_single_swizzle(midgard_instruction *ins, unsigned old, unsigned new, unsigned *swizzle)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
                if (ins->src[i] != old) continue;

                ins->src[i] = new;
                mir_compose_swizzle(ins->swizzle[i], swizzle, ins->swizzle[i]);
        }
}

void
mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single(ins, old, new);
        }
}

void
mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_src_single_swizzle(ins, old, new, swizzle);
        }
}

void
mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_foreach_instr_global(ctx, ins) {
                mir_rewrite_index_dst_single(ins, old, new);
        }

        /* Implicitly written before the shader */
        if (ctx->blend_input == old)
                ctx->blend_input = new;

        if (ctx->blend_src1 == old)
                ctx->blend_src1 = new;
}

void
mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new)
{
        mir_rewrite_index_src(ctx, old, new);
        mir_rewrite_index_dst(ctx, old, new);
}

unsigned
mir_use_count(compiler_context *ctx, unsigned value)
{
        unsigned used_count = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (mir_has_arg(ins, value))
                        ++used_count;
        }

        return used_count;
}

/* Checks if a value is used only once (or totally dead), which is an important
 * heuristic to figure out if certain optimizations are Worth It (TM) */

bool
mir_single_use(compiler_context *ctx, unsigned value)
{
        /* We can replicate constants in places so who cares */
        if (value == SSA_FIXED_REGISTER(REGISTER_CONSTANT))
                return true;

        return mir_use_count(ctx, value) <= 1;
}
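
/* As an illustration (a sketch, not part of this file): a peephole pass
 * might use the heuristic above to decide whether rewriting a value is
 * free. The pass below is hypothetical; only mir_single_use() and the
 * iterators are real. */

#if 0
static void
example_try_fold(compiler_context *ctx)
{
        mir_foreach_instr_global(ctx, ins) {
                if (ins->type != TAG_ALU_4 || !OP_IS_MOVE(ins->op))
                        continue;

                /* Folding a move's destination into its user only pays off
                 * when nobody else reads it; otherwise we duplicate work */
                if (!mir_single_use(ctx, ins->dest))
                        continue;

                /* ... fold and remove the move ... */
        }
}
#endif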

bool
mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
{
        bool is_int = midgard_is_integer_op(ins->op);

        if (is_int) {
                if (ins->src_shift[i]) return true;
        } else {
                if (ins->src_neg[i]) return true;
                if (ins->src_abs[i]) return true;
        }

        if (ins->dest_type != ins->src_types[i]) return true;

        if (check_swizzle) {
                for (unsigned c = 0; c < 16; ++c) {
                        if (!(ins->mask & (1 << c))) continue;
                        if (ins->swizzle[i][c] != c) return true;
                }
        }

        return false;
}

bool
mir_nontrivial_outmod(midgard_instruction *ins)
{
        bool is_int = midgard_is_integer_op(ins->op);
        unsigned mod = ins->alu.outmod;

        if (ins->dest_type != ins->src_types[1])
                return true;

        if (is_int)
                return mod != midgard_outmod_int_wrap;
        else
                return mod != midgard_outmod_none;
}

/* 128 / sz = exp2(log2(128 / sz))
 *          = exp2(log2(128) - log2(sz))
 *          = exp2(7 - log2(sz))
 *          = 1 << (7 - log2(sz))
 */

static unsigned
mir_components_for_bits(unsigned bits)
{
        return 1 << (7 - util_logbase2(bits));
}
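
/* Worked instances of the identity above, for a 128-bit vector:
 *
 *    bits = 8:  1 << (7 - 3) = 16 components
 *    bits = 16: 1 << (7 - 4) =  8 components
 *    bits = 32: 1 << (7 - 5) =  4 components
 *    bits = 64: 1 << (7 - 6) =  2 components
 */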

unsigned
mir_components_for_type(nir_alu_type T)
{
        unsigned sz = nir_alu_type_get_type_size(T);
        return mir_components_for_bits(sz);
}

/* Converts a per-byte mask back to a per-component mask, given the component
 * size in bits. Every byte of a component must agree (asserted) */

uint16_t
mir_from_bytemask(uint16_t bytemask, unsigned bits)
{
        unsigned value = 0;
        unsigned count = bits / 8;

        for (unsigned c = 0, d = 0; c < 16; c += count, ++d) {
                bool a = (bytemask & (1 << c)) != 0;

                /* Every byte of the component must match its first byte */
                for (unsigned q = c; q < c + count; ++q)
                        assert(((bytemask & (1 << q)) != 0) == a);

                value |= (a << d);
        }

        return value;
}
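
/* For instance, with 32-bit (4-byte) components, byte 4-7 coverage is
 * exactly component 1:
 *
 *    mir_from_bytemask(0x00F0, 32) == 0x2
 *    mir_from_bytemask(0x000F, 8)  == 0xF   (bytes are their own components)
 */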

/* Rounds up a bytemask to fill a given component count. Iterate each
 * component, and check if any bytes in the component are masked on */

uint16_t
mir_round_bytemask_up(uint16_t mask, unsigned bits)
{
        unsigned bytes = bits / 8;
        unsigned maxmask = mask_of(bytes);
        unsigned channels = mir_components_for_bits(bits);

        for (unsigned c = 0; c < channels; ++c) {
                unsigned submask = maxmask << (c * bytes);

                if (mask & submask)
                        mask |= submask;
        }

        return mask;
}
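
/* For example, a write touching only the low byte of a 32-bit component
 * rounds up to the whole component, and partial 16-bit components round up
 * likewise:
 *
 *    mir_round_bytemask_up(0x0001, 32) == 0x000F
 *    mir_round_bytemask_up(0x0110, 16) == 0x0330
 */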

/* Grabs the per-byte mask of an instruction (as opposed to per-component) */

uint16_t
mir_bytemask(midgard_instruction *ins)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        return pan_to_bytemask(type_size, ins->mask);
}

void
mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
        ins->mask = mir_from_bytemask(bytemask, type_size);
}

/* Checks if we should use an upper destination override, rather than the
 * lower one in the IR. Returns -1 if no override is applicable (the sizes
 * match), zero if the lower half is written, or the number of components
 * to shift over otherwise */

signed
mir_upper_override(midgard_instruction *ins, unsigned inst_size)
{
        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);

        /* If the sizes are the same, there's nothing to override */
        if (type_size == inst_size)
                return -1;

        /* There are 16 bytes per vector, so there are (16/bytes)
         * components per vector. So the magic half is half of
         * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
         */

        unsigned threshold = mir_components_for_bits(type_size) >> 1;

        /* How many components did we shift over? */
        unsigned zeroes = __builtin_ctz(ins->mask);

        /* Did we hit the threshold? */
        return (zeroes >= threshold) ? threshold : 0;
}
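
/* For example, a 16-bit destination in a 32-bit instruction has 8 half-word
 * components per vector, so the threshold is 4: mask 0x0F yields 0 (lower
 * half), mask 0xF0 has ctz = 4 >= 4 and yields 4 (upper override), and a
 * 32-bit destination in a 32-bit instruction yields -1 */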

/* Creates a mask of the components of a node read by an instruction, by
 * analyzing the swizzle with respect to the instruction's mask. E.g.:
 *
 *  fadd r0.xz, r1.yyyy, r2.zwyx
 *
 * will return a mask of Z/Y for r2
 */

static uint16_t
mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsigned bits)
{
        unsigned cmask = 0;

        for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                if (!(inmask & (1 << c))) continue;
                cmask |= (1 << swizzle[c]);
        }

        return pan_to_bytemask(bits, cmask);
}
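
/* Continuing the example above for r2: the written mask is 0b0101 (x/z),
 * and the swizzle zwyx sends lane 0 to z and lane 2 to y, so cmask = 0b0110.
 * At 32 bits per component, that expands to bytemask 0x0FF0 */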

uint16_t
mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
{
        /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
        if (ins->compact_branch && ins->branch.conditional && (i == 0))
                return 0xF;

        /* ALU ops act componentwise, so we need to pay attention to their
         * mask. Texture/ldst ops do not, so we don't clamp source read
         * masks based on the writemask */
        unsigned qmask = ~0;

        /* Handle dot products and things */
        if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
                unsigned props = alu_opcode_props[ins->op].props;

                unsigned channel_override = GET_CHANNEL_COUNT(props);

                if (channel_override)
                        qmask = mask_of(channel_override);
                else
                        qmask = ins->mask;
        }

        return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
                        nir_alu_type_get_type_size(ins->src_types[i]));
}

uint16_t
mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node)
{
        uint16_t mask = 0;

        if (node == ~0)
                return 0;

        mir_foreach_src(ins, i) {
                if (ins->src[i] != node) continue;
                mask |= mir_bytemask_of_read_components_index(ins, i);
        }

        return mask;
}

/* Register allocation occurs after instruction scheduling, which is fine until
 * we start needing to spill registers and therefore insert instructions into
 * an already-scheduled program. We don't have to be terribly efficient about
 * this, since spilling is already slow. So, semantically, we just need to
 * insert the instruction into a new bundle before/after the bundle of the
 * instruction in question */

static midgard_bundle
mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);

        midgard_bundle bundle = {
                .tag = ins.type,
                .instruction_count = 1,
                .instructions = { u },
        };

        if (bundle.tag == TAG_ALU_4) {
                assert(OP_IS_MOVE(u->op));
                u->unit = UNIT_VMUL;

                size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);
                bundle.padding = ~(bytes_emitted - 1) & 0xF;
                bundle.control = ins.type | u->unit;
        }

        return bundle;
}

static unsigned
mir_bundle_idx_for_ins(midgard_instruction *tag, midgard_block *block)
{
        midgard_bundle *bundles =
                (midgard_bundle *) block->bundles.data;

        size_t count = (block->bundles.size / sizeof(midgard_bundle));

        for (unsigned i = 0; i < count; ++i) {
                for (unsigned j = 0; j < bundles[i].instruction_count; ++j) {
                        if (bundles[i].instructions[j] == tag)
                                return i;
                }
        }

        mir_print_instruction(tag);
        unreachable("Instruction not scheduled in block");
}

void
mir_insert_instruction_before_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        unsigned before = mir_bundle_idx_for_ins(tag, block);
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Make room: shift the target bundle and everything after it ahead
         * by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + before + 1, bundles + before, (count - before) * sizeof(midgard_bundle));
        midgard_bundle *before_bundle = bundles + before + 1;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + before, &new, sizeof(new));

        list_addtail(&new.instructions[0]->link, &before_bundle->instructions[0]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}

void
mir_insert_instruction_after_scheduled(
        compiler_context *ctx,
        midgard_block *block,
        midgard_instruction *tag,
        midgard_instruction ins)
{
        /* We need to grow the bundles array to add our new bundle */
        size_t count = util_dynarray_num_elements(&block->bundles, midgard_bundle);
        UNUSED void *unused = util_dynarray_grow(&block->bundles, midgard_bundle, 1);

        /* Find the bundle that we want to insert after */
        unsigned after = mir_bundle_idx_for_ins(tag, block);

        /* All the bundles after that one, we move ahead by one */
        midgard_bundle *bundles = (midgard_bundle *) block->bundles.data;
        memmove(bundles + after + 2, bundles + after + 1, (count - after - 1) * sizeof(midgard_bundle));
        midgard_bundle *after_bundle = bundles + after;

        midgard_bundle new = mir_bundle_for_op(ctx, ins);
        memcpy(bundles + after + 1, &new, sizeof(new));
        list_add(&new.instructions[0]->link, &after_bundle->instructions[after_bundle->instruction_count - 1]->link);
        block->quadword_count += midgard_tag_props[new.tag].size;
}
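
/* For illustration, how a spiller might use the helpers above. This is a
 * sketch, not the actual RA code: example_insert_fill is hypothetical,
 * while v_mov() and mir_insert_instruction_before_scheduled() are real. */

#if 0
static void
example_insert_fill(compiler_context *ctx, midgard_block *block,
                    midgard_instruction *use, unsigned fill_src, unsigned temp)
{
        /* A move satisfies the OP_IS_MOVE() assertion in mir_bundle_for_op() */
        midgard_instruction fill = v_mov(fill_src, temp);

        mir_insert_instruction_before_scheduled(ctx, block, use, fill);
}
#endif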

/* Flip the first two arguments of a (binary) op. Currently ALU
 * only, no known uses for ldst/tex */

void
mir_flip(midgard_instruction *ins)
{
        assert(ins->type == TAG_ALU_4);

        unsigned temp = ins->src[0];
        ins->src[0] = ins->src[1];
        ins->src[1] = temp;

        temp = ins->alu.src1;
        ins->alu.src1 = ins->alu.src2;
        ins->alu.src2 = temp;

        temp = ins->src_types[0];
        ins->src_types[0] = ins->src_types[1];
        ins->src_types[1] = temp;

        temp = ins->src_abs[0];
        ins->src_abs[0] = ins->src_abs[1];
        ins->src_abs[1] = temp;

        temp = ins->src_neg[0];
        ins->src_neg[0] = ins->src_neg[1];
        ins->src_neg[1] = temp;

        temp = ins->src_invert[0];
        ins->src_invert[0] = ins->src_invert[1];
        ins->src_invert[1] = temp;

        unsigned temp_swizzle[16];
        memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
        memcpy(ins->swizzle[1], temp_swizzle, sizeof(ins->swizzle[0]));
}
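
/* E.g. a pass wanting constants in the second source of a commutative op
 * could canonicalize with (a sketch; the legality check is the caller's
 * business):
 *
 *    if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
 *        (alu_opcode_props[ins->op].props & OP_COMMUTES))
 *            mir_flip(ins);
 */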

/* Before squashing, calculate ctx->temp_count just by observing the MIR */

void
mir_compute_temp_count(compiler_context *ctx)
{
        if (ctx->temp_count)
                return;

        unsigned max_dest = 0;

        mir_foreach_instr_global(ctx, ins) {
                if (ins->dest < SSA_FIXED_MINIMUM)
                        max_dest = MAX2(max_dest, ins->dest + 1);
        }

        ctx->temp_count = max_dest;
}