pan/midgard: Add mir_rewrite_index_dst_single helper
[mesa.git] / src / panfrost / midgard / compiler.h
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard.h"
#include "helpers.h"
#include "midgard_compile.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/set.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"

/* Forward declare */
struct midgard_block;

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), hence why that must be zero. TARGET_DISCARD signals this
 * instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3

typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        unsigned target_type;

        /* The actual target */
        union {
                int target_block;
                int target_break;
                int target_continue;
        };
} midgard_branch;

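/* Illustrative sketch only (not upstream code): a "break" emitted while
 * lowering a NIR loop might fill in the descriptor roughly like this,
 * with the target resolved to a concrete block once the loop is closed:
 *
 *    midgard_branch br = {
 *            .conditional = false,
 *            .target_type = TARGET_BREAK,
 *            .target_break = ctx->current_loop_depth,
 *    };
 */
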
/* Instruction arguments represented as block-local SSA indices, rather than
 * registers. Negative values mean unused. */

typedef struct {
        int src[3];
        int dest;

        bool inline_constant;
} ssa_args;

/* Generic in-memory data type representing a single logical instruction,
 * rather than a single instruction group. This is the preferred form for
 * code gen. Multiple midgard_instructions will later be combined during
 * scheduling, though this is not represented in this structure. Its format
 * bridges the low-level binary representation with the higher level
 * semantic meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass.
 */

typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* If the register allocator has not run yet... */
        ssa_args ssa_args;

        /* Special fields for an ALU instruction */
        midgard_reg_info registers;

        /* I.e. (1 << alu_bit) */
        int unit;

        /* When emitting bundle, should this instruction have a break forced
         * before it? Used for r31 writes which are valid only within a single
         * bundle and *need* to happen as early as possible... this is a hack,
         * TODO remove when we have a scheduler */
        bool precede_break;

        bool has_constants;
        float constants[4];
        uint16_t inline_constant;
        bool has_blend_constant;

        bool compact_branch;
        bool writeout;
        bool prepacked_branch;

        /* Kind of a hack, but hint against aggressive DCE */
        bool dont_eliminate;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */

        uint16_t mask;

        /* For ALU ops only: set to true to invert (bitwise NOT) the
         * destination of an integer-out op. Not implemented in hardware but
         * allows more optimizations */

        bool invert;

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself) */

        bool no_spill;

        /* Generic hint for intra-pass use */
        bool hint;

        union {
                midgard_load_store_word load_store;
                midgard_vector_alu alu;
                midgard_texture_word texture;
                midgard_branch_extended branch_extended;
                uint16_t br_compact;

                /* General branch, rather than packed br_compact. Higher level
                 * than the other components */
                midgard_branch branch;
        };
} midgard_instruction;

typedef struct midgard_block {
        /* Link to next block. Must be first for mir_get_block */
        struct list_head link;

        /* List of midgard_instructions emitted for the current block */
        struct list_head instructions;

        /* Index of the block in source order */
        unsigned source_id;

        bool is_scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Succeeding blocks. The compiler should not necessarily rely on
         * source-order traversal */
        struct midgard_block *successors[2];
        unsigned nr_successors;

        struct set *predecessors;

        /* The successors pointers form a graph, and in the case of complex
         * control flow, this graph can have cycles. To aid traversal during
         * liveness analysis, we have a visited boolean for passes to use as
         * they see fit, provided they clean up later */
        bool visited;
} midgard_block;

typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle */
        int instruction_count;
        midgard_instruction *instructions[5];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        float constants[4];
        bool has_blend_constant;
} midgard_bundle;

typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* The screen we correspond to */
        struct midgard_screen *screen;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* The block we are currently emitting into */
        midgard_block *current_block;

        /* If there is a preset after-block, use it; otherwise emit_block
         * will create one if NULL */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;
        int temp_count;
        int max_hash;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* Used for cont/last hinting. Increase when a tex op is added.
         * Decrease when a tex op is removed. */
        int texture_op_count;

        /* Mapping of texture register -> SSA index for unaliasing */
        int texture_index[2];

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */
        float alpha_ref;

        /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
        unsigned sysvals[MAX_SYSVAL_COUNT];
        unsigned sysval_count;
        struct hash_table_u64 *sysval_to_id;
} compiler_context;

/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct midgard_instruction ins)
{
        midgard_instruction *heap = malloc(sizeof(ins));
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ins);
        list_addtail(&u->link, &ctx->current_block->instructions);
        return u;
}

static inline struct midgard_instruction *
mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

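/* Usage sketch (hypothetical pass code): since instructions are passed by
 * value and uploaded to the heap internally, placing a new move ahead of
 * an existing instruction looks like:
 *
 *    midgard_instruction mov = v_mov(src_index, blank_alu_src, temp);
 *    mir_insert_instruction_before(use, mov);
 *
 * where "use", "src_index" and "temp" stand in for pass-specific state. */
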
static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}

static inline midgard_instruction*
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction*
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}

#define mir_foreach_block(ctx, v) \
        list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)

#define mir_foreach_instr(ctx, v) \
        list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_safe(ctx, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(v_block, v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(v_block, v)

#define mir_foreach_successor(blk, v) \
        struct midgard_block *v; \
        struct midgard_block **_v; \
        for (_v = &blk->successors[0], \
                        v = *_v; \
                        v != NULL && _v < &blk->successors[2]; \
                        _v++, v = *_v) \

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
                v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->ssa_args.src); ++v)

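/* Example (sketch): the _safe variants tolerate removing the current node,
 * so a deletion pass can walk a block like so:
 *
 *    mir_foreach_instr_in_block_safe(block, ins) {
 *            if (is_dead(ins)) // hypothetical predicate
 *                    mir_remove_instruction(ins);
 *    }
 */
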
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->instructions, struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        /* Walk forward (idx + 1) links: one to step from the list head to
         * the first block, then idx more to reach block idx */
        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}

static inline midgard_block *
mir_exit_block(struct compiler_context *ctx)
{
        midgard_block *last = list_last_entry(&ctx->blocks,
                        struct midgard_block, link);

        /* The last block must be empty (the exit block) */
        assert(list_empty(&last->instructions));
        assert(last->nr_successors == 0);

        return last;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}

/* Registers and SSA values are distinguished in the backend by the
 * bottom-most bit */

#define IS_REG (1)

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
}

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return (src->ssa->index << 1) | 0;
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | IS_REG;
        }
}

static inline unsigned
nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
{
        return nir_src_index(ctx, &src->src);
}

static inline unsigned
nir_dest_index(compiler_context *ctx, nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | IS_REG;
        }
}
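
/* Example (sketch) of round-tripping the packed format: the low bit tags
 * NIR registers, the remaining bits hold the original NIR index:
 *
 *    unsigned idx = nir_dest_index(ctx, dst);
 *    bool is_reg = idx & IS_REG;
 *    unsigned nir_index = idx >> 1;
 */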

/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_tag(compiler_context *ctx, unsigned old, unsigned new, unsigned tag);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
bool mir_special_index(compiler_context *ctx, unsigned idx);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
unsigned mir_mask_of_read_components(midgard_instruction *ins, unsigned node);
unsigned mir_ubo_shift(midgard_load_store_op op);

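/* Sketch of the intended division of labour (illustrative only): the
 * ctx-wide rewrites walk every instruction in the shader, while the
 * _single variants touch exactly one instruction, e.g. while spilling:
 *
 *    mir_foreach_instr_global(ctx, ins) {
 *            if (needs_spill(ins)) // hypothetical predicate
 *                    mir_rewrite_index_dst_single(ins, old_node, spill_slot);
 *    }
 */
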
/* MIR printing */

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_source2_mod(midgard_instruction *ins);
bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
bool mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask);
bool mir_nontrivial_outmod(midgard_instruction *ins);

/* MIR goodies */

static const midgard_vector_alu_src blank_alu_src = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};

static const midgard_vector_alu_src blank_alu_src_xxxx = {
        .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
};

static const midgard_scalar_alu_src blank_scalar_alu_src = {
        .full = true
};

/* Used for encoding the unused source of 1-op instructions */
static const midgard_vector_alu_src zero_alu_src = { 0 };

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .ssa_args = {
                        .src = { SSA_UNUSED_1, src, -1 },
                        .dest = dest,
                },
                .alu = {
                        .op = midgard_alu_op_imov,
                        .reg_mode = midgard_reg_mode_32,
                        .dest_override = midgard_dest_override_none,
                        .outmod = midgard_outmod_int_wrap,
                        .src1 = vector_alu_srco_unsigned(zero_alu_src),
                        .src2 = vector_alu_srco_unsigned(mod)
                },
        };

        return ins;
}

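/* Example (sketch): v_mov with an explicit swizzle, e.g. broadcasting the
 * X component of one node into all four channels of another:
 *
 *    midgard_instruction splat = v_mov(src_index, blank_alu_src_xxxx, dst_index);
 *    emit_mir_instruction(ctx, splat);
 *
 * "src_index" and "dst_index" are illustrative node names. */
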
static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        for (unsigned i = 0; i < ARRAY_SIZE(ins->ssa_args.src); ++i) {
                if (ins->ssa_args.src[i] == arg)
                        return true;
        }

        return false;
}

/* Scheduling */

void schedule_program(compiler_context *ctx);

/* Register allocation */

struct ra_graph;

/* Broad types of register classes so we can handle special
 * registers */

#define NR_REG_CLASSES 5

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_LDST27 2
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4

void mir_lower_special_reads(compiler_context *ctx);
struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
void install_registers(compiler_context *ctx, struct ra_graph *g);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
bool mir_has_multiple_writes(compiler_context *ctx, int src);

void mir_create_pipeline_registers(compiler_context *ctx);

void
midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count);

midgard_instruction *
emit_ubo_read(
        compiler_context *ctx,
        nir_instr *instr,
        unsigned dest,
        unsigned offset,
        nir_src *indirect_offset,
        unsigned index);

void
emit_sysval_read(compiler_context *ctx, nir_instr *instr, signed dest_override, unsigned nr_components);

void
midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

void
midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(unsigned op);

/* Final emission */

void emit_binary_bundle(
        compiler_context *ctx,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);

/* NIR stuff. TODO: Move? Share? Something? */

bool
nir_undef_to_zero(nir_shader *shader);

void
nir_clamp_psiz(nir_shader *shader, float min_size, float max_size);

/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);

void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);

#endif