panfrost: Pipe the GPU ID into compiler and disassembler
[mesa.git] / src / panfrost / midgard / compiler.h
1 /*
2 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #ifndef _MDG_COMPILER_H
25 #define _MDG_COMPILER_H
26
27 #include "midgard.h"
28 #include "helpers.h"
29 #include "midgard_compile.h"
30
31 #include "util/hash_table.h"
32 #include "util/u_dynarray.h"
33 #include "util/set.h"
34 #include "util/list.h"
35
36 #include "main/mtypes.h"
37 #include "compiler/nir_types.h"
38 #include "compiler/nir/nir.h"
39
40 /* Forward declare */
41 struct midgard_block;
42
43 /* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
44  * the hardware), which is why that value must be zero. TARGET_DISCARD signals this
45 * instruction is actually a discard op. */
46
47 #define TARGET_GOTO 0
48 #define TARGET_BREAK 1
49 #define TARGET_CONTINUE 2
50 #define TARGET_DISCARD 3
51
52 typedef struct midgard_branch {
53 /* If conditional, the condition is specified in r31.w */
54 bool conditional;
55
56 /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
57 bool invert_conditional;
58
59 /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
60 unsigned target_type;
61
62 /* The actual target */
63 union {
64 int target_block;
65 int target_break;
66 int target_continue;
67 };
68 } midgard_branch;
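
/* A minimal sketch (hypothetical, not part of the upstream API): a
 * conditional break out of the innermost loop would be described roughly as
 * below before packing, with the condition placed in r31.w by earlier
 * codegen and the concrete target resolved later by the emitter. */

static inline midgard_branch
example_conditional_break(void)
{
        midgard_branch br = {
                .conditional = true,
                .invert_conditional = false,
                .target_type = TARGET_BREAK,
                .target_break = 0 /* filled in by the emitter */
        };

        return br;
}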
69
70 /* Generic in-memory data type representing a single logical instruction, rather
71 * than a single instruction group. This is the preferred form for code gen.
72  * Multiple midgard_instructions will later be combined during scheduling,
73 * though this is not represented in this structure. Its format bridges
74 * the low-level binary representation with the higher level semantic meaning.
75 *
76 * Notably, it allows registers to be specified as block local SSA, for code
77 * emitted before the register allocation pass.
78 */
79
80 #define MIR_SRC_COUNT 3
81 #define MIR_VEC_COMPONENTS 16
82
83 typedef struct midgard_instruction {
84 /* Must be first for casting */
85 struct list_head link;
86
87 unsigned type; /* ALU, load/store, texture */
88
89 /* Instruction arguments represented as block-local SSA
90 * indices, rather than registers. ~0 means unused. */
91 unsigned src[3];
92 unsigned dest;
93
94 /* vec16 swizzle, unpacked, per source */
95 unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];
96
97 /* Special fields for an ALU instruction */
98 midgard_reg_info registers;
99
100 /* I.e. (1 << alu_bit) */
101 int unit;
102
103 bool has_constants;
104 uint32_t constants[4];
105 uint16_t inline_constant;
106 bool has_blend_constant;
107 bool has_inline_constant;
108
109 bool compact_branch;
110 bool writeout;
111 bool prepacked_branch;
112
113 /* Kind of a hack, but hint against aggressive DCE */
114 bool dont_eliminate;
115
116 /* Masks in a saneish format. One bit per channel, not packed fancy.
117 * Use this instead of the op specific ones, and switch over at emit
118 * time */
119
120 uint16_t mask;
121
122 /* For ALU ops only: set to true to invert (bitwise NOT) the
123  * destination of an integer-out op. Not implemented in hardware but
124 * allows more optimizations */
125
126 bool invert;
127
128 /* Hint for the register allocator not to spill the destination written
129  * by this instruction (because it is a spill/unspill node itself) */
130
131 bool no_spill;
132
133 /* Generic hint for intra-pass use */
134 bool hint;
135
136 /* During scheduling, the backwards dependency graph
137 * (DAG). nr_dependencies is the number of unscheduled
138 * instructions that must still be scheduled after
139  * (before) this instruction. dependents lists which
140 * instructions need to be scheduled before (after) this
141 * instruction. */
142
143 unsigned nr_dependencies;
144 BITSET_WORD *dependents;
145
146 union {
147 midgard_load_store_word load_store;
148 midgard_vector_alu alu;
149 midgard_texture_word texture;
150 midgard_branch_extended branch_extended;
151 uint16_t br_compact;
152
153 /* General branch, rather than packed br_compact. Higher level
154 * than the other components */
155 midgard_branch branch;
156 };
157 } midgard_instruction;
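
/* A minimal sketch (hypothetical helper): the mask/swizzle conventions above
 * in action. For a mov-like ALU op whose live source is src[1], writing only
 * .xy with the two components exchanged looks like this in the unpacked
 * form; packing into the hardware encoding happens at emit time. */

static inline void
example_write_yx(midgard_instruction *ins)
{
        ins->mask = 0x3;        /* one bit per channel: write x and y only */
        ins->swizzle[1][0] = 1; /* destination x reads source component y */
        ins->swizzle[1][1] = 0; /* destination y reads source component x */
}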
158
159 typedef struct midgard_block {
160 /* Link to next block. Must be first for mir_get_block */
161 struct list_head link;
162
163 /* List of midgard_instructions emitted for the current block */
164 struct list_head instructions;
165
166 /* Index of the block in source order */
167 unsigned source_id;
168
169 bool is_scheduled;
170
171 /* List of midgard_bundles emitted (after the scheduler has run) */
172 struct util_dynarray bundles;
173
174 /* Number of quadwords _actually_ emitted, as determined after scheduling */
175 unsigned quadword_count;
176
177 /* Succeeding blocks. The compiler should not necessarily rely on
178 * source-order traversal */
179 struct midgard_block *successors[2];
180 unsigned nr_successors;
181
182 struct set *predecessors;
183
184         /* The successor pointers form a graph, and in the case of
185          * complex control flow, this graph can have cycles. To aid
186          * traversal during liveness analysis, we have a visited flag
187          * for passes to use as they see fit, provided they clean up
188          * afterwards (see the traversal sketch near the CFG macros below) */
189 bool visited;
190
191         /* In liveness analysis, these are per-component live masks for the
192          * indices used in the block. Scalar compilers have the luxury of using
193 * simple bit fields, but for us, liveness is a vector idea. */
194 uint16_t *live_in;
195 uint16_t *live_out;
196 } midgard_block;
197
198 typedef struct midgard_bundle {
199 /* Tag for the overall bundle */
200 int tag;
201
202 /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
203 * sadd, vadd, smul, vlut, branch) */
204 int instruction_count;
205 midgard_instruction *instructions[6];
206
207 /* Bundle-wide ALU configuration */
208 int padding;
209 int control;
210 bool has_embedded_constants;
211 float constants[4];
212 bool has_blend_constant;
213 } midgard_bundle;
214
215 typedef struct compiler_context {
216 nir_shader *nir;
217 gl_shader_stage stage;
218
219 /* The screen we correspond to */
220 struct midgard_screen *screen;
221
222         /* Is this internally a blend shader? Only possible when stage == FRAGMENT */
223 bool is_blend;
224
225 /* Tracking for blend constant patching */
226 int blend_constant_offset;
227
228 /* Number of bytes used for Thread Local Storage */
229 unsigned tls_size;
230
231 /* Count of spills and fills for shaderdb */
232 unsigned spills;
233 unsigned fills;
234
235 /* Current NIR function */
236 nir_function *func;
237
238 /* Allocated compiler temporary counter */
239 unsigned temp_alloc;
240
241 /* Unordered list of midgard_blocks */
242 int block_count;
243 struct list_head blocks;
244
245 /* TODO merge with block_count? */
246 unsigned block_source_count;
247
248         /* The block we are currently emitting into */
249 midgard_block *current_block;
250
251         /* If there is a preset after-block, use it; otherwise, emit_block will create one if this is NULL */
252 midgard_block *after_block;
253
254 /* The current "depth" of the loop, for disambiguating breaks/continues
255 * when using nested loops */
256 int current_loop_depth;
257
258 /* Total number of loops for shader-db */
259 unsigned loop_count;
260
261 /* Constants which have been loaded, for later inlining */
262 struct hash_table_u64 *ssa_constants;
263
264 /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
265 struct hash_table_u64 *hash_to_temp;
266 int temp_count;
267 int max_hash;
268
269 /* Just the count of the max register used. Higher count => higher
270 * register pressure */
271 int work_registers;
272
273 /* Used for cont/last hinting. Increase when a tex op is added.
274 * Decrease when a tex op is removed. */
275 int texture_op_count;
276
277 /* The number of uniforms allowable for the fast path */
278 int uniform_cutoff;
279
280 /* Count of instructions emitted from NIR overall, across all blocks */
281 int instruction_count;
282
283 /* Alpha ref value passed in */
284 float alpha_ref;
285
286 unsigned quadword_count;
287
288 /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
289 unsigned sysvals[MAX_SYSVAL_COUNT];
290 unsigned sysval_count;
291 struct hash_table_u64 *sysval_to_id;
292
293 /* Bitmask of valid metadata */
294 unsigned metadata;
295
296 unsigned gpu_id;
297 } compiler_context;
298
299 /* Per-block live_in/live_out */
300 #define MIDGARD_METADATA_LIVENESS (1 << 0)
301
302 /* Helpers for manipulating the above structures (forming the driver IR) */
303
304 /* Append instruction to end of current block */
305
306 static inline midgard_instruction *
307 mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
308 {
309 midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
310 memcpy(heap, &ins, sizeof(ins));
311 return heap;
312 }
313
314 static inline midgard_instruction *
315 emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
316 {
317 midgard_instruction *u = mir_upload_ins(ctx, ins);
318 list_addtail(&u->link, &ctx->current_block->instructions);
319 return u;
320 }
321
322 static inline struct midgard_instruction *
323 mir_insert_instruction_before(struct compiler_context *ctx,
324 struct midgard_instruction *tag,
325 struct midgard_instruction ins)
326 {
327 struct midgard_instruction *u = mir_upload_ins(ctx, ins);
328 list_addtail(&u->link, &tag->link);
329 return u;
330 }
331
332 static inline void
333 mir_remove_instruction(struct midgard_instruction *ins)
334 {
335 list_del(&ins->link);
336 }
337
338 static inline midgard_instruction*
339 mir_prev_op(struct midgard_instruction *ins)
340 {
341 return list_last_entry(&(ins->link), midgard_instruction, link);
342 }
343
344 static inline midgard_instruction*
345 mir_next_op(struct midgard_instruction *ins)
346 {
347 return list_first_entry(&(ins->link), midgard_instruction, link);
348 }
349
350 #define mir_foreach_block(ctx, v) \
351 list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)
352
353 #define mir_foreach_block_from(ctx, from, v) \
354 list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
355
356 #define mir_foreach_instr(ctx, v) \
357 list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link)
358
359 #define mir_foreach_instr_safe(ctx, v) \
360 list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link)
361
362 #define mir_foreach_instr_in_block(block, v) \
363 list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)
364 #define mir_foreach_instr_in_block_rev(block, v) \
365 list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)
366
367 #define mir_foreach_instr_in_block_safe(block, v) \
368 list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
369
370 #define mir_foreach_instr_in_block_safe_rev(block, v) \
371 list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)
372
373 #define mir_foreach_instr_in_block_from(block, v, from) \
374 list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)
375
376 #define mir_foreach_instr_in_block_from_rev(block, v, from) \
377 list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)
378
379 #define mir_foreach_bundle_in_block(block, v) \
380 util_dynarray_foreach(&block->bundles, midgard_bundle, v)
381
382 #define mir_foreach_bundle_in_block_rev(block, v) \
383 util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)
384
385 #define mir_foreach_instr_in_block_scheduled_rev(block, v) \
386 midgard_instruction* v; \
387 signed i = 0; \
388 mir_foreach_bundle_in_block_rev(block, _bundle) \
389 for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
390 i >= 0; \
391 --i, v = _bundle->instructions[i]) \
392
393 #define mir_foreach_instr_global(ctx, v) \
394 mir_foreach_block(ctx, v_block) \
395 mir_foreach_instr_in_block(v_block, v)
396
397 #define mir_foreach_instr_global_safe(ctx, v) \
398 mir_foreach_block(ctx, v_block) \
399 mir_foreach_instr_in_block_safe(v_block, v)
400
401 #define mir_foreach_successor(blk, v) \
402 struct midgard_block *v; \
403 struct midgard_block **_v; \
404 for (_v = &blk->successors[0], \
405 v = *_v; \
406 v != NULL && _v < &blk->successors[2]; \
407 _v++, v = *_v) \
408
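/* A minimal sketch (hypothetical pass helper) of the visited protocol noted
 * on midgard_block: mark every block reachable from blk, tolerating cycles
 * in the CFG. Per that comment, the caller is responsible for clearing the
 * flags afterwards, e.g. with mir_foreach_block(ctx, b) b->visited = false; */

static inline void
example_mark_reachable(struct midgard_block *blk)
{
        if (blk->visited)
                return;

        blk->visited = true;

        mir_foreach_successor(blk, succ)
                example_mark_reachable(succ);
}
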
409 /* Based on set_foreach, expanded with automatic type casts */
410
411 #define mir_foreach_predecessor(blk, v) \
412 struct set_entry *_entry_##v; \
413 struct midgard_block *v; \
414 for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
415 v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
416 _entry_##v != NULL; \
417 _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
418 v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
419
420 #define mir_foreach_src(ins, v) \
421 for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
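
/* A minimal sketch (hypothetical helper) combining the iteration macros
 * above: count how many reads of a given index exist across the whole
 * shader. The real helper for this is mir_use_count(), declared later in
 * this header. */

static inline unsigned
example_count_reads(compiler_context *ctx, unsigned index)
{
        unsigned count = 0;

        mir_foreach_instr_global(ctx, ins) {
                mir_foreach_src(ins, s) {
                        if (ins->src[s] == index)
                                ++count;
                }
        }

        return count;
}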
422
423 static inline midgard_instruction *
424 mir_last_in_block(struct midgard_block *block)
425 {
426 return list_last_entry(&block->instructions, struct midgard_instruction, link);
427 }
428
429 static inline midgard_block *
430 mir_get_block(compiler_context *ctx, int idx)
431 {
432 struct list_head *lst = &ctx->blocks;
433
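        /* Walk idx + 1 links: one to step past the list-head sentinel in
         * ctx->blocks, then idx more to reach the idx'th block in source order */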
434 while ((idx--) + 1)
435 lst = lst->next;
436
437 return (struct midgard_block *) lst;
438 }
439
440 static inline midgard_block *
441 mir_exit_block(struct compiler_context *ctx)
442 {
443 midgard_block *last = list_last_entry(&ctx->blocks,
444 struct midgard_block, link);
445
446 /* The last block must be empty logically but contains branch writeout
447 * for fragment shaders */
448
449 assert(last->nr_successors == 0);
450
451 return last;
452 }
453
454 static inline bool
455 mir_is_alu_bundle(midgard_bundle *bundle)
456 {
457 return IS_ALU(bundle->tag);
458 }
459
460 /* Registers/SSA are distinguished in the backend by the bottom-most bit */
461
462 #define IS_REG (1)
463
464 static inline unsigned
465 make_compiler_temp(compiler_context *ctx)
466 {
467 return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
468 }
469
470 static inline unsigned
471 make_compiler_temp_reg(compiler_context *ctx)
472 {
473 return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | IS_REG;
474 }
475
476 static inline unsigned
477 nir_src_index(compiler_context *ctx, nir_src *src)
478 {
479 if (src->is_ssa)
480 return (src->ssa->index << 1) | 0;
481 else {
482 assert(!src->reg.indirect);
483 return (src->reg.reg->index << 1) | IS_REG;
484 }
485 }
486
487 static inline unsigned
488 nir_alu_src_index(compiler_context *ctx, nir_alu_src *src)
489 {
490 return nir_src_index(ctx, &src->src);
491 }
492
493 static inline unsigned
494 nir_dest_index(compiler_context *ctx, nir_dest *dst)
495 {
496 if (dst->is_ssa)
497 return (dst->ssa.index << 1) | 0;
498 else {
499 assert(!dst->reg.indirect);
500 return (dst->reg.reg->index << 1) | IS_REG;
501 }
502 }
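
/* A minimal sketch (hypothetical helpers) of the encoding above: the bottom
 * bit tags NIR registers, so decoding an index is just a test and a shift. */

static inline bool
example_index_is_reg(unsigned index)
{
        return index & IS_REG;
}

static inline unsigned
example_index_to_nir_index(unsigned index)
{
        return index >> 1;
}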
503
504
505
506 /* MIR manipulation */
507
508 void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
509 void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
510 void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
511 void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
512 void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
513 void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle);
514 bool mir_single_use(compiler_context *ctx, unsigned value);
515 bool mir_special_index(compiler_context *ctx, unsigned idx);
516 unsigned mir_use_count(compiler_context *ctx, unsigned value);
517 bool mir_is_written_before(compiler_context *ctx, midgard_instruction *ins, unsigned node);
518 uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
519 unsigned mir_ubo_shift(midgard_load_store_op op);
520 midgard_reg_mode mir_typesize(midgard_instruction *ins);
521 midgard_reg_mode mir_srcsize(midgard_instruction *ins, unsigned i);
522 unsigned mir_bytes_for_mode(midgard_reg_mode mode);
523 uint16_t mir_from_bytemask(uint16_t bytemask, midgard_reg_mode mode);
524 uint16_t mir_bytemask(midgard_instruction *ins);
525 uint16_t mir_round_bytemask_down(uint16_t mask, midgard_reg_mode mode);
526 void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
527
528 /* MIR printing */
529
530 void mir_print_instruction(midgard_instruction *ins);
531 void mir_print_bundle(midgard_bundle *ctx);
532 void mir_print_block(midgard_block *block);
533 void mir_print_shader(compiler_context *ctx);
534 bool mir_nontrivial_source2_mod(midgard_instruction *ins);
535 bool mir_nontrivial_source2_mod_simple(midgard_instruction *ins);
536 bool mir_nontrivial_outmod(midgard_instruction *ins);
537
538 void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
539 void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
540 void mir_flip(midgard_instruction *ins);
541 void mir_compute_temp_count(compiler_context *ctx);
542
543 /* 'Intrinsic' move for aliasing */
544
545 static inline midgard_instruction
546 v_mov(unsigned src, unsigned dest)
547 {
548 midgard_instruction ins = {
549 .type = TAG_ALU_4,
550 .mask = 0xF,
551 .src = { SSA_UNUSED, src, SSA_UNUSED },
552 .swizzle = SWIZZLE_IDENTITY,
553 .dest = dest,
554 .alu = {
555 .op = midgard_alu_op_imov,
556 .reg_mode = midgard_reg_mode_32,
557 .dest_override = midgard_dest_override_none,
558 .outmod = midgard_outmod_int_wrap
559 },
560 };
561
562 return ins;
563 }
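
/* A minimal sketch (hypothetical helper): a typical use of v_mov, copying a
 * value into a fresh compiler temporary appended to the current block. */

static inline unsigned
example_copy_to_temp(compiler_context *ctx, unsigned src)
{
        unsigned temp = make_compiler_temp(ctx);
        midgard_instruction mov = v_mov(src, temp);
        emit_mir_instruction(ctx, mov);

        return temp;
}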
564
565 static inline bool
566 mir_has_arg(midgard_instruction *ins, unsigned arg)
567 {
568 if (!ins)
569 return false;
570
571 for (unsigned i = 0; i < ARRAY_SIZE(ins->src); ++i) {
572 if (ins->src[i] == arg)
573 return true;
574 }
575
576 return false;
577 }
578
579 /* Scheduling */
580
581 void schedule_program(compiler_context *ctx);
582
583 /* Register allocation */
584
585 struct ra_graph;
586
587 /* Broad types of register classes so we can handle special
588 * registers */
589
590 #define NR_REG_CLASSES 6
591
592 #define REG_CLASS_WORK 0
593 #define REG_CLASS_LDST 1
594 #define REG_CLASS_LDST27 2
595 #define REG_CLASS_TEXR 3
596 #define REG_CLASS_TEXW 4
597 #define REG_CLASS_FRAGC 5
598
599 void mir_lower_special_reads(compiler_context *ctx);
600 struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
601 void install_registers(compiler_context *ctx, struct ra_graph *g);
602 void mir_liveness_ins_update(uint16_t *live, midgard_instruction *ins, unsigned max);
603 void mir_compute_liveness(compiler_context *ctx);
604 void mir_invalidate_liveness(compiler_context *ctx);
605 bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);
606
607 void mir_create_pipeline_registers(compiler_context *ctx);
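
/* A minimal sketch (hypothetical pass skeleton) of the liveness metadata
 * protocol: ensure per-block live_in/live_out are valid before reading them
 * (tracked via ctx->metadata & MIDGARD_METADATA_LIVENESS), and invalidate
 * the cached masks if the pass then rewrites the IR. */

static inline void
example_pass_with_liveness(compiler_context *ctx)
{
        mir_compute_liveness(ctx);

        /* ... analysis reading block->live_in / block->live_out ... */

        /* The IR changed, so the cached liveness is stale */
        mir_invalidate_liveness(ctx);
}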
608
609 void
610 midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count);
611
612 midgard_instruction *
613 emit_ubo_read(
614 compiler_context *ctx,
615 nir_instr *instr,
616 unsigned dest,
617 unsigned offset,
618 nir_src *indirect_offset,
619 unsigned index);
620
621 void
622 emit_sysval_read(compiler_context *ctx, nir_instr *instr, signed dest_override, unsigned nr_components);
623
624 void
625 midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);
626
627 void
628 midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);
629
630 bool mir_op_computes_derivatives(unsigned op);
631
632 /* Final emission */
633
634 void emit_binary_bundle(
635 compiler_context *ctx,
636 midgard_bundle *bundle,
637 struct util_dynarray *emission,
638 int next_tag);
639
640 bool
641 nir_undef_to_zero(nir_shader *shader);
642
643 /* Optimizations */
644
645 bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
646 bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
647 bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
648 bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
649 bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
650
651 void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
652 bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
653 bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
654 bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
655 bool midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block);
656 bool midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block);
657
658 #endif