pan/mdg: Handle nir_tex_src_ms_index
[mesa.git] / src/panfrost/midgard/compiler.h
/*
 * Copyright (C) 2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _MDG_COMPILER_H
#define _MDG_COMPILER_H

#include "midgard.h"
#include "helpers.h"
#include "midgard_compile.h"
#include "midgard_ops.h"

#include "util/hash_table.h"
#include "util/u_dynarray.h"
#include "util/set.h"
#include "util/list.h"

#include "main/mtypes.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"
#include "panfrost/util/lcra.h"

/* Forward declare */
struct midgard_block;

/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
 * the hardware), which is why it must be zero. TARGET_DISCARD signals that
 * the instruction is actually a discard op. */

#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
#define TARGET_DISCARD 3

typedef struct midgard_branch {
        /* If conditional, the condition is specified in r31.w */
        bool conditional;

        /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */
        bool invert_conditional;

        /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */
        unsigned target_type;

        /* The actual target */
        union {
                int target_block;
                int target_break;
                int target_continue;
        };
} midgard_branch;
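
/* For illustration, a hypothetical unconditional break out of the innermost
 * loop might be described as the sketch below; the use of
 * ctx->current_loop_depth assumes a populated compiler_context as defined
 * later in this header:
 *
 *    midgard_branch br = {
 *            .conditional = false,
 *            .target_type = TARGET_BREAK,
 *            .target_break = ctx->current_loop_depth,
 *    };
 */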

#define PAN_WRITEOUT_C 1
#define PAN_WRITEOUT_Z 2
#define PAN_WRITEOUT_S 4

/* Generic in-memory data type representing a single logical instruction,
 * rather than a single instruction group. This is the preferred form for code
 * gen. Multiple midgard_instructions will later be combined during
 * scheduling, though this is not represented in this structure. Its format
 * bridges the low-level binary representation with the higher-level semantic
 * meaning.
 *
 * Notably, it allows registers to be specified as block local SSA, for code
 * emitted before the register allocation pass.
 */

#define MIR_SRC_COUNT 4
#define MIR_VEC_COMPONENTS 16

typedef struct midgard_instruction {
        /* Must be first for casting */
        struct list_head link;

        unsigned type; /* ALU, load/store, texture */

        /* Instruction arguments represented as block-local SSA
         * indices, rather than registers. ~0 means unused. */
        unsigned src[MIR_SRC_COUNT];
        unsigned dest;

        /* vec16 swizzle, unpacked, per source */
        unsigned swizzle[MIR_SRC_COUNT][MIR_VEC_COMPONENTS];

        /* Types! */
        nir_alu_type src_types[MIR_SRC_COUNT];
        nir_alu_type dest_type;

        /* Packing ops have non-32-bit dest types even though they
         * functionally work at the 32-bit level; use this as a signal to
         * disable copyprop. We may need synthetic pack ops instead. */
        bool is_pack;

        /* Modifiers, depending on type */
        union {
                struct {
                        bool src_abs[MIR_SRC_COUNT];
                        bool src_neg[MIR_SRC_COUNT];
                };

                struct {
                        bool src_shift[MIR_SRC_COUNT];
                };
        };

        /* Out of the union for csel (could maybe be fixed...) */
        bool src_invert[MIR_SRC_COUNT];

        /* If the op supports it */
        enum midgard_roundmode roundmode;

        /* Special fields for an ALU instruction */
        midgard_reg_info registers;

        /* For textures: should helpers execute this instruction (instead of
         * just helping with derivatives)? Should helpers terminate after? */
        bool helper_terminate;
        bool helper_execute;

        /* I.e. (1 << alu_bit) */
        int unit;

        bool has_constants;
        midgard_constants constants;
        uint16_t inline_constant;
        bool has_blend_constant;
        bool has_inline_constant;

        bool compact_branch;
        uint8_t writeout;
        bool last_writeout;

        /* Masks in a saneish format. One bit per channel, not packed fancy.
         * Use this instead of the op specific ones, and switch over at emit
         * time */

        uint16_t mask;

        /* Hint for the register allocator not to spill the destination written
         * from this instruction (because it is a spill/unspill node itself).
         * Bitmask of spilled classes */

        unsigned no_spill;

        /* Generic hint for intra-pass use */
        bool hint;

        /* During scheduling, the backwards dependency graph
         * (DAG). nr_dependencies is the number of unscheduled
         * instructions that must still be scheduled after
         * (before) this instruction. dependents are which
         * instructions need to be scheduled before (after) this
         * instruction. */

        unsigned nr_dependencies;
        BITSET_WORD *dependents;

        union {
                midgard_load_store_word load_store;
                midgard_vector_alu alu;
                midgard_texture_word texture;
                midgard_branch_extended branch_extended;
                uint16_t br_compact;

                /* General branch, rather than packed br_compact. Higher level
                 * than the other components */
                midgard_branch branch;
        };
} midgard_instruction;
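
/* As a hypothetical example of the mask convention (a sketch, not code from
 * this file): a 32-bit op writing only the .xy channels of its destination
 * would carry ins.mask == 0x3, which mir_bytemask() below widens to 0x00FF,
 * one bit per byte of the 16-byte vector register. */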

typedef struct midgard_block {
        pan_block base;

        bool scheduled;

        /* List of midgard_bundles emitted (after the scheduler has run) */
        struct util_dynarray bundles;

        /* Number of quadwords _actually_ emitted, as determined after scheduling */
        unsigned quadword_count;

        /* Indicates this is a fixed-function fragment epilogue block */
        bool epilogue;

        /* Are helper invocations required by this block? */
        bool helpers_in;
} midgard_block;

typedef struct midgard_bundle {
        /* Tag for the overall bundle */
        int tag;

        /* Instructions contained by the bundle. instruction_count <= 6 (vmul,
         * sadd, vadd, smul, vlut, branch) */
        int instruction_count;
        midgard_instruction *instructions[6];

        /* Bundle-wide ALU configuration */
        int padding;
        int control;
        bool has_embedded_constants;
        midgard_constants constants;
        bool has_blend_constant;
        bool last_writeout;
} midgard_bundle;

enum midgard_rt_id {
        MIDGARD_COLOR_RT0,
        MIDGARD_COLOR_RT1,
        MIDGARD_COLOR_RT2,
        MIDGARD_COLOR_RT3,
        MIDGARD_ZS_RT,
        MIDGARD_NUM_RTS,
};

typedef struct compiler_context {
        nir_shader *nir;
        gl_shader_stage stage;

        /* Is internally a blend shader? Depends on stage == FRAGMENT */
        bool is_blend;

        /* Render target number for a keyed blend shader. Depends on is_blend */
        unsigned blend_rt;

        /* Index to precolour to r0 for an input blend colour */
        unsigned blend_input;

        /* Tracking for blend constant patching */
        int blend_constant_offset;

        /* Number of bytes used for Thread Local Storage */
        unsigned tls_size;

        /* Count of spills and fills for shaderdb */
        unsigned spills;
        unsigned fills;

        /* Current NIR function */
        nir_function *func;

        /* Allocated compiler temporary counter */
        unsigned temp_alloc;

        /* Unordered list of midgard_blocks */
        int block_count;
        struct list_head blocks;

        /* TODO merge with block_count? */
        unsigned block_source_count;

        /* List of midgard_instructions emitted for the current block */
        midgard_block *current_block;

        /* If non-NULL, a preset block to continue into once the current block
         * ends; otherwise emit_block will create one */
        midgard_block *after_block;

        /* The current "depth" of the loop, for disambiguating breaks/continues
         * when using nested loops */
        int current_loop_depth;

        /* Total number of loops for shader-db */
        unsigned loop_count;

        /* Constants which have been loaded, for later inlining */
        struct hash_table_u64 *ssa_constants;

        /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */
        struct hash_table_u64 *hash_to_temp;
        int temp_count;
        int max_hash;

        /* Set of NIR indices that were already emitted as outmods */
        BITSET_WORD *already_emitted;

        /* Just the count of the max register used. Higher count => higher
         * register pressure */
        int work_registers;

        /* The number of uniforms allowable for the fast path */
        int uniform_cutoff;

        /* Count of instructions emitted from NIR overall, across all blocks */
        int instruction_count;

        /* Alpha ref value passed in */
        float alpha_ref;

        unsigned quadword_count;

        /* Bitmask of valid metadata */
        unsigned metadata;

        /* Model-specific quirk set */
        uint32_t quirks;

        /* Writeout instructions for each render target */
        midgard_instruction *writeout_branch[MIDGARD_NUM_RTS];

        struct panfrost_sysvals sysvals;
} compiler_context;

/* Per-block live_in/live_out */
#define MIDGARD_METADATA_LIVENESS (1 << 0)

/* Helpers for manipulating the above structures (forming the driver IR) */

/* Append instruction to end of current block */

static inline midgard_instruction *
mir_upload_ins(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *heap = ralloc(ctx, struct midgard_instruction);
        memcpy(heap, &ins, sizeof(ins));
        return heap;
}

static inline midgard_instruction *
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
        midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}
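
/* Typical emission during NIR -> MIR translation might look like the sketch
 * below; v_mov is defined later in this header, and `tmp` stands in for a
 * hypothetical destination from make_compiler_temp():
 *
 *    midgard_instruction ins = v_mov(nir_src_index(ctx, &some_src), tmp);
 *    emit_mir_instruction(ctx, ins);
 */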

static inline struct midgard_instruction *
mir_insert_instruction_before(struct compiler_context *ctx,
                              struct midgard_instruction *tag,
                              struct midgard_instruction ins)
{
        struct midgard_instruction *u = mir_upload_ins(ctx, ins);
        list_addtail(&u->link, &tag->link);
        return u;
}

static inline void
mir_remove_instruction(struct midgard_instruction *ins)
{
        list_del(&ins->link);
}

static inline midgard_instruction*
mir_prev_op(struct midgard_instruction *ins)
{
        return list_last_entry(&(ins->link), midgard_instruction, link);
}

static inline midgard_instruction*
mir_next_op(struct midgard_instruction *ins)
{
        return list_first_entry(&(ins->link), midgard_instruction, link);
}

#define mir_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define mir_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)

#define mir_foreach_instr_in_block(block, v) \
        list_for_each_entry(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->base.instructions, link)

#define mir_foreach_bundle_in_block(block, v) \
        util_dynarray_foreach(&block->bundles, midgard_bundle, v)

#define mir_foreach_bundle_in_block_rev(block, v) \
        util_dynarray_foreach_reverse(&block->bundles, midgard_bundle, v)

#define mir_foreach_instr_in_block_scheduled_rev(block, v) \
        midgard_instruction* v; \
        signed i = 0; \
        mir_foreach_bundle_in_block_rev(block, _bundle) \
                for (i = (_bundle->instruction_count - 1), v = _bundle->instructions[i]; \
                     i >= 0; \
                     --i, v = (i >= 0) ? _bundle->instructions[i] : NULL)

#define mir_foreach_instr_global(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block(((midgard_block *) v_block), v)

#define mir_foreach_instr_global_safe(ctx, v) \
        mir_foreach_block(ctx, v_block) \
                mir_foreach_instr_in_block_safe(((midgard_block *) v_block), v)

/* Based on set_foreach, expanded with automatic type casts */

#define mir_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        struct midgard_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
             v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
             _entry_##v != NULL; \
             _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
             v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))

#define mir_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
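
/* A common traversal with these macros: counting how many instructions read
 * a given node, essentially how mir_use_count() below could be written
 * (sketch; `node` and `uses` are hypothetical locals):
 *
 *    unsigned uses = 0;
 *    mir_foreach_instr_global(ctx, ins) {
 *            mir_foreach_src(ins, s) {
 *                    if (ins->src[s] == node)
 *                            ++uses;
 *            }
 *    }
 */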

static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
        return list_last_entry(&block->base.instructions, struct midgard_instruction, link);
}

static inline midgard_block *
mir_get_block(compiler_context *ctx, int idx)
{
        struct list_head *lst = &ctx->blocks;

        /* Walk (idx + 1) links forward, stepping past the list head sentinel
         * to reach the idx-th block */
        while ((idx--) + 1)
                lst = lst->next;

        return (struct midgard_block *) lst;
}

static inline bool
mir_is_alu_bundle(midgard_bundle *bundle)
{
        return IS_ALU(bundle->tag);
}

static inline unsigned
make_compiler_temp(compiler_context *ctx)
{
        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
}

static inline unsigned
make_compiler_temp_reg(compiler_context *ctx)
{
        return ((ctx->func->impl->reg_alloc + ctx->temp_alloc++) << 1) | PAN_IS_REG;
}

static inline unsigned
nir_ssa_index(nir_ssa_def *ssa)
{
        return (ssa->index << 1) | 0;
}

static inline unsigned
nir_src_index(compiler_context *ctx, nir_src *src)
{
        if (src->is_ssa)
                return nir_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | PAN_IS_REG;
        }
}

static inline unsigned
nir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return (dst->ssa.index << 1) | 0;
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | PAN_IS_REG;
        }
}

/* MIR manipulation */

void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_src(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new);
void mir_rewrite_index_dst_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old, unsigned new);
void mir_rewrite_index_src_swizzle(compiler_context *ctx, unsigned old, unsigned new, unsigned *swizzle);
bool mir_single_use(compiler_context *ctx, unsigned value);
unsigned mir_use_count(compiler_context *ctx, unsigned value);
uint16_t mir_bytemask_of_read_components(midgard_instruction *ins, unsigned node);
uint16_t mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i);
uint16_t mir_from_bytemask(uint16_t bytemask, unsigned bits);
uint16_t mir_bytemask(midgard_instruction *ins);
uint16_t mir_round_bytemask_up(uint16_t mask, unsigned bits);
void mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask);
signed mir_upper_override(midgard_instruction *ins, unsigned inst_size);
unsigned mir_components_for_type(nir_alu_type T);

/* MIR printing */

void mir_print_instruction(midgard_instruction *ins);
void mir_print_bundle(midgard_bundle *ctx);
void mir_print_block(midgard_block *block);
void mir_print_shader(compiler_context *ctx);
bool mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle);
bool mir_nontrivial_outmod(midgard_instruction *ins);

void mir_insert_instruction_before_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block *block, midgard_instruction *tag, midgard_instruction ins);
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);

void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);

/* 'Intrinsic' move for aliasing */

static inline midgard_instruction
v_mov(unsigned src, unsigned dest)
{
        midgard_instruction ins = {
                .type = TAG_ALU_4,
                .mask = 0xF,
                .src = { ~0, src, ~0, ~0 },
                .src_types = { 0, nir_type_uint32 },
                .swizzle = SWIZZLE_IDENTITY,
                .dest = dest,
                .dest_type = nir_type_uint32,
                .alu = {
                        .op = midgard_alu_op_imov,
                        .reg_mode = midgard_reg_mode_32,
                        .outmod = midgard_outmod_int_wrap
                },
        };

        return ins;
}
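
/* Usage sketch: emit a full-width move of node `from` into node `to` at the
 * end of the current block (names hypothetical):
 *
 *    midgard_instruction mov = v_mov(from, to);
 *    emit_mir_instruction(ctx, mov);
 *
 * Note the operand is placed in src[1], per the initializer above; src[0]
 * stays ~0 (unused).
 */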

/* Broad types of register classes so we can handle special
 * registers */

#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_TEXR 3
#define REG_CLASS_TEXW 4

/* Like a move, but to thread local storage! */

static inline midgard_instruction
v_load_store_scratch(
                unsigned srcdest,
                unsigned index,
                bool is_store,
                unsigned mask)
{
        /* We index by 32-bit vec4s */
        unsigned byte = (index * 4 * 4);

        midgard_instruction ins = {
                .type = TAG_LOAD_STORE_4,
                .mask = mask,
                .dest_type = nir_type_uint32,
                .dest = ~0,
                .src = { ~0, ~0, ~0, ~0 },
                .swizzle = SWIZZLE_IDENTITY_4,
                .load_store = {
                        .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,

                        /* For register spilling - to thread local storage */
                        .arg_1 = 0xEA,
                        .arg_2 = 0x1E,
                },

                /* If we spill an unspill, RA goes into an infinite loop */
                .no_spill = (1 << REG_CLASS_WORK)
        };

        ins.constants.u32[0] = byte;

        if (is_store) {
                ins.src[0] = srcdest;
                ins.src_types[0] = nir_type_uint32;

                /* Ensure we are tightly swizzled so liveness analysis is
                 * correct */

                for (unsigned i = 0; i < 4; ++i) {
                        if (!(mask & (1 << i)))
                                ins.swizzle[0][i] = COMPONENT_X;
                }
        } else
                ins.dest = srcdest;

        return ins;
}
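
/* During spilling, the register allocator can emit a paired store/load
 * against the same TLS slot, along the lines of this sketch (`spill_node`,
 * `slot`, and the insertion points `def`/`use` are hypothetical):
 *
 *    midgard_instruction st = v_load_store_scratch(spill_node, slot, true, 0xF);
 *    mir_insert_instruction_after_scheduled(ctx, block, def, st);
 *    ...
 *    midgard_instruction ld = v_load_store_scratch(spill_node, slot, false, 0xF);
 *    mir_insert_instruction_before_scheduled(ctx, block, use, ld);
 */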

static inline bool
mir_has_arg(midgard_instruction *ins, unsigned arg)
{
        if (!ins)
                return false;

        mir_foreach_src(ins, i) {
                if (ins->src[i] == arg)
                        return true;
        }

        return false;
}

/* Scheduling */

void midgard_schedule_program(compiler_context *ctx);

void mir_ra(compiler_context *ctx);
void mir_squeeze_index(compiler_context *ctx);
void mir_lower_special_reads(compiler_context *ctx);
void mir_liveness_ins_update(uint16_t *live, midgard_instruction *ins, unsigned max);
void mir_compute_liveness(compiler_context *ctx);
void mir_invalidate_liveness(compiler_context *ctx);
bool mir_is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src);

void mir_create_pipeline_registers(compiler_context *ctx);
void midgard_promote_uniforms(compiler_context *ctx);

void
midgard_emit_derivatives(compiler_context *ctx, nir_alu_instr *instr);

void
midgard_lower_derivatives(compiler_context *ctx, midgard_block *block);

bool mir_op_computes_derivatives(gl_shader_stage stage, unsigned op);

void mir_analyze_helper_terminate(compiler_context *ctx);
void mir_analyze_helper_requirements(compiler_context *ctx);

/* Final emission */

void emit_binary_bundle(
        compiler_context *ctx,
        midgard_block *block,
        midgard_bundle *bundle,
        struct util_dynarray *emission,
        int next_tag);

bool
nir_undef_to_zero(nir_shader *shader);
bool nir_fuse_io_16(nir_shader *shader);

void midgard_nir_lod_errata(nir_shader *shader);

/* Optimizations */

bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block);
bool midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);

#endif