pan/bi: Don't hide SCHED_ADD inside HI_LATENCY
[mesa.git] src/panfrost/bifrost/compiler.h
/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#ifndef __BIFROST_COMPILER_H
#define __BIFROST_COMPILER_H

#include "bifrost.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"

/* Bifrost opcodes are tricky -- the same op may exist on both FMA and
 * ADD with two completely different opcodes, and opcodes can be varying
 * length in some cases. Then we have different opcodes for int vs float
 * and then sometimes even for different typesizes. Further, virtually
 * every op has a number of flags which depend on the op. In contrast
 * to Midgard, where you have a strict ALU/LDST/TEX division and within
 * ALU a strict int/float division and that's it... here it's a *lot* more
 * involved. As such, we use something much higher level for our IR,
 * encoding "classes" of operations, letting the opcode details get
 * sorted out at emit time.
 *
 * Please keep this list alphabetized. Please use a dictionary if you
 * don't know how to do that.
 */
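
/* For instance, a single BI_MINMAX class stands in for both min and max on
 * either execution unit; the concrete hardware opcode is derived at pack
 * time from op.minmax (defined below) together with the operand types, so
 * earlier passes never have to reason about raw opcodes. */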

enum bi_class {
        BI_ADD,
        BI_ATEST,
        BI_BRANCH,
        BI_CMP,
        BI_BLEND,
        BI_BITWISE,
        BI_CONVERT,
        BI_CSEL,
        BI_DISCARD,
        BI_FMA,
        BI_FREXP,
        BI_ISUB,
        BI_LOAD,
        BI_LOAD_UNIFORM,
        BI_LOAD_ATTR,
        BI_LOAD_VAR,
        BI_LOAD_VAR_ADDRESS,
        BI_MINMAX,
        BI_MOV,
        BI_SHIFT,
        BI_STORE,
        BI_STORE_VAR,
        BI_SPECIAL, /* _FAST, _TABLE on supported GPUs */
        BI_SWIZZLE,
        BI_TEX,
        BI_ROUND,
        BI_NUM_CLASSES
};

/* Properties of a class... */
extern unsigned bi_class_props[BI_NUM_CLASSES];

/* abs/neg/outmod valid for a float op */
#define BI_MODS (1 << 0)

/* Generic enough that little class-specific information is required. In other
 * words, it acts as a "normal" ALU op, even if the encoding ends up being
 * irregular enough to warrant a separate class */
#define BI_GENERIC (1 << 1)

/* Accepts a bifrost_roundmode */
#define BI_ROUNDMODE (1 << 2)

/* Can be scheduled to FMA */
#define BI_SCHED_FMA (1 << 3)

/* Can be scheduled to ADD */
#define BI_SCHED_ADD (1 << 4)

/* Most ALU ops can do either, actually */
#define BI_SCHED_ALL (BI_SCHED_FMA | BI_SCHED_ADD)

/* Along with setting BI_SCHED_ADD, eats up the entire cycle, so FMA must be
 * nopped out. Used for _FAST operations. */
#define BI_SCHED_SLOW (1 << 5)

/* Swizzling allowed for the 8/16-bit source */
#define BI_SWIZZLABLE (1 << 6)

/* For scheduling purposes this is a high latency instruction and must be at
 * the end of a clause. Implies ADD */
#define BI_SCHED_HI_LATENCY (1 << 7)

/* Intrinsic is vectorized and should read 4 components regardless of writemask */
#define BI_VECTOR (1 << 8)

/* Use a data register for src0/dest respectively, bypassing the usual
 * register accessor. Mutually exclusive. */
#define BI_DATA_REG_SRC (1 << 9)
#define BI_DATA_REG_DEST (1 << 10)
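
/* These flags are ORed together per class. The real table lives elsewhere in
 * the compiler; a hypothetical entry might look like
 *
 *    bi_class_props[BI_FMA] = BI_GENERIC | BI_ROUNDMODE | BI_SCHED_FMA;
 *
 * so that passes can test e.g. (bi_class_props[ins->type] & BI_SCHED_FMA). */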

/* It can't get any worse than csel4... can it? */
#define BIR_SRC_COUNT 4

/* BI_LOAD_VAR */
struct bi_load_vary {
        enum bifrost_interp_mode interp_mode;
        bool reuse;
        bool flat;
};

/* BI_BRANCH encoding the details of the branch itself as well as a pointer to
 * the target. We forward declare bi_block since this is mildly circular (not
 * strictly, but this order of the file makes more sense I think)
 *
 * We define our own enum of conditions since the conditions in the hardware
 * are packed in crazy ways that would make manipulation unwieldy (meaning
 * changes based on port swapping, etc), so we defer dealing with that until
 * emit time. Likewise, we expose NIR types instead of the crazy branch types,
 * although the restrictions do eventually apply of course. */

struct bi_block;

enum bi_cond {
        BI_COND_ALWAYS,
        BI_COND_LT,
        BI_COND_LE,
        BI_COND_GE,
        BI_COND_GT,
        BI_COND_EQ,
        BI_COND_NE,
};

struct bi_branch {
        /* Types are specified in src_types and must be compatible (either
         * both int or both float; 16/32-bit, and the same size, or 32/16 if
         * float). Types are ignored if BI_COND_ALWAYS is set for an
         * unconditional branch. */

        enum bi_cond cond;
        struct bi_block *target;
};
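
/* Illustrative sketch: an unconditional branch needs no types at all, e.g.
 *
 *    struct bi_branch br = {
 *            .cond = BI_COND_ALWAYS,
 *            .target = block, // assumed: a bi_block the caller already has
 *    };
 */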

/* Opcodes within a class */
enum bi_minmax_op {
        BI_MINMAX_MIN,
        BI_MINMAX_MAX
};

enum bi_bitwise_op {
        BI_BITWISE_AND,
        BI_BITWISE_OR,
        BI_BITWISE_XOR
};

enum bi_round_op {
        BI_ROUND_MODE, /* use round mode */
        BI_ROUND_ROUND /* i.e.: fround() */
};

enum bi_special_op {
        BI_SPECIAL_FRCP,
        BI_SPECIAL_FRSQ,
        BI_SPECIAL_FATAN,
        BI_SPECIAL_FSIN,
        BI_SPECIAL_FCOS,
        BI_SPECIAL_FEXP,
        BI_SPECIAL_FLOG2,
        BI_SPECIAL_FLOGE
};

typedef struct {
        struct list_head link; /* Must be first */
        enum bi_class type;

        /* Indices, see bir_ssa_index etc. Note zero is special cased
         * to "no argument" */
        unsigned dest;
        unsigned src[BIR_SRC_COUNT];

        /* If one of the sources has BIR_INDEX_CONSTANT */
        union {
                uint64_t u64;
                uint32_t u32;
                uint16_t u16[2];
                uint8_t u8[4];
        } constant;

        /* Floating-point modifiers, type/class permitting. If not
         * allowed for the type/class, these are ignored. */
        enum bifrost_outmod outmod;
        bool src_abs[BIR_SRC_COUNT];
        bool src_neg[BIR_SRC_COUNT];

        /* Round mode (requires BI_ROUNDMODE) */
        enum bifrost_roundmode roundmode;

        /* Writemask (bit for each affected byte). This is quite restricted --
         * ALU ops can only write to a single channel (exception: types
         * narrower than 32-bit, for which you can write to 32/N contiguous
         * aligned channels). Load/store can only write to all channels at
         * once, in a sense. But it's still better to use this generic form
         * than to have synthetic ops flying about, since we're not
         * essentially vector for RA purposes. */
        uint16_t writemask;

        /* Destination type. Usually the type of the instruction
         * itself, but if sources and destination have different
         * types, the type of the destination wins (so f2i would be
         * int). Zero if there is no destination. Bitsize included */
        nir_alu_type dest_type;

        /* Source types if required by the class */
        nir_alu_type src_types[BIR_SRC_COUNT];

        /* If the source type is 8-bit or 16-bit such that SIMD is possible,
         * and the class has BI_SWIZZLABLE, this is a swizzle in the usual
         * sense. On non-SIMD instructions, it can be used for component
         * selection, so we don't have to special case extraction. */
        uint8_t swizzle[BIR_SRC_COUNT][NIR_MAX_VEC_COMPONENTS];

        /* A class-specific op from which the actual opcode can be derived
         * (along with the above information) */

        union {
                enum bi_minmax_op minmax;
                enum bi_bitwise_op bitwise;
                enum bi_round_op round;
                enum bi_special_op special;
                enum bi_cond compare;
        } op;

        /* Union for class-specific information */
        union {
                enum bifrost_minmax_mode minmax;
                struct bi_load_vary load_vary;
                struct bi_branch branch;

                /* For CSEL, the comparison op. BI_COND_ALWAYS doesn't make
                 * sense here, but you can always just use a move for that */
                enum bi_cond csel_cond;

                /* For BLEND -- the location 0-7 */
                unsigned blend_location;
        };
} bi_instruction;
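
/* Illustrative sketch of how the fields fit together (the dest/src index
 * names are made up): a 32-bit float max writing one 32-bit channel sets
 * four bits of the byte-level writemask:
 *
 *    bi_instruction ins = {
 *            .type = BI_MINMAX,
 *            .op.minmax = BI_MINMAX_MAX,
 *            .dest = some_dest,
 *            .src = { some_src_a, some_src_b },
 *            .dest_type = nir_type_float32,
 *            .src_types = { nir_type_float32, nir_type_float32 },
 *            .writemask = 0xF, // bytes 0-3, i.e. a single 32-bit channel
 *    };
 */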

/* Scheduling takes place in two steps. Step 1 groups instructions within a
 * block into distinct clauses (bi_clause). Step 2 schedules instructions
 * within a clause into FMA/ADD pairs (bi_bundle).
 *
 * A bi_bundle contains two paired instruction pointers. If a slot is
 * unfilled, leave it NULL; the emitter will fill in a nop. */

typedef struct {
        bi_instruction *fma;
        bi_instruction *add;
} bi_bundle;
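
/* So, per the convention above, a bundle holding only an ADD-unit
 * instruction is simply
 *
 *    bi_bundle bundle = { .fma = NULL, .add = ins };
 *
 * and the emitter nops out the empty FMA slot. */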

typedef struct {
        struct list_head link;

        /* A clause can have 8 instructions in the bundled FMA/ADD sense, so
         * there can be 8 bundles. But each bundle can have both an FMA and
         * an ADD, so a clause can have up to 16 bi_instructions. Whether
         * bundles or instructions are used depends on where in scheduling
         * we are. */

        unsigned instruction_count;
        unsigned bundle_count;

        union {
                bi_instruction *instructions[16];
                bi_bundle bundles[8];
        };

        /* For scoreboarding -- the clause ID (this is not globally unique!)
         * and its dependencies in terms of other clauses, computed during
         * scheduling and used when emitting code. Dependencies are expressed
         * as a bitfield matching the hardware, except shifted by a clause
         * (the shift back to the ISA's off-by-one encoding is worked out
         * when emitting clauses) */
        unsigned scoreboard_id;
        uint8_t dependencies;

        /* Back-to-back corresponds directly to the back-to-back bit. Branch
         * conditional corresponds to the branch conditional bit, except that
         * in the emitted code it's always set if back-to-back is, whereas we
         * use the actual value (without the back-to-back implication, so to
         * speak) internally */
        bool back_to_back;
        bool branch_conditional;

        /* Assigned data register */
        unsigned data_register;

        /* Corresponds to the usual bit but shifted by a clause */
        bool data_register_write_barrier;

        /* Constants read by this clause. ISA limit. */
        uint64_t constants[8];
        unsigned constant_count;

        /* What type of high latency instruction is here, basically */
        unsigned clause_type;
} bi_clause;

typedef struct bi_block {
        pan_block base; /* must be first */

        /* If true, uses clauses; if false, uses instructions */
        bool scheduled;
        struct list_head clauses; /* list of bi_clause */
} bi_block;

typedef struct {
        nir_shader *nir;
        gl_shader_stage stage;
        struct list_head blocks; /* list of bi_block */
        struct panfrost_sysvals sysvals;
        uint32_t quirks;

        /* During NIR->BIR */
        nir_function_impl *impl;
        bi_block *current_block;
        unsigned block_name_count;
        bi_block *after_block;
        bi_block *break_block;
        bi_block *continue_block;
        bool emitted_atest;

        /* For creating temporaries */
        unsigned temp_alloc;

        /* Analysis results */
        bool has_liveness;

        /* Stats for shader-db */
        unsigned instruction_count;
        unsigned loop_count;
} bi_context;

static inline bi_instruction *
bi_emit(bi_context *ctx, bi_instruction ins)
{
        bi_instruction *u = rzalloc(ctx, bi_instruction);
        memcpy(u, &ins, sizeof(ins));
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}
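
/* Since bi_emit copies its argument into the heap, the usual pattern
 * (sketched here with made-up operands) is to build the instruction on the
 * stack and emit it:
 *
 *    bi_instruction mov = {
 *            .type = BI_MOV,
 *            .dest = some_dest,
 *            .src = { some_src },
 *            .dest_type = nir_type_uint32,
 *    };
 *    bi_emit(ctx, mov);
 */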

static inline void
bi_remove_instruction(bi_instruction *ins)
{
        list_del(&ins->link);
}

/* So we can distinguish between SSA/reg/sentinel quickly */
#define BIR_NO_ARG (0)
#define BIR_IS_REG (1)

/* If high bits are set, instead of SSA/registers, we have specials indexed by
 * the low bits if necessary.
 *
 * Fixed register: do not allocate register, do not collect $200.
 * Uniform: access a uniform register given by low bits.
 * Constant: access the specified constant
 * Zero: special cased to avoid wasting a constant
 * Passthrough: a bifrost_packed_src to passthrough T/T0/T1
 */

#define BIR_INDEX_REGISTER (1 << 31)
#define BIR_INDEX_UNIFORM (1 << 30)
#define BIR_INDEX_CONSTANT (1 << 29)
#define BIR_INDEX_ZERO (1 << 28)
#define BIR_INDEX_PASS (1 << 27)

/* Keep me synced please so we can check src & BIR_SPECIAL */

#define BIR_SPECIAL ((BIR_INDEX_REGISTER | BIR_INDEX_UNIFORM) | \
                     (BIR_INDEX_CONSTANT | BIR_INDEX_ZERO | BIR_INDEX_PASS))
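
/* For example, uniform register 4 is named by (BIR_INDEX_UNIFORM | 4), and a
 * source can be tested for any of these special encodings at once with
 * (src & BIR_SPECIAL), per the comment above. */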

static inline unsigned
bi_max_temp(bi_context *ctx)
{
        unsigned alloc = MAX2(ctx->impl->reg_alloc, ctx->impl->ssa_alloc);
        return ((alloc + 2 + ctx->temp_alloc) << 1);
}

static inline unsigned
bi_make_temp(bi_context *ctx)
{
        return (ctx->impl->ssa_alloc + 1 + ctx->temp_alloc++) << 1;
}

static inline unsigned
bi_make_temp_reg(bi_context *ctx)
{
        return ((ctx->impl->reg_alloc + ctx->temp_alloc++) << 1) | BIR_IS_REG;
}

static inline unsigned
bir_ssa_index(nir_ssa_def *ssa)
{
        /* Off-by-one ensures BIR_NO_ARG is skipped */
        return ((ssa->index + 1) << 1) | 0;
}

static inline unsigned
bir_src_index(nir_src *src)
{
        if (src->is_ssa)
                return bir_ssa_index(src->ssa);
        else {
                assert(!src->reg.indirect);
                return (src->reg.reg->index << 1) | BIR_IS_REG;
        }
}

static inline unsigned
bir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return bir_ssa_index(&dst->ssa);
        else {
                assert(!dst->reg.indirect);
                return (dst->reg.reg->index << 1) | BIR_IS_REG;
        }
}
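
/* To summarize the index encoding: the low bit distinguishes NIR registers
 * (BIR_IS_REG) from SSA values, and the SSA side is shifted up by one so that
 * index 0 stays free for BIR_NO_ARG. For example:
 *
 *    NIR ssa_0   -> (0 + 1) << 1 = 2
 *    NIR ssa_1   -> (1 + 1) << 1 = 4
 *    NIR reg 0   -> (0 << 1) | 1 = 1
 *    no operand  -> 0 (BIR_NO_ARG)
 */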

/* Iterators for Bifrost IR */

#define bi_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define bi_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, from, &ctx->blocks, link)

#define bi_foreach_instr_in_block(block, v) \
        list_for_each_entry(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_clause_in_block(block, v) \
        list_for_each_entry(bi_clause, v, &(block)->clauses, link)

#define bi_foreach_instr_global(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block((bi_block *) v_block, v)

#define bi_foreach_instr_global_safe(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block_safe((bi_block *) v_block, v)
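
/* Typical usage sketch: the _safe variants tolerate removal of the current
 * entry during iteration, so a pass deleting instructions might look like
 *
 *    bi_foreach_instr_global_safe(ctx, ins) {
 *            if (should_delete(ins)) // hypothetical predicate
 *                    bi_remove_instruction(ins);
 *    }
 */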

/* Based on set_foreach, expanded with automatic type casts */

#define bi_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        bi_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL))

#define bi_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)

static inline bi_instruction *
bi_prev_op(bi_instruction *ins)
{
        return list_last_entry(&(ins->link), bi_instruction, link);
}

static inline bi_instruction *
bi_next_op(bi_instruction *ins)
{
        return list_first_entry(&(ins->link), bi_instruction, link);
}

static inline pan_block *
pan_next_block(pan_block *block)
{
        return list_first_entry(&(block->link), pan_block, link);
}

/* BIR manipulation */

bool bi_has_outmod(bi_instruction *ins);
bool bi_has_source_mods(bi_instruction *ins);
bool bi_is_src_swizzled(bi_instruction *ins, unsigned s);
bool bi_has_arg(bi_instruction *ins, unsigned arg);
uint16_t bi_from_bytemask(uint16_t bytemask, unsigned bytes);
unsigned bi_get_component_count(bi_instruction *ins);
uint16_t bi_bytemask_of_read_components(bi_instruction *ins, unsigned node);

/* BIR passes */

bool bi_opt_dead_code_eliminate(bi_context *ctx, bi_block *block);
void bi_schedule(bi_context *ctx);
void bi_register_allocate(bi_context *ctx);

/* Liveness */

void bi_compute_liveness(bi_context *ctx);
void bi_liveness_ins_update(uint16_t *live, bi_instruction *ins, unsigned max);
void bi_invalidate_liveness(bi_context *ctx);
bool bi_is_live_after(bi_context *ctx, bi_block *block, bi_instruction *start, int src);

/* Code emit */

void bi_pack(bi_context *ctx, struct util_dynarray *emission);

#endif