/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *    Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#ifndef __BIFROST_COMPILER_H
#define __BIFROST_COMPILER_H

#include "bifrost.h"
#include "compiler/nir/nir.h"
#include "panfrost/util/pan_ir.h"
/* Bifrost opcodes are tricky -- the same op may exist on both FMA and
 * ADD with two completely different opcodes, and opcodes can be varying
 * length in some cases. Then we have different opcodes for int vs float
 * and then sometimes even for different typesizes. Further, virtually
 * every op has a number of flags which depend on the op. In contrast
 * to Midgard, where you have a strict ALU/LDST/TEX division and within
 * ALU you have strict int/float and that's it... here it's a *lot* more
 * involved. As such, we use something much higher level for our IR,
 * encoding "classes" of operations, letting the opcode details get
 * sorted out at emit time.
 *
 * Please keep this list alphabetized. Please use a dictionary if you
 * don't know how to do that.
 */
        BI_SPECIAL, /* _FAST, _TABLE on supported GPUs */
/* Properties of a class... */
extern unsigned bi_class_props[BI_NUM_CLASSES];
/* abs/neg/outmod valid for a float op */
#define BI_MODS (1 << 0)

/* Generic enough that little class-specific information is required. In other
 * words, it acts as a "normal" ALU op, even if the encoding ends up being
 * irregular enough to warrant a separate class */
#define BI_GENERIC (1 << 1)

/* Accepts a bifrost_roundmode */
#define BI_ROUNDMODE (1 << 2)

/* Can be scheduled to FMA */
#define BI_SCHED_FMA (1 << 3)

/* Can be scheduled to ADD */
#define BI_SCHED_ADD (1 << 4)

/* Most ALU ops can do either, actually */
#define BI_SCHED_ALL (BI_SCHED_FMA | BI_SCHED_ADD)

/* Along with setting BI_SCHED_ADD, eats up the entire cycle, so FMA must be
 * nopped out. Used for _FAST operations. */
#define BI_SCHED_SLOW (1 << 5)

/* Swizzling allowed for the 8/16-bit source */
#define BI_SWIZZLABLE (1 << 6)

/* For scheduling purposes this is a high latency instruction and must be at
 * the end of a clause. Implies ADD */
#define BI_SCHED_HI_LATENCY ((1 << 7) | BI_SCHED_ADD)

/* Intrinsic is vectorized and should read 4 components regardless of writemask */
#define BI_VECTOR (1 << 8)
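/* Illustrative sketch (not part of the actual API): class properties are
 * queried by indexing bi_class_props with the instruction's class. The field
 * name ins->type used below is an assumption for illustration only.
 *
 *    bool can_fma = bi_class_props[ins->type] & BI_SCHED_FMA;
 */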
/* It can't get any worse than csel4... can it? */
#define BIR_SRC_COUNT 4

struct bi_load_vary {
        enum bifrost_interp_mode interp_mode;
/* BI_BRANCH encodes the details of the branch itself as well as a pointer to
 * the target. We forward declare bi_block since this is mildly circular (not
 * strictly, but this order of the file makes more sense I think).
 *
 * We define our own enum of conditions since the conditions in the hardware
 * are packed in crazy ways that would make manipulation unwieldy (meaning
 * changes based on port swapping, etc), so we defer dealing with that until
 * emit time. Likewise, we expose NIR types instead of the crazy branch types,
 * although the restrictions do eventually apply of course. */

        /* Types are specified in src_types and must be compatible (either
         * both int or both float; 16/32, with matching sizes, or 32/16 if
         * float). Types are ignored if BI_COND_ALWAYS is set for an
         * unconditional branch. */
        struct bi_block *target;
/* Opcodes within a class */

        BI_ROUND_MODE, /* use round mode */
        BI_ROUND_ROUND /* i.e.: fround() */
        struct list_head link; /* Must be first */

        /* Indices, see bir_ssa_index etc. Note zero is special cased
         * to "no argument" */
        unsigned src[BIR_SRC_COUNT];

        /* If one of the sources has BIR_INDEX_CONSTANT */
        /* Floating-point modifiers, type/class permitting. If not
         * allowed for the type/class, these are ignored. */
        enum bifrost_outmod outmod;
        bool src_abs[BIR_SRC_COUNT];
        bool src_neg[BIR_SRC_COUNT];

        /* Round mode (requires BI_ROUNDMODE) */
        enum bifrost_roundmode roundmode;
        /* Writemask (bit for each affected byte). This is quite restricted --
         * ALU ops can only write to a single channel (exception: <32-bit, in
         * which case you can write to 32/N contiguous aligned channels).
         * Load/store can only write to all channels at once, in a sense. But
         * it's still better to use this generic form than to have synthetic
         * ops flying about, since for RA purposes we're not really vector
         * anyway. */
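        /* Worked example (illustrative): with 32-bit channels, writing only
         * channel 2 touches bytes 8-11, i.e. a writemask of 0x0F00; a 16-bit
         * write to the low half of channel 0 touches bytes 0-1, i.e. 0x0003. */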
        /* Destination type. Usually the type of the instruction
         * itself, but if sources and destination have different
         * types, the type of the destination wins (so f2i would be
         * int). Zero if there is no destination. Bitsize included */
        nir_alu_type dest_type;

        /* Source types if required by the class */
        nir_alu_type src_types[BIR_SRC_COUNT];
        /* If the source type is 8-bit or 16-bit such that SIMD is possible,
         * and the class has BI_SWIZZLABLE, this is a swizzle in the usual
         * sense. On non-SIMD instructions, it can be used for component
         * selection, so we don't have to special case extraction. */
        uint8_t swizzle[BIR_SRC_COUNT][NIR_MAX_VEC_COMPONENTS];
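        /* Illustrative reading: for a 16-bit SIMD source, swizzle[s][c] picks
         * which 16-bit half feeds lane c, so { 1, 0 } swaps the halves; for a
         * 32-bit (non-SIMD) source, swizzle[s][0] = 2 simply selects
         * component 2 of the vector being read. */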
        /* A class-specific op from which the actual opcode can be derived
         * (along with the above information) */

        enum bi_minmax_op minmax;
        enum bi_bitwise_op bitwise;
        enum bi_round_op round;
        enum bi_special_op special;
        enum bi_cond compare;
        /* Union for class-specific information */

        enum bifrost_minmax_mode minmax;
        struct bi_load_vary load_vary;
        struct bi_branch branch;

        /* For CSEL, the comparison op. BI_COND_ALWAYS doesn't make
         * sense here but you can always just use a move for that */
        enum bi_cond csel_cond;

        /* For BLEND -- the location 0-7 */
        unsigned blend_location;
/* Scheduling takes place in two steps. Step 1 groups instructions within a
 * block into distinct clauses (bi_clause). Step 2 schedules instructions
 * within a clause into FMA/ADD pairs (bi_bundle).
 *
 * A bi_bundle contains two paired instruction pointers. If a slot is unfilled,
 * leave it NULL; the emitter will fill in a nop. */
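/* Minimal sketch (the slot names fma and add are an assumption here, not
 * shown in this excerpt): a bundle holding only an ADD-unit instruction
 * leaves the FMA slot NULL for the emitter to nop out.
 *
 *    bi_bundle bundle = { .fma = NULL, .add = add_ins };
 */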
        struct list_head link;

        /* A clause can have 8 instructions in the bundled FMA/ADD sense, so
         * there can be 8 bundles. But each bundle can have both an FMA and an
         * ADD, so a clause can have up to 16 bi_instructions. Whether bundles
         * or instructions are used depends on where in scheduling we are. */
        unsigned instruction_count;
        unsigned bundle_count;
        bi_instruction *instructions[16];
        bi_bundle bundles[8];
        /* For scoreboarding -- the clause ID (this is not globally unique!)
         * and its dependencies in terms of other clauses, computed during
         * scheduling and used when emitting code. Dependencies are expressed
         * as a bitfield matching the hardware, except shifted by a clause
         * (the shift back to the ISA's off-by-one encoding is worked out when
         * emitting clauses) */
        unsigned scoreboard_id;
        uint8_t dependencies;
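        /* Illustrative reading of the above: to wait on the clause that was
         * assigned scoreboard_id 3, set dependencies |= (1 << 3); the shift
         * to the ISA's off-by-one encoding happens later, at pack time. */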
        /* Back-to-back corresponds directly to the back-to-back bit. Branch
         * conditional corresponds to the branch conditional bit, except that
         * in the emitted code it's always set if back-to-back is, whereas we
         * use the actual value (without the back-to-back implication, so to
         * speak) internally */
        bool branch_conditional;

        /* Corresponds to the usual bit but shifted by a clause */
        bool data_register_write_barrier;

        /* Constants read by this clause. ISA limit. */
        uint64_t constants[8];
        unsigned constant_count;
typedef struct bi_block {
        pan_block base; /* must be first */

        /* If true, uses clauses; if false, uses instructions */

        struct list_head clauses; /* list of bi_clause */
        gl_shader_stage stage;
        struct list_head blocks; /* list of bi_block */
        struct panfrost_sysvals sysvals;
        /* During NIR->BIR */
        nir_function_impl *impl;
        bi_block *current_block;
        unsigned block_name_count;
        bi_block *after_block;
        bi_block *break_block;
        bi_block *continue_block;

        /* For creating temporaries */
        unsigned temp_alloc;

        /* Analysis results */

        /* Stats for shader-db */
        unsigned instruction_count;
static inline bi_instruction *
bi_emit(bi_context *ctx, bi_instruction ins)
{
        bi_instruction *u = rzalloc(ctx, bi_instruction);
        memcpy(u, &ins, sizeof(ins));
        list_addtail(&u->link, &ctx->current_block->base.instructions);
        return u;
}
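/* Usage sketch (illustrative only; the .type and .dest fields and the BI_MOV
 * class are elided from this excerpt and are assumptions here): build a
 * bi_instruction on the stack and let bi_emit copy it into the current block.
 *
 *    bi_instruction mov = {
 *            .type = BI_MOV,
 *            .dest = bi_make_temp(ctx),
 *            .dest_type = nir_type_uint32,
 *            .src = { BIR_INDEX_ZERO },
 *            .src_types = { nir_type_uint32 },
 *    };
 *    bi_emit(ctx, mov);
 */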
static inline void
bi_remove_instruction(bi_instruction *ins)
{
        list_del(&ins->link);
}
/* So we can distinguish between SSA/reg/sentinel quickly */
#define BIR_NO_ARG (0)
#define BIR_IS_REG (1)
/* If the high bits are set, instead of SSA/registers we have specials,
 * indexed by the low bits if necessary.
 *
 * Fixed register: do not allocate register, do not collect $200.
 * Uniform: access a uniform register given by low bits.
 * Constant: access the specified constant
 * Zero: special cased to avoid wasting a constant
 * Passthrough: a bifrost_packed_src to passthrough T/T0/T1
 */

#define BIR_INDEX_REGISTER (1 << 31)
#define BIR_INDEX_UNIFORM  (1 << 30)
#define BIR_INDEX_CONSTANT (1 << 29)
#define BIR_INDEX_ZERO     (1 << 28)
#define BIR_INDEX_PASS     (1 << 27)

/* Keep these in sync so we can check src & BIR_SPECIAL */

#define BIR_SPECIAL ((BIR_INDEX_REGISTER | BIR_INDEX_UNIFORM) | \
        (BIR_INDEX_CONSTANT | BIR_INDEX_ZERO | BIR_INDEX_PASS))
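/* Illustrative usage of the encoding above (the snippet is an example, not
 * from this file): a source reading uniform register 4 is encoded as
 * (BIR_INDEX_UNIFORM | 4), and testing for any special source is a mask test:
 *
 *    ins->src[1] = BIR_INDEX_UNIFORM | 4;
 *    bool is_special = ins->src[1] & BIR_SPECIAL;
 */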
static inline unsigned
bi_max_temp(bi_context *ctx)
{
        unsigned alloc = MAX2(ctx->impl->reg_alloc, ctx->impl->ssa_alloc);
        return ((alloc + 2 + ctx->temp_alloc) << 1);
}
static inline unsigned
bi_make_temp(bi_context *ctx)
{
        return (ctx->impl->ssa_alloc + 1 + ctx->temp_alloc++) << 1;
}
static inline unsigned
bi_make_temp_reg(bi_context *ctx)
{
        return ((ctx->impl->reg_alloc + ctx->temp_alloc++) << 1) | BIR_IS_REG;
}
static inline unsigned
bir_ssa_index(nir_ssa_def *ssa)
{
        /* Off-by-one ensures BIR_NO_ARG is skipped */
        return ((ssa->index + 1) << 1) | 0;
}
static inline unsigned
bir_src_index(nir_src *src)
{
        if (src->is_ssa)
                return bir_ssa_index(src->ssa);

        assert(!src->reg.indirect);
        return (src->reg.reg->index << 1) | BIR_IS_REG;
}
static inline unsigned
bir_dest_index(nir_dest *dst)
{
        if (dst->is_ssa)
                return bir_ssa_index(&dst->ssa);

        assert(!dst->reg.indirect);
        return (dst->reg.reg->index << 1) | BIR_IS_REG;
}
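/* Worked examples of the encoding above: SSA def 5 maps to (5 + 1) << 1 = 12
 * (LSB clear, and nonzero so it is distinct from BIR_NO_ARG); register 5 maps
 * to (5 << 1) | BIR_IS_REG = 11 (LSB set). */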
/* Iterators for Bifrost IR */

#define bi_foreach_block(ctx, v) \
        list_for_each_entry(pan_block, v, &ctx->blocks, link)

#define bi_foreach_block_from(ctx, from, v) \
        list_for_each_entry_from(pan_block, v, from, &ctx->blocks, link)

#define bi_foreach_instr_in_block(block, v) \
        list_for_each_entry(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_rev(block, v) \
        list_for_each_entry_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe(block, v) \
        list_for_each_entry_safe(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_safe_rev(block, v) \
        list_for_each_entry_safe_rev(bi_instruction, v, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from(block, v, from) \
        list_for_each_entry_from(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_instr_in_block_from_rev(block, v, from) \
        list_for_each_entry_from_rev(bi_instruction, v, from, &(block)->base.instructions, link)

#define bi_foreach_clause_in_block(block, v) \
        list_for_each_entry(bi_clause, v, &(block)->clauses, link)

#define bi_foreach_instr_global(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block((bi_block *) v_block, v)

#define bi_foreach_instr_global_safe(ctx, v) \
        bi_foreach_block(ctx, v_block) \
                bi_foreach_instr_in_block_safe((bi_block *) v_block, v)
/* Based on set_foreach, expanded with automatic type casts */

#define bi_foreach_predecessor(blk, v) \
        struct set_entry *_entry_##v; \
        bi_block *v; \
        for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL); \
                _entry_##v != NULL; \
                _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
                v = (bi_block *) (_entry_##v ? _entry_##v->key : NULL))
#define bi_foreach_src(ins, v) \
        for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
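/* Illustrative usage of the iterators above (the snippet and the variable
 * `node` are examples, not part of this header): count how many instructions
 * in a block read a given node index.
 *
 *    unsigned readers = 0;
 *    bi_foreach_instr_in_block(block, ins) {
 *            bi_foreach_src(ins, s) {
 *                    if (ins->src[s] == node)
 *                            ++readers;
 *            }
 *    }
 */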
static inline bi_instruction *
bi_prev_op(bi_instruction *ins)
{
        return list_last_entry(&(ins->link), bi_instruction, link);
}

static inline bi_instruction *
bi_next_op(bi_instruction *ins)
{
        return list_first_entry(&(ins->link), bi_instruction, link);
}

static inline pan_block *
pan_next_block(pan_block *block)
{
        return list_first_entry(&(block->link), pan_block, link);
}
/* BIR manipulation */

bool bi_has_outmod(bi_instruction *ins);
bool bi_has_source_mods(bi_instruction *ins);
bool bi_is_src_swizzled(bi_instruction *ins, unsigned s);
bool bi_has_arg(bi_instruction *ins, unsigned arg);
uint16_t bi_from_bytemask(uint16_t bytemask, unsigned bytes);
unsigned bi_get_component_count(bi_instruction *ins);
uint16_t bi_bytemask_of_read_components(bi_instruction *ins, unsigned node);
bool bi_opt_dead_code_eliminate(bi_context *ctx, bi_block *block);
void bi_schedule(bi_context *ctx);
void bi_register_allocate(bi_context *ctx);

void bi_compute_liveness(bi_context *ctx);
void bi_liveness_ins_update(uint16_t *live, bi_instruction *ins, unsigned max);
void bi_invalidate_liveness(bi_context *ctx);
bool bi_is_live_after(bi_context *ctx, bi_block *block, bi_instruction *start, int src);

void bi_pack(bi_context *ctx, struct util_dynarray *emission);