/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_debug.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"
static const struct debug_named_value debug_options[] = {
        {"msgs",      BIFROST_DBG_MSGS,     "Print debug messages"},
        {"shaders",   BIFROST_DBG_SHADERS,  "Dump shaders in NIR and MIR"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(bifrost_debug, "BIFROST_MESA_DEBUG", debug_options, 0)

int bifrost_debug = 0;

#define DBG(fmt, ...) \
        do { if (bifrost_debug & BIFROST_DBG_MSGS) \
                fprintf(stderr, "%s:%d: "fmt, \
                        __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
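/* Usage sketch (hypothetical call site): DBG("%u comps\n", comps) expands to
 * an fprintf prefixed with the enclosing function name and line number, and
 * reduces to a dead branch unless BIFROST_DBG_MSGS is set in bifrost_debug
 * (i.e. BIFROST_MESA_DEBUG=msgs in the environment). */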
static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_schedule_barrier(bi_context *ctx);
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .vector_channels = instr->num_components,
                .src = { BIR_INDEX_CONSTANT },
                .src_types = { nir_type_uint32 },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = pan_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = pan_src_index(offset);

        return load;
}
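/* For example, a load with a constant offset folds the offset into the
 * immediate (constant.u64 = base + offset) and leaves src[0] as a bare
 * BIR_INDEX_CONSTANT, whereas an indirect offset keeps only the base in the
 * immediate and points src[0] at the offset's index instead. */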
static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                pan_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_intrinsic_type(instr)
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        pan_src_index(&instr->src[0]),
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                },
                .src_types = {
                        nir_intrinsic_type(instr),
                        nir_type_uint32
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .vector_channels = 4
        };

        assert(blend.blend_location < BIFROST_MAX_RENDER_TARGET_COUNT);
        assert(ctx->blend_types);
        assert(blend.src_types[0]);
        ctx->blend_types[blend.blend_location] = blend.src_types[0];

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}
static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;
        ld.src[3] = 0;
        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}
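/* Note: r61/r62 are presumably preloaded by the hardware with per-invocation
 * state; the "TODO: RA" comments above suggest these fixed registers should
 * eventually be modelled by the register allocator rather than hardcoded. */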
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.vector_channels = 3;

        unsigned nr = nir_intrinsic_src_components(instr, 0);
        assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        pan_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .swizzle = {
                        { 0 }
                },
                .vector_channels = nr,
        };

        for (unsigned i = 0; i < nr; ++i)
                st.swizzle[0][i] = i;

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}
static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}
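/* Worked example of the math above: with 2 sysvals and a uniform at base 3,
 * the read lands at constant.u64 = (3 + 2) * 16 = byte 80. Uniforms are laid
 * out as 16-byte slots, with the sysvals occupying the first slots. */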
static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
                unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .vector_channels = nr_components,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .constant = { (uniform * 16) + offset },
                .dest = pan_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}
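/* The "- 1" above assumes sysval ids are stored in the hash table offset by
 * one, so a NULL lookup result remains distinguishable from a valid id 0. */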
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;
        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                unreachable("Unknown intrinsic");
        }
}
static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = pan_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .src_types = {
                        instr->def.bit_size | nir_type_uint,
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}
#define BI_CASE_CMP(op) \
        case op##8: \
        case op##16: \
        case op##32:

static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        /* ... */

        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        /* ... */
                unreachable("should've been lowered");

        /* ... */
        case nir_op_fround_even:
        /* ... */

        default:
                unreachable("Unknown ALU op");
        }
}
/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
 * return BI_COND_ALWAYS as a sentinel if it fails to do so (when used for
 * optimizations). Otherwise it will bail (when used for primary code
 * generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;

        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}
static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        unsigned bits = nir_src_bit_size(instr->src[i].src);
        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);

        alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
                | bits;

        /* Try to inline a constant */
        if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
                uint64_t mask = (1ull << dest_bits) - 1;
                uint64_t cons = nir_src_as_uint(instr->src[i].src);

                /* Try to reuse a constant */
                for (unsigned i = 0; i < (*constant_shift); i += dest_bits) {
                        if (((alu->constant.u64 >> i) & mask) == cons) {
                                alu->src[to] = BIR_INDEX_CONSTANT | i;
                                return;
                        }
                }

                alu->constant.u64 |= cons << *constant_shift;
                alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
                --(*constants_left);
                (*constant_shift) += MAX2(dest_bits, 32); /* lo/hi */
                return;
        }

        alu->src[to] = pan_src_index(&instr->src[i].src);

        /* Copy swizzle for all vectored components, replicating last component
         * to fill undersized */

        unsigned vec = alu->type == BI_COMBINE ? 1 :
                MAX2(1, 32 / dest_bits);

        for (unsigned j = 0; j < vec; ++j)
                alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}
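/* Example of the packing above: inlining 32-bit constants 0xA then 0xB gives
 * constant.u64 = 0x0000000B0000000A, with the sources becoming
 * BIR_INDEX_CONSTANT | 0 and BIR_INDEX_CONSTANT | 32 respectively; a second
 * use of 0xA would be deduplicated back to BIR_INDEX_CONSTANT | 0 by the
 * reuse loop. */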
static void
bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        /* Bail for vector weirdness */
        if (cond.swizzle[0] != 0)
                return;

        if (!cond.src.is_ssa)
                return;

        nir_ssa_def *def = cond.src.ssa;
        nir_instr *parent = def->parent_instr;

        if (parent->type != nir_instr_type_alu)
                return;

        nir_alu_instr *alu = nir_instr_as_alu(parent);

        /* Try to match a condition */
        enum bi_cond bcond = bi_cond_for_nir(alu->op, true);

        if (bcond == BI_COND_ALWAYS)
                return;

        /* We found one, let's fuse it in */
        csel->cond = bcond;
        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}
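/* E.g. a NIR sequence like bcsel(flt32(a, b), x, y) drops the intermediate
 * boolean: the comparison's operands are copied into the CSEL's first two
 * sources and the condition becomes BI_COND_LT directly. */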
static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Try some special functions */
        switch (instr->op) {
        case nir_op_fexp2:
                bi_emit_fexp2(ctx, instr);
                return;
        case nir_op_flog2:
                bi_emit_flog2(ctx, instr);
                return;
        default:
                break;
        }

        /* Otherwise, assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = pan_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        unsigned comps = nir_dest_num_components(instr->dest.dest);

        if (alu.type != BI_COMBINE)
                assert(comps <= MAX2(1, 32 / comps));

        if (!instr->dest.dest.is_ssa) {
                for (unsigned i = 0; i < comps; ++i)
                        assert(instr->dest.write_mask);
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;

        if (alu.type == BI_COMBINE)
                constants_left = 0;

        /* Copy sources */

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned f = 0;

                if (i && alu.type == BI_CSEL)
                        f++;

                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                alu.src_types[2] = alu.src_types[1];
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.cond = bi_cond_for_nir(instr->op, false);
                break;
        case nir_op_fround_even:
                alu.roundmode = BIFROST_RTE;
                break;
        case nir_op_fceil:
                alu.roundmode = BIFROST_RTP;
                break;
        case nir_op_ffloor:
                alu.roundmode = BIFROST_RTN;
                break;
        case nir_op_ftrunc:
                alu.roundmode = BIFROST_RTZ;
                break;
        case nir_op_iand:
                alu.op.bitwise = BI_BITWISE_AND;
                break;
        case nir_op_ior:
                alu.op.bitwise = BI_BITWISE_OR;
                break;
        case nir_op_ixor:
                alu.op.bitwise = BI_BITWISE_XOR;
                break;
        default:
                break;
        }

        if (alu.type == BI_CSEL) {
                /* Default to csel3 */
                alu.cond = BI_COND_NE;
                alu.src[1] = BIR_INDEX_ZERO;
                alu.src_types[1] = alu.src_types[0];

                bi_fuse_csel_cond(&alu, instr->src[0],
                                &constants_left, &constant_shift, comps);
        } else if (alu.type == BI_BITWISE) {
                /* Implicit shift argument... at some point we should fold */
                alu.src[2] = BIR_INDEX_ZERO;
                alu.src_types[2] = alu.src_types[1];
        }

        bi_emit(ctx, alu);
}
/* TEX_COMPACT instructions assume normal 2D f32 operation but are more
 * space-efficient and with simpler RA/scheduling requirements */

static void
emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
{
        bi_instruction tex = {
                .type = BI_TEX,
                .op = { .texture = BI_TEX_COMPACT },
                .texture = {
                        .texture_index = instr->texture_index,
                        .sampler_index = instr->sampler_index,
                },
                .dest = pan_dest_index(&instr->dest),
                .dest_type = instr->dest_type,
                .src_types = { nir_type_float32, nir_type_float32 },
                .vector_channels = 4
        };

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int index = pan_src_index(&instr->src[i].src);
                assert(instr->src[i].src_type == nir_tex_src_coord);

                tex.src[0] = index;
                tex.src[1] = index;
                tex.swizzle[0][0] = 0;
                tex.swizzle[1][0] = 1;
        }

        bi_emit(ctx, tex);
}
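/* The compact form carries just the two f32 coordinates: both sources alias
 * the same coordinate vector, with the swizzles selecting .x for src[0] and
 * .y for src[1]. */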
static void
emit_tex_full(bi_context *ctx, nir_tex_instr *instr)
{
        unreachable("stub");
}
static void
emit_tex(bi_context *ctx, nir_tex_instr *instr)
{
        nir_alu_type base = nir_alu_type_get_base_type(instr->dest_type);
        unsigned sz = nir_dest_bit_size(instr->dest);
        instr->dest_type = base | sz;

        bool is_normal = instr->op == nir_texop_tex;
        bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
        bool is_f = base == nir_type_float && (sz == 16 || sz == 32);

        bool is_compact = is_normal && is_2d && is_f && !instr->is_shadow;

        if (is_compact)
                emit_tex_compact(ctx, instr);
        else
                emit_tex_full(ctx, instr);
}
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                unreachable("Unhandled instruction type");
        }
}
static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->base.name = ctx->block_name_count++;

        return blk;
}
static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->base.link, &ctx->blocks);
        list_inithead(&ctx->after_block->base.instructions);
        pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}
/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}
/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions. */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = pan_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
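/* With invert = true (as used by emit_if below), the branch is taken when the
 * condition compares equal to zero, i.e. when the NIR condition is false. */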
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
        }

        /* Wire up the successors */

        pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */

        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
        pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}
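/* The resulting CFG is the usual diamond (sketched for a non-empty else):
 *
 *   before_block --(cond false)--> else_block ... end_else_block
 *        | fallthrough                               | fallthrough
 *        v                                           v
 *   then_block ... end_then_block --(then_exit)--> after_block
 */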
static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        /* Restore the saved targets for any enclosing loop */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
}
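/* Loop CFG sketch: the body starts at continue_block, the branch emitted
 * above forms the back-edge to continue_block, and break_block is installed
 * as after_block so subsequent code begins at the loop exit. Nesting works
 * because the enclosing loop's break/continue targets are saved and restored
 * around the body. */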
static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
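/* The final pass ordering matters here: nir_move_vec_src_uses_to_dest runs
 * first, presumably so that vector sources can coalesce with their
 * destinations before nir_convert_from_ssa rewrites everything onto
 * registers. */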
void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bifrost_debug = debug_get_option_bifrost_debug();

        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);
        NIR_PASS_V(nir, nir_lower_mediump_outputs);

        bi_optimize_nir(nir);

        if (bifrost_debug & BIFROST_DBG_SHADERS) {
                nir_print_shader(nir, stdout);
        }

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
        ctx->blend_types = program->blend_types;

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;
                bi_lower_combine(ctx, block);
        }

        bool progress = false;

        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);

        bi_schedule(ctx);
        bi_register_allocate(ctx);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);

        bi_pack(ctx, &program->compiled);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}