/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_schedule_barrier(bi_context *ctx);

static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;

        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;

        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}
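
/* Helper building the scaffolding shared by the load-style classes
 * (attributes, varyings, uniforms): the intrinsic base goes in the
 * constant field, a direct offset is folded into that constant, and an
 * indirect offset is routed through src[0] instead. */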
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .vector_channels = instr->num_components,
                .src = { BIR_INDEX_CONSTANT },
                .src_types = { nir_type_uint32 },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = bir_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = bir_src_index(offset);

        return load;
}

static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}
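
/* Fragment output path: the first store emits a one-shot ATEST (alpha
 * test) packet behind a scheduling barrier, then every store emits a
 * BLEND for its render target. The fixed registers below (R60, R48) are
 * placeholders until proper RA, per the TODOs. */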
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                bir_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_intrinsic_type(instr)
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        bir_src_index(&instr->src[0]),
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                },
                .src_types = {
                        nir_intrinsic_type(instr),
                        nir_type_uint32
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .vector_channels = 4
        };

        assert(blend.blend_location < 8);
        assert(ctx->blend_types);
        ctx->blend_types[blend.blend_location] = blend.src_types[0];

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}

static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;
        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}

static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.vector_channels = 3;

        unsigned nr = nir_intrinsic_src_components(instr, 0);
        assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        bir_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .vector_channels = nr,
        };

        for (unsigned i = 0; i < nr; ++i)
                st.swizzle[0][i] = i;

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}

static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}
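
/* Uniforms are laid out after the sysvals, 16 bytes per slot. Worked
 * example (illustrative): with 3 sysvals, a load with base 2 lands at byte
 * (2 + 3) * 16 = 80, matching the adjustment in bi_emit_ld_uniform above. */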
static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
                unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;
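        /* The lookup above presumably stores ids biased by one, since
         * _mesa_hash_table_u64_search returns NULL for an absent key; the
         * subtraction recovers the 0-based uniform index. */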

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .vector_channels = nr_components,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .constant = { (uniform * 16) + offset },
                .dest = bir_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}

static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;

        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                /* todo */
                break;
        }
}

static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = bir_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .src_types = {
                        instr->def.bit_size | nir_type_uint,
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}

#define BI_CASE_CMP(op) \
        case op##8:  \
        case op##16: \
        case op##32:
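
/* Illustrative expansion: BI_CASE_CMP(nir_op_flt) becomes
 * `case nir_op_flt8: case nir_op_flt16: case nir_op_flt32:`, so a single
 * switch arm covers every bit-size variant of a comparison. */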

static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        case nir_op_vec8:
        case nir_op_vec16:
                unreachable("should've been lowered");

        case nir_op_fround_even:
        case nir_op_fceil:
        case nir_op_ffloor:
        case nir_op_ftrunc:
                return BI_ROUND;

        default:
                unreachable("Unknown ALU op");
        }
}

/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
 * return BI_COND_ALWAYS as a sentinel if it fails to do so (when used for
 * optimizations). Otherwise it will bail (when used for primary code
 * generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;

        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}

static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        unsigned bits = nir_src_bit_size(instr->src[i].src);
        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);

        alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
                | bits;

        /* Try to inline a constant */
        if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
                uint64_t mask = (1ull << dest_bits) - 1;
                uint64_t cons = nir_src_as_uint(instr->src[i].src);

                /* Try to reuse a constant */
                for (unsigned i = 0; i < (*constant_shift); i += dest_bits) {
                        if (((alu->constant.u64 >> i) & mask) == cons) {
                                alu->src[to] = BIR_INDEX_CONSTANT | i;
                                return;
                        }
                }

                alu->constant.u64 |= cons << *constant_shift;
                alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
                --(*constants_left);
                (*constant_shift) += MAX2(dest_bits, 32); /* lo/hi */
                return;
        }

        alu->src[to] = bir_src_index(&instr->src[i].src);

        /* Copy swizzle for all vectored components, replicating last component
         * to fill undersized */
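
        /* Example (illustrative): for a 16-bit op there are 32/16 = 2
         * components per 32-bit slot, so two swizzle entries get written; a
         * scalar source (comps == 1) replicates component 0 into both. */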

        unsigned vec = alu->type == BI_COMBINE ? 1 :
                MAX2(1, 32 / dest_bits);

        for (unsigned j = 0; j < vec; ++j)
                alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}

static void
bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        /* Bail for vector weirdness */
        if (cond.swizzle[0] != 0)
                return;

        if (!cond.src.is_ssa)
                return;

        nir_ssa_def *def = cond.src.ssa;
        nir_instr *parent = def->parent_instr;

        if (parent->type != nir_instr_type_alu)
                return;

        nir_alu_instr *alu = nir_instr_as_alu(parent);

        /* Try to match a condition */
        enum bi_cond bcond = bi_cond_for_nir(alu->op, true);

        if (bcond == BI_COND_ALWAYS)
                return;

        /* We found one, let's fuse it in */
        csel->csel_cond = bcond;
        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}

static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Try some special functions */
        switch (instr->op) {
        case nir_op_fexp2:
                bi_emit_fexp2(ctx, instr);
                return;
        case nir_op_flog2:
                bi_emit_flog2(ctx, instr);
                return;
        default:
                break;
        }

        /* Otherwise, assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = bir_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        unsigned comps = nir_dest_num_components(instr->dest.dest);

        if (alu.type != BI_COMBINE)
                assert(comps <= MAX2(1, 32 / comps));

        if (!instr->dest.dest.is_ssa) {
                for (unsigned i = 0; i < comps; ++i)
                        assert(instr->dest.write_mask);
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;
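
        /* Worked example (illustrative): a 32-bit fadd of two constants has
         * constants_left = 64/32 = 2. Inlining 1.0f then 2.0f packs
         * constant.u64 = 0x400000003f800000, the sources reading
         * BIR_INDEX_CONSTANT | 0 and BIR_INDEX_CONSTANT | 32. */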

        if (alu.type == BI_COMBINE)
                constants_left = 0;

        /* Copy sources */

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned f = 0;

                if (i && alu.type == BI_CSEL)
                        f++;

                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                alu.src_types[2] = alu.src_types[1];
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.op.compare = bi_cond_for_nir(instr->op, false);
                break;
        case nir_op_fround_even:
                alu.op.round = BI_ROUND_MODE;
                alu.roundmode = BIFROST_RTE;
                break;
        case nir_op_fceil:
                alu.op.round = BI_ROUND_MODE;
                alu.roundmode = BIFROST_RTP;
                break;
        case nir_op_ffloor:
                alu.op.round = BI_ROUND_MODE;
                alu.roundmode = BIFROST_RTN;
                break;
        case nir_op_ftrunc:
                alu.op.round = BI_ROUND_MODE;
                alu.roundmode = BIFROST_RTZ;
                break;
        default:
                break;
        }

        if (alu.type == BI_CSEL) {
                /* Default to csel3 */
                alu.csel_cond = BI_COND_NE;
                alu.src[1] = BIR_INDEX_ZERO;
                alu.src_types[1] = alu.src_types[0];
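
                /* If the condition is itself a comparison we recognize, fuse
                 * that comparison into the csel instead of testing the
                 * boolean against zero. */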
                bi_fuse_csel_cond(&alu, instr->src[0],
                                &constants_left, &constant_shift, comps);
        }

        bi_emit(ctx, alu);
}

/* TEX_COMPACT instructions assume normal 2D f32 operation but are more
 * space-efficient and with simpler RA/scheduling requirements */

static void
emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
{
        /* TODO: Pipe through indices */
        assert(instr->texture_index == 0);
        assert(instr->sampler_index == 0);

        bi_instruction tex = {
                .type = BI_TEX,
                .op = { .texture = BI_TEX_COMPACT },
                .dest = bir_dest_index(&instr->dest),
                .dest_type = instr->dest_type,
                .src_types = { nir_type_float32, nir_type_float32 },
                .vector_channels = 4
        };

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int index = bir_src_index(&instr->src[i].src);
                assert (instr->src[i].src_type == nir_tex_src_coord);

                tex.src[0] = index;
                tex.src[1] = index;
                tex.swizzle[0][0] = 0;
                tex.swizzle[1][0] = 1;
        }

        bi_emit(ctx, tex);
}

static void
emit_tex_full(bi_context *ctx, nir_tex_instr *instr)
{
        unreachable("stub");
}

static void
emit_tex(bi_context *ctx, nir_tex_instr *instr)
{
        nir_alu_type base = nir_alu_type_get_base_type(instr->dest_type);
        unsigned sz = nir_dest_bit_size(instr->dest);
        instr->dest_type = base | sz;

        bool is_normal = instr->op == nir_texop_tex;
        bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
        bool is_f = base == nir_type_float && (sz == 16 || sz == 32);

        bool is_compact = is_normal && is_2d && is_f && !instr->is_shadow;

        if (is_compact)
                emit_tex_compact(ctx, instr);
        else
                emit_tex_full(ctx, instr);
}

static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                break;

        default:
                unreachable("Unhandled instruction type");
        }
}

static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->base.name = ctx->block_name_count++;

        return blk;
}
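
/* Splits control flow at a barrier: append a fresh block, wire it as the
 * successor of the current one, and make it current, so ordering-sensitive
 * instructions (ATEST, BLEND) terminate their block. */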
static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->base.link, &ctx->blocks);
        list_inithead(&ctx->after_block->base.instructions);
        pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}

static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}

/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}

/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
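
/* CFG shape emitted for an if (sketch): before_block branches conditionally
 * to the else (or straight to after_block when the else turns out empty)
 * and falls through to the then; the then ends with a jump over the else. */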
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
        }

        /* Wire up the successors */

        pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */

        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
        pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}

static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        /* Pop off */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;

        ++ctx->loop_count;
}

static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}

static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
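
/* Compiler entry point: lower I/O and optimize the NIR, assign sysvals,
 * emit BIR per function, lower vector combines, run DCE to a fixed point,
 * then schedule, register-allocate, pack, and disassemble. */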
void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
        ctx->blend_types = program->blend_types;

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;
                bi_lower_combine(ctx, block);
        }

        bool progress = false;

        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        bi_print_shader(ctx, stdout);
        bi_schedule(ctx);
        bi_register_allocate(ctx);
        bi_print_shader(ctx, stdout);
        bi_pack(ctx, &program->compiled);
        disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}