/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_schedule_barrier(bi_context *ctx);

static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
}

/* Gets a bytemask for a complete vecN write */

static unsigned
bi_mask_for_channels_32(unsigned i)
{
        return (1 << (4 * i)) - 1;
}
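
/* Worked example of the mask above: i = 3 (a vec3 of 32-bit channels) covers
 * 3 * 4 = 12 bytes, giving (1 << 12) - 1 = 0xFFF, i.e. every byte of the
 * destination vec3 is written. */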

static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .writemask = bi_mask_for_channels_32(instr->num_components),
                .src = { BIR_INDEX_CONSTANT },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = bir_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = bir_src_index(offset);

        return load;
}

static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}

static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                bir_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_type_float32
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                        .writemask = 0xF
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                        bir_src_index(&instr->src[0])
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_float32
                },
                .swizzle = {
                        { 0 },
                        { 0, 1, 2, 3 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .writemask = 0xF
        };

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}

static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;
        ld.src[3] = 0;
        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}

static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.writemask = (1 << 12) - 1;

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        bir_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }, { 1 }, { 2 }
                },
                .store_channels = 4, /* TODO: WRITEMASK */
        };

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}

static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}
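
/* Example of the offset math above: with two sysvals reserved
 * (sysval_count = 2), a uniform load with base 1 reads from byte offset
 * (1 + 2) * 16 = 48, since uniforms occupy 16-byte slots after the sysval
 * prefix. */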

static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
                unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .writemask = (1 << (nr_components * 4)) - 1,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .constant = { (uniform * 16) + offset },
                .dest = bir_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}
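
/* The "- 1" above pairs with what is presumably a one-based id stored in
 * sysval_to_id, so a NULL result from _mesa_hash_table_u64_search() (a miss)
 * stays distinguishable from a stored id of zero. */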

static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{

        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;
        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else {
                        unreachable("Unsupported shader stage");
                }
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                /* todo */
                break;
        }
}

static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = bir_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}

#define BI_CASE_CMP(op) \
        case op##8:  \
        case op##16: \
        case op##32:

static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        /* ... cases for the arithmetic, conversion, and bitwise ops are
         * elided in this excerpt ... */

        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        /* ... remaining cases elided in this excerpt ... */

        default:
                unreachable("Unknown ALU op");
        }
}
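
/* BI_CASE_CMP(nir_op_flt) expands to the three sized case labels
 * "case nir_op_flt8: case nir_op_flt16: case nir_op_flt32:", so one macro
 * use matches a comparison at every bit size. */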

/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
 * return BI_COND_ALWAYS as a sentinel if it fails to do so (when used for
 * optimizations). Otherwise it will bail (when used for primary code
 * generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;

        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}
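
/* For instance, bi_cond_for_nir(nir_op_flt32, false) returns BI_COND_LT,
 * while an unrelated opcode in soft mode yields the BI_COND_ALWAYS sentinel
 * instead of asserting. */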

static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = bir_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        if (instr->dest.dest.is_ssa) {
                /* Construct a writemask */
                unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
                unsigned comps = instr->dest.dest.ssa.num_components;
                assert(comps == 1);
                unsigned bits = bits_per_comp * comps;
                unsigned bytes = bits / 8;
                alu.writemask = (1 << bytes) - 1;
        } else {
                unsigned comp_mask = instr->dest.write_mask;

                alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
                                comp_mask);
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;
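
        /* For example, a 32-bit op has room for 64 / 32 = 2 inlined
         * constants: the first lands at shift 0 and the second at shift 32
         * within the instruction's single 64-bit constant word. */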

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned bits = nir_src_bit_size(instr->src[i].src);
                alu.src_types[i] = nir_op_infos[instr->op].input_types[i]
                        | bits;

                /* Try to inline a constant */
                if (nir_src_is_const(instr->src[i].src) && constants_left && (dest_bits == bits)) {
                        alu.constant.u64 |=
                                (nir_src_as_uint(instr->src[i].src)) << constant_shift;

                        alu.src[i] = BIR_INDEX_CONSTANT | constant_shift;
                        --constants_left;
                        constant_shift += dest_bits;
                        continue;
                }

                alu.src[i] = bir_src_index(&instr->src[i].src);

                /* We assert scalarization above */
                alu.swizzle[i][0] = instr->src[i].swizzle[0];
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        case nir_op_fsin:
                alu.op.special = BI_SPECIAL_FSIN;
                break;
        case nir_op_fcos:
                alu.op.special = BI_SPECIAL_FCOS;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.op.compare = bi_cond_for_nir(instr->op, false);
                break;
        default:
                break;
        }

        bi_emit(ctx, alu);
}

static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

#if 0
        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;
#endif

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                //unreachable("Unhandled instruction type");
                break;
        }
}

static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->base.name = ctx->block_name_count++;

        return blk;
}
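
/* Used after instructions with scheduling constraints (see the ATEST/BLEND
 * emission above, which calls this): it closes out the current block and
 * switches emission to a fresh successor, so later instructions cannot end
 * up in the same block. */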

static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->base.link, &ctx->blocks);
        list_inithead(&ctx->after_block->base.instructions);
        pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}

static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}

/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}

/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions. */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
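
/* In other words, the branch compares the boolean against zero (src[1] is
 * BIR_INDEX_ZERO): the normal case takes the branch when cond != 0, and an
 * inverted branch takes it when cond == 0. */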

static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
        }

        /* Wire up the successors */

        pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */

        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
        pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}

static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        /* Pop off */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
        ++ctx->loop_count;
}

static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}

static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
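
/* This counts vec4 slots: a float or vec4 occupies one slot, a mat4 four,
 * and a float[3] three. The false argument (is_gl_vertex_input) means no
 * dual-slot vertex-input handling is applied here. */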

static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
                .lower_txd = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);

        /* We're a primary scalar architecture but there's enough vector that
         * we use a vector IR so let's not also deal with scalar hacks on top
         * of the vector hacks */

        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_lower_vec_to_movs);
        NIR_PASS(progress, nir, nir_opt_dce);
}

static void
bi_insert_mov32(bi_context *ctx, bi_instruction *parent, unsigned comp)
{
        bi_instruction move = {
                .type = BI_MOV,
                .dest = parent->dest,
                .dest_type = nir_type_uint32,
                .writemask = (0xF << (4 * comp)),
                .src = { parent->src[0] },
                .src_types = { nir_type_uint32 },
                .swizzle = { { comp } }
        };

        bi_emit_before(ctx, parent, move);
}
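
/* Example: comp = 2 yields writemask 0xF << 8 = 0xF00, a 32-bit move of the
 * third component only, with the swizzle selecting source component 2. */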

static void
bi_lower_mov(bi_context *ctx, bi_block *block)
{
        bi_foreach_instr_in_block_safe(block, ins) {
                if (ins->type != BI_MOV) continue;
                if (util_bitcount(ins->writemask) <= 4) continue;

                for (unsigned i = 0; i < 4; ++i) {
                        unsigned quad = (ins->writemask >> (4 * i)) & 0xF;

                        if (quad == 0)
                                continue;
                        else if (quad == 0xF)
                                bi_insert_mov32(ctx, ins, i);
                        else
                                unreachable("TODO: Lowering <32bit moves");
                }

                bi_remove_instruction(ins);
        }
}
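
/* Example: a BI_MOV with writemask 0xFF (two full 32-bit components) is
 * split into two 32-bit moves, one for component 0 and one for component 1,
 * and the original wide move is removed. */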

void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;
                bi_lower_mov(ctx, block);
        }

        bool progress = false;

        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        bi_print_shader(ctx, stdout);
        bi_schedule(ctx);
        bi_register_allocate(ctx);
        bi_print_shader(ctx, stdout);
        bi_pack(ctx, &program->compiled);
        disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}