/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"
static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_block_add_successor(bi_block *block, bi_block *successor);
static void bi_schedule_barrier(bi_context *ctx);
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        bi_block_add_successor(ctx->current_block, branch->branch.target);
}
/* Gets a bytemask for a complete vecN write */
static unsigned
bi_mask_for_channels_32(unsigned i)
{
        return (1 << (4 * i)) - 1;
}
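/* For example (editor's illustration, not in the original source): a vec4 of
 * 32-bit channels spans 4 * 4 = 16 bytes, so bi_mask_for_channels_32(4)
 * yields (1 << 16) - 1 = 0xffff, covering all sixteen byte lanes. */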
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .writemask = bi_mask_for_channels_32(instr->num_components),
                .src = { BIR_INDEX_CONSTANT },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = bir_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

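        /* Note (editorial addition): nir_get_io_offset_src returns the
         * indirect offset source of the I/O intrinsic. A compile-time
         * constant offset is folded into the .constant.u64 base above;
         * otherwise the offset occupies src[0] as an indirect index. */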
        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = bir_src_index(offset);

        return load;
}
static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        /* load_input (as opposed to load_interpolated_input) implies flat shading */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);
        bi_emit(ctx, ins);
}
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint64;
        address.writemask = (1 << 8) - 1;

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        address.dest,
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}
static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */
        bi_emit(ctx, ld);
}
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;

        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load(BI_LOAD_ATTR, instr));
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        default:
                /* todo */
                break;
        }
}
static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = bir_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}
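/* For instance (illustrative figure, not from the original source): a lowered
 * 32-bit scalar constant gets writemask (1 << (32 / 8)) - 1 = 0xf, i.e. one
 * 4-byte lane, and a dest_type of nir_type_uint32. */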
static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        /* ... per-opcode mappings elided ... */
        default:
                unreachable("Unknown ALU op");
        }
}
static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = bir_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        if (instr->dest.dest.is_ssa) {
                /* Construct a writemask */
                unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
                unsigned comps = instr->dest.dest.ssa.num_components;
                unsigned bits = bits_per_comp * comps;
                unsigned bytes = MAX2(bits / 8, 1);
                alu.writemask = (1 << bytes) - 1;
        } else {
                unsigned comp_mask = instr->dest.write_mask;

                alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
                                comp_mask);
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;
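        /* Illustrative note (editorial, not in the original): with a 32-bit
         * destination, constants_left starts at 64 / 32 = 2, so up to two
         * 32-bit constants can be packed into the instruction's 64-bit
         * immediate, at shifts 0 and 32 respectively. */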

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned bits = nir_src_bit_size(instr->src[i].src);
                alu.src_types[i] = nir_op_infos[instr->op].input_types[i]
                        | bits;

                /* Try to inline a constant */
                if (nir_src_is_const(instr->src[i].src) && constants_left && (dest_bits == bits)) {
                        alu.constant.u64 |=
                                (nir_src_as_uint(instr->src[i].src)) << constant_shift;

                        alu.src[i] = BIR_INDEX_CONSTANT | constant_shift;
                        --constants_left;
                        constant_shift += dest_bits;
                        continue;
                }

                alu.src[i] = bir_src_index(&instr->src[i].src);

                /* We assert scalarization above */
                alu.swizzle[i][0] = instr->src[i].swizzle[0];
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* MOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* MOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* MOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* ADD */
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        case nir_op_fsin:
                alu.op.special = BI_SPECIAL_FSIN;
                break;
        case nir_op_fcos:
                alu.op.special = BI_SPECIAL_FCOS;
                break;
        default:
                break;
        }

        bi_emit(ctx, alu);
}
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

#if 0
        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;
#endif

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                break;

        default:
                //unreachable("Unhandled instruction type");
                break;
        }
}
static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->name = ctx->block_name_count++;

        return blk;
}
static void
bi_block_add_successor(bi_block *block, bi_block *successor)
{
        assert(block);
        assert(successor);

        for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
                if (block->successors[i]) {
                        if (block->successors[i] == successor)
                                return;

                        continue;
                }

                block->successors[i] = successor;
                _mesa_set_add(successor->predecessors, block);
                return;
        }

        unreachable("Too many successors");
}
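/* Editorial note (not in the original): bi_schedule_barrier below forces
 * subsequent instructions into a fresh basic block. It allocates a new block,
 * wires it up as a successor of the current block, and makes it current, so
 * later scheduling cannot move instructions across the boundary (used around
 * ATEST/BLEND in bi_emit_frag_out). */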
static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->link, &ctx->blocks);
        list_inithead(&ctx->after_block->instructions);
        bi_block_add_successor(ctx->current_block, ctx->after_block);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->link, &ctx->blocks);
        list_inithead(&ctx->current_block->instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}
/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}
/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                bi_block_add_successor(end_then_block, then_exit->branch.target);
        }

        /* Wire up the successors */

        bi_block_add_successor(before_block, then_branch->branch.target); /* then_branch */

        bi_block_add_successor(before_block, then_block); /* fallthrough */
        bi_block_add_successor(end_else_block, ctx->after_block); /* fallthrough */
}
static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        bi_block_add_successor(start_block, ctx->continue_block);
        bi_block_add_successor(ctx->current_block, ctx->continue_block);

        ctx->after_block = ctx->break_block;

        /* Restore the saved break/continue blocks */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
}
static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);

        /* We're a primary scalar architecture but there's enough vector that
         * we use a vector IR so let's not also deal with scalar hacks on top
         * of the vector hacks */

        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_lower_vec_to_movs);
        NIR_PASS(progress, nir, nir_opt_dce);
}
void
bifrost_compile_shader_nir(nir_shader *nir, bifrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);

        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_print_shader(ctx, stdout);