/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bi_quirks.h"
static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_block_add_successor(bi_block *block, bi_block *successor);
static void bi_schedule_barrier(bi_context *ctx);
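
/* Breaks and continues lower to unconditional branches targeting the
 * break/continue blocks saved on the context by the enclosing loop */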
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        bi_block_add_successor(ctx->current_block, branch->branch.target);
}
/* Gets a bytemask for a complete vecN write */

static unsigned
bi_mask_for_channels_32(unsigned i)
{
        return (1 << (4 * i)) - 1;
}
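
/* Loads a varying with LD_VAR. The writemask covers every byte of the
 * destination (e.g. a 32-bit vec3 yields 0xFFF); a constant I/O offset folds
 * into the location, while an indirect offset becomes a source instead */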
static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = {
                .type = BI_LOAD_VAR,
                .load_vary = {
                        .load = {
                                .location = nir_intrinsic_base(instr),
                                .channels = instr->num_components,
                        },
                        .interp_mode = BIFROST_INTERP_DEFAULT, /* TODO */
                        .reuse = false, /* TODO */
                        .flat = instr->intrinsic != nir_intrinsic_load_interpolated_input
                },
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_type_float | nir_dest_bit_size(instr->dest),
                .writemask = bi_mask_for_channels_32(instr->num_components)
        };

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                ins.load_vary.load.location += nir_src_as_uint(*offset);
        else
                ins.src[0] = bir_src_index(offset);

        bi_emit(ctx, ins);
}
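
/* Fragment outputs emit an ATEST (alpha test) once per shader, then a BLEND
 * per store; both are followed by scheduling barriers */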
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}
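
/* Gathers the load descriptor (location/channels) shared by direct,
 * constant-offset loads of attributes, varyings, and uniforms */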
static struct bi_load
bi_direct_load_for_instr(nir_intrinsic_instr *instr)
{
        nir_src *offset = nir_get_io_offset_src(instr);
        assert(nir_src_is_const(*offset)); /* no indirects */

        struct bi_load load = {
                .location = nir_intrinsic_base(instr) + nir_src_as_uint(*offset),
                .channels = instr->num_components
        };

        return load;
}
static void
bi_emit_ld_attr(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = BI_LOAD_ATTR,
                .load = bi_direct_load_for_instr(instr),
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_intrinsic_type(instr),
                .writemask = bi_mask_for_channels_32(instr->num_components)
        };

        bi_emit(ctx, load);
}
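
/* Varying stores are two-part: compute the destination address with
 * LOAD_VAR_ADDRESS into a temporary, then STORE_VAR through it */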
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        nir_src *offset = nir_get_io_offset_src(instr);
        assert(nir_src_is_const(*offset)); /* no indirects */

        bi_instruction address = {
                .type = BI_LOAD_VAR_ADDRESS,
                .load = bi_direct_load_for_instr(instr),
                .dest_type = nir_intrinsic_type(instr),
                .dest = bi_make_temp(ctx),
                .writemask = bi_mask_for_channels_32(instr->num_components)
        };

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        address.dest,
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}
static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        /* TODO: Indirect access */

        bi_instruction ld = {
                .type = BI_LOAD_UNIFORM,
                .load = bi_direct_load_for_instr(instr),
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_intrinsic_type(instr),
                .writemask = bi_mask_for_channels_32(instr->num_components),
                .src = {
                        BIR_INDEX_ZERO /* TODO: UBOs */
                }
        };

        bi_emit(ctx, ld);
}
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;

        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_ld_attr(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        default:
                /* TODO */
                break;
        }
}
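
/* Constants are assumed scalarized by this point (hence the assert), so each
 * load_const becomes a single move of an inline immediate */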
static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = bir_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

#if 0
        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;
#endif

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                /* Spurious */
                break;

        default:
                //unreachable("Unhandled instruction type");
                break;
        }
}
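
/* Allocates a fresh block with a predecessor set and a sequential name for
 * the benefit of the IR printer */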
static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->name = ctx->block_name_count++;

        return blk;
}
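
/* Records a CFG edge, keeping the successor array and the predecessor set in
 * sync. Adding an already-present successor is a no-op */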
static void
bi_block_add_successor(bi_block *block, bi_block *successor)
{
        assert(block);
        assert(successor);

        for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
                if (block->successors[i]) {
                        if (block->successors[i] == successor)
                                return;
                        else
                                continue;
                }

                block->successors[i] = successor;
                _mesa_set_add(successor->predecessors, block);
                return;
        }

        unreachable("Too many successors");
}
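
/* Forces a block split at the current point: the current block is ended and
 * a fresh block becomes current, so nothing is scheduled across the barrier */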
static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->link, &ctx->blocks);
        list_inithead(&ctx->after_block->instructions);
        bi_block_add_successor(ctx->current_block, ctx->after_block);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->link, &ctx->blocks);
        list_inithead(&ctx->current_block->instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}
/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}
/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
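
/* Lowers an if: branch on the inverted condition to the else side (or past
 * the if entirely when the else is empty), fall through to the then side,
 * and jump from the end of the then side over the else */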
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                bi_block_add_successor(end_then_block, then_exit->branch.target);
        }

        /* Wire up the successors */

        bi_block_add_successor(before_block, then_branch->branch.target); /* then_branch */

        bi_block_add_successor(before_block, then_block); /* fallthrough */
        bi_block_add_successor(end_else_block, ctx->after_block); /* fallthrough */
}
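
/* Lowers a loop, saving and replacing the break/continue blocks so nested
 * jumps resolve to the innermost loop, then emitting a back-edge to the
 * continue block after the body */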
static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to the top of the loop */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        bi_block_add_successor(start_block, ctx->continue_block);
        bi_block_add_successor(ctx->current_block, ctx->continue_block);

        ctx->after_block = ctx->break_block;

        /* Restore the outer loop's break/continue targets */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
}
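
/* Walks a NIR control flow list, emitting each node in turn and returning
 * the first block emitted so callers can use it as a branch target */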
static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
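
/* Runs the usual NIR optimization loop to a fixed point, with the
 * Bifrost-specific lowerings (scalarization, fast idiv, flrp) mixed in,
 * then takes the shader out of SSA for the backend */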
static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
void
bifrost_compile_shader_nir(nir_shader *nir, bifrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_print_shader(ctx, stdout);

        ralloc_free(ctx);
}