/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bi_quirks.h"

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void bi_block_add_successor(bi_block *block, bi_block *successor);
static void bi_schedule_barrier(bi_context *ctx);
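
/* Emits a NIR jump (break/continue) as an unconditional branch to the
 * corresponding loop block and records the CFG edge */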
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch.target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch.target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        bi_block_add_successor(ctx->current_block, branch->branch.target);
}
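
/* Loads a varying (fragment input), folding a constant I/O offset into the
 * location and taking the offset as a source otherwise */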
static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = {
                .type = BI_LOAD_VAR,
                .load_vary = {
                        .load = {
                                .location = nir_intrinsic_base(instr),
                                .channels = instr->num_components,
                        },
                        .interp_mode = BIFROST_INTERP_DEFAULT, /* TODO */
                        .reuse = false, /* TODO */
                        .flat = instr->intrinsic != nir_intrinsic_load_interpolated_input
                },
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_type_float | nir_dest_bit_size(instr->dest),
        };

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                ins.load_vary.load.location += nir_src_as_uint(*offset);
        else
                ins.src[0] = bir_src_index(offset);

        bi_emit(ctx, ins);
}
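
/* Writes a fragment output: the first store also emits the alpha test and a
 * scheduling barrier, then each store blends into its render target location */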
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST
                };

                bi_emit(ctx, ins);
                bi_schedule_barrier(ctx);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, blend);
        bi_schedule_barrier(ctx);
}
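
/* Builds the shared load descriptor for direct (constant-offset) I/O;
 * indirect access is asserted away */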
static struct bi_load
bi_direct_load_for_instr(nir_intrinsic_instr *instr)
{
        nir_src *offset = nir_get_io_offset_src(instr);
        assert(nir_src_is_const(*offset)); /* no indirects */

        struct bi_load load = {
                .location = nir_intrinsic_base(instr) + nir_src_as_uint(*offset),
                .channels = instr->num_components
        };

        return load;
}
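
/* Loads a vertex attribute */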
static void
bi_emit_ld_attr(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = BI_LOAD_ATTR,
                .load = bi_direct_load_for_instr(instr),
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_intrinsic_type(instr)
        };

        bi_emit(ctx, load);
}
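
/* Stores a vertex shader output: computes the varying's address into a
 * temporary and then stores the value through it */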
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        nir_src *offset = nir_get_io_offset_src(instr);
        assert(nir_src_is_const(*offset)); /* no indirects */

        bi_instruction address = {
                .type = BI_LOAD_VAR_ADDRESS,
                .load = bi_direct_load_for_instr(instr),
                .dest_type = nir_intrinsic_type(instr),
                .dest = bi_make_temp(ctx)
        };

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        address.dest,
                        bir_src_index(&instr->src[0])
                }
        };

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}
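
/* Loads a uniform; indirect access and UBOs are still TODO */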
static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        /* TODO: Indirect access */

        bi_instruction ld = {
                .type = BI_LOAD_UNIFORM,
                .load = bi_direct_load_for_instr(instr),
                .dest = bir_dest_index(&instr->dest),
                .dest_type = nir_intrinsic_type(instr),
                .src = {
                        BIR_INDEX_ZERO /* TODO: UBOs */
                }
        };

        bi_emit(ctx, ld);
}
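
/* Dispatches an intrinsic to the appropriate emitter for the current shader
 * stage */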
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;

        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_ld_attr(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        default:
                break;
        }
}
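
/* Dispatches a NIR instruction to the corresponding emit function */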
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                break;

        default:
                //unreachable("Unhandled instruction type");
                break;
        }
}
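
/* Allocates a fresh block with an initialized predecessor set and a unique
 * name for debug printing */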
static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->name = ctx->block_name_count++;

        return blk;
}
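
/* Records a CFG edge, ignoring duplicate successors and keeping the
 * successor's predecessor set in sync */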
static void
bi_block_add_successor(bi_block *block, bi_block *successor)
{
        assert(block);
        assert(successor);

        for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
                if (block->successors[i]) {
                        if (block->successors[i] == successor)
                                return;
                        else
                                continue;
                }

                block->successors[i] = successor;
                _mesa_set_add(successor->predecessors, block);
                return;
        }

        unreachable("Too many successors");
}
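
/* Splits emission at a scheduling boundary: ends the current block and
 * continues in a fresh successor block */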
static void
bi_schedule_barrier(bi_context *ctx)
{
        bi_block *temp = ctx->after_block;
        ctx->after_block = create_empty_block(ctx);
        list_addtail(&ctx->after_block->link, &ctx->blocks);
        list_inithead(&ctx->after_block->instructions);
        bi_block_add_successor(ctx->current_block, ctx->after_block);
        ctx->current_block = ctx->after_block;
        ctx->after_block = temp;
}
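
/* Emits a NIR block, reusing a block that prior control flow set up if there
 * is one, and returns the resulting bi_block */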
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->link, &ctx->blocks);
        list_inithead(&ctx->current_block->instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}

/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .branch = {
                        .cond = BI_COND_ALWAYS
                }
        };

        return bi_emit(ctx, branch);
}

/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = bir_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
}
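
/* Lowers a NIR if: a conditional branch over the then block into the else
 * block (or straight to the merge block when the else is empty), plus the
 * fallthrough edges */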
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch.target = ctx->after_block;
        } else {
                then_branch->branch.target = else_block;
                then_exit->branch.target = ctx->after_block;
                bi_block_add_successor(end_then_block, then_exit->branch.target);
        }

        /* Wire up the successors */
        bi_block_add_successor(before_block, then_branch->branch.target); /* then_branch */
        bi_block_add_successor(before_block, then_block); /* fallthrough */
        bi_block_add_successor(end_else_block, ctx->after_block); /* fallthrough */
}
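
/* Lowers a NIR loop: allocates dedicated continue/break blocks, emits the
 * body between them, and wires up the back edge */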
static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch.target = ctx->continue_block;
        bi_block_add_successor(start_block, ctx->continue_block);
        bi_block_add_successor(ctx->current_block, ctx->continue_block);

        ctx->after_block = ctx->break_block;

        /* Restore the previous nesting level */
        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
}
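
/* Emits a control flow list and returns the first block emitted for it */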
static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}
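
/* Type-size callback for nir_lower_io, counting slots per variable */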
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
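
/* Runs the NIR optimization loop to a fixed point, then takes the shader out
 * of SSA for code generation */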
static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
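
/* Compiler entry point: lowers and optimizes the NIR, then walks the control
 * flow to emit BIR for each function */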
void
bifrost_compile_shader_nir(nir_shader *nir, bifrost_program *program, unsigned product_id)
{
        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);

        /* We have to lower ALU to scalar ourselves since viewport
         * transformations produce vector ops */
        NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);

        bi_optimize_nir(nir);
        nir_print_shader(nir, stdout);

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_print_shader(ctx, stdout);

        ralloc_free(ctx);
}