/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */
#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_debug.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "bifrost_nir.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_print.h"
static const struct debug_named_value debug_options[] = {
        {"msgs",      BIFROST_DBG_MSGS,    "Print debug messages"},
        {"shaders",   BIFROST_DBG_SHADERS, "Dump shaders in NIR and MIR"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(bifrost_debug, "BIFROST_MESA_DEBUG", debug_options, 0)

int bifrost_debug = 0;
#define DBG(fmt, ...) \
        do { if (bifrost_debug & BIFROST_DBG_MSGS) \
                fprintf(stderr, "%s:%d: "fmt, \
                        __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);
static bi_instruction *bi_emit_branch(bi_context *ctx);
static void
emit_jump(bi_context *ctx, nir_jump_instr *instr)
{
        bi_instruction *branch = bi_emit_branch(ctx);

        switch (instr->type) {
        case nir_jump_break:
                branch->branch_target = ctx->break_block;
                break;
        case nir_jump_continue:
                branch->branch_target = ctx->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        pan_block_add_successor(&ctx->current_block->base, &branch->branch_target->base);
}
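
/* Builds a generic load of class T (attribute/varying/uniform), taking the
 * base from the intrinsic and folding a constant offset into the inline
 * constant where possible. Returned by value so callers can patch sources
 * and types before emitting. */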
static bi_instruction
bi_load(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction load = {
                .type = T,
                .vector_channels = instr->num_components,
                .src = { BIR_INDEX_CONSTANT },
                .src_types = { nir_type_uint32 },
                .constant = { .u64 = nir_intrinsic_base(instr) },
        };

        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];

        if (info->has_dest)
                load.dest = pan_dest_index(&instr->dest);

        if (info->has_dest && info->index_map[NIR_INTRINSIC_TYPE] > 0)
                load.dest_type = nir_intrinsic_type(instr);

        nir_src *offset = nir_get_io_offset_src(instr);

        if (nir_src_is_const(*offset))
                load.constant.u64 += nir_src_as_uint(*offset);
        else
                load.src[0] = pan_src_index(offset);

        return load;
}
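
/* Loads a varying with LD_VAR, marked flat unless it came in via
 * nir_intrinsic_load_interpolated_input. src[1] is zeroed for direct
 * accesses and sourced from r61 for indirect ones. */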
static void
bi_emit_ld_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ins = bi_load(BI_LOAD_VAR, instr);
        ins.load_vary.interp_mode = BIFROST_INTERP_DEFAULT; /* TODO */
        ins.load_vary.reuse = false; /* TODO */
        ins.load_vary.flat = instr->intrinsic != nir_intrinsic_load_interpolated_input;
        ins.dest_type = nir_type_float | nir_dest_bit_size(instr->dest);

        if (nir_src_is_const(*nir_get_io_offset_src(instr))) {
                /* Zero it out for direct */
                ins.src[1] = BIR_INDEX_ZERO;
        } else {
                /* R61 contains sample mask stuff, TODO RA XXX */
                ins.src[1] = BIR_INDEX_REGISTER | 61;
        }

        bi_emit(ctx, ins);
}
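
/* Writes out a fragment colour: emits ATEST once per shader before the first
 * blend, then a BLEND to the render target given by the intrinsic base,
 * recording the source type so the blend epilogue can match it. */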
static void
bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
{
        if (!ctx->emitted_atest) {
                bi_instruction ins = {
                        .type = BI_ATEST,
                        .src = {
                                BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                                pan_src_index(&instr->src[0])
                        },
                        .src_types = {
                                nir_type_uint32,
                                nir_intrinsic_type(instr)
                        },
                        .swizzle = {
                                { 0 },
                                { 3, 0 } /* swizzle out the alpha */
                        },
                        .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                        .dest_type = nir_type_uint32,
                };

                bi_emit(ctx, ins);
                ctx->emitted_atest = true;
        }

        bi_instruction blend = {
                .type = BI_BLEND,
                .blend_location = nir_intrinsic_base(instr),
                .src = {
                        pan_src_index(&instr->src[0]),
                        BIR_INDEX_REGISTER | 60 /* Can this be arbitrary? */,
                },
                .src_types = {
                        nir_intrinsic_type(instr),
                        nir_type_uint32,
                },
                .swizzle = {
                        { 0, 1, 2, 3 },
                        { 0 }
                },
                .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                .dest_type = nir_type_uint32,
                .vector_channels = 4
        };

        assert(blend.blend_location < BIFROST_MAX_RENDER_TARGET_COUNT);
        assert(ctx->blend_types);
        assert(blend.src_types[0]);
        ctx->blend_types[blend.blend_location] = blend.src_types[0];

        bi_emit(ctx, blend);
}
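
/* Variant of bi_load that sources fixed registers r61/r62 in src[1]/src[2]
 * (pending proper RA, per the TODOs below), with the intrinsic's type on
 * src[3]. */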
static bi_instruction
bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(T, instr);
        ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */
        ld.src[2] = BIR_INDEX_REGISTER | 62;

        ld.src_types[1] = nir_type_uint32;
        ld.src_types[2] = nir_type_uint32;
        ld.src_types[3] = nir_intrinsic_type(instr);
        return ld;
}
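
/* Stores a varying from a vertex shader: computes the destination address
 * with LD_VAR_ADDRESS into a temporary, then emits a STORE_VAR of the data
 * through that address. */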
static void
bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
        address.dest = bi_make_temp(ctx);
        address.dest_type = nir_type_uint32;
        address.vector_channels = 3;

        unsigned nr = nir_intrinsic_src_components(instr, 0);
        assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));

        bi_instruction st = {
                .type = BI_STORE_VAR,
                .src = {
                        pan_src_index(&instr->src[0]),
                        address.dest, address.dest, address.dest,
                },
                .src_types = {
                        nir_type_uint32,
                        nir_type_uint32, nir_type_uint32, nir_type_uint32,
                },
                .vector_channels = nr,
        };

        for (unsigned i = 0; i < nr; ++i)
                st.swizzle[0][i] = i;

        bi_emit(ctx, address);
        bi_emit(ctx, st);
}
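
/* Uniform loads index past the sysval prefix, with each uniform slot
 * occupying 16 bytes. */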
static void
bi_emit_ld_uniform(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction ld = bi_load(BI_LOAD_UNIFORM, instr);
        ld.src[1] = BIR_INDEX_ZERO; /* TODO: UBO index */

        /* TODO: Indirect access, since we need to multiply by the element
         * size. I believe we can get this lowering automatically via
         * nir_lower_io (as mul instructions) with the proper options, but this
         * is TODO */
        assert(ld.src[0] & BIR_INDEX_CONSTANT);
        ld.constant.u64 += ctx->sysvals.sysval_count;
        ld.constant.u64 *= 16;

        bi_emit(ctx, ld);
}
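
/* Sysvals live in the uniform prefix: look up the slot assigned to this
 * instruction's sysval and emit a direct LOAD_UNIFORM from it. */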
static void
bi_emit_sysval(bi_context *ctx, nir_instr *instr,
               unsigned nr_components, unsigned offset)
{
        nir_dest nir_dest;

        /* Figure out which uniform this is */
        int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
        void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);

        /* Sysvals are prefix uniforms */
        unsigned uniform = ((uintptr_t) val) - 1;

        /* Emit the read itself -- this is never indirect */

        bi_instruction load = {
                .type = BI_LOAD_UNIFORM,
                .vector_channels = nr_components,
                .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .constant = { (uniform * 16) + offset },
                .dest = pan_dest_index(&nir_dest),
                .dest_type = nir_type_uint32, /* TODO */
        };

        bi_emit(ctx, load);
}
/* gl_FragCoord.xy = u16_to_f32(R59.xy) + 0.5
 * gl_FragCoord.z = ld_vary(fragz)
 * gl_FragCoord.w = ld_vary(fragw)
 */

static void
bi_emit_ld_frag_coord(bi_context *ctx, nir_intrinsic_instr *instr)
{
        /* Future proofing for mediump fragcoord at some point.. */
        nir_alu_type T = nir_type_float32;

        /* First, sketch a combine */
        bi_instruction combine = {
                .type = BI_COMBINE,
                .dest_type = nir_type_uint32,
                .dest = pan_dest_index(&instr->dest),
                .src_types = { T, T, T, T },
        };

        /* Second, handle xy */
        for (unsigned i = 0; i < 2; ++i) {
                bi_instruction conv = {
                        .type = BI_CONVERT,
                        .dest_type = T,
                        .dest = bi_make_temp(ctx),
                        .src = {
                                /* TODO: RA XXX */
                                BIR_INDEX_REGISTER | 59
                        },
                        .src_types = { nir_type_uint16 },
                        .swizzle = { { i } }
                };

                bi_instruction add = {
                        .type = BI_ADD,
                        .dest_type = T,
                        .dest = bi_make_temp(ctx),
                        .src = { conv.dest, BIR_INDEX_CONSTANT },
                        .src_types = { T, T },
                };

                float half = 0.5f;
                memcpy(&add.constant.u32, &half, sizeof(float));

                bi_emit(ctx, conv);
                bi_emit(ctx, add);

                combine.src[i] = add.dest;
        }

        /* Third, zw */
        for (unsigned i = 0; i < 2; ++i) {
                bi_instruction load = {
                        .type = BI_LOAD_VAR,
                        .load_vary = {
                                .interp_mode = BIFROST_INTERP_DEFAULT,
                                .reuse = false,
                                .flat = true
                        },
                        .vector_channels = 1,
                        .dest_type = nir_type_float32,
                        .dest = bi_make_temp(ctx),
                        .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                        .src_types = { nir_type_uint32, nir_type_uint32 },
                        .constant = {
                                .u32 = (i == 0) ? BIFROST_FRAGZ : BIFROST_FRAGW
                        }
                };

                bi_emit(ctx, load);

                combine.src[i + 2] = load.dest;
        }

        /* Finally, emit the combine */
        bi_emit(ctx, combine);
}
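
/* An unconditional discard is encoded as a conditional discard whose
 * condition always holds (zero == zero). */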
static void
bi_emit_discard(bi_context *ctx, nir_intrinsic_instr *instr)
{
        bi_instruction discard = {
                .type = BI_DISCARD,
                .cond = BI_COND_EQ,
                .src_types = { nir_type_uint32, nir_type_uint32 },
                .src = { BIR_INDEX_ZERO, BIR_INDEX_ZERO },
        };

        bi_emit(ctx, discard);
}
static void
bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
             unsigned *constants_left, unsigned *constant_shift,
             unsigned comps, bool float_only);
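
/* Emits a conditional discard, comparing the condition against zero and
 * optimistically trying to fuse the generating comparison into the discard
 * itself. */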
static void
bi_emit_discard_if(bi_context *ctx, nir_intrinsic_instr *instr)
{
        nir_src cond = instr->src[0];
        nir_alu_type T = nir_type_uint | nir_src_bit_size(cond);

        bi_instruction discard = {
                .type = BI_DISCARD,
                .cond = BI_COND_NE,
                .src_types = { T, T },
                .src = {
                        pan_src_index(&cond),
                        BIR_INDEX_ZERO
                },
        };

        /* Try to fuse in the condition */
        unsigned constants_left = 1, constant_shift = 0;

        /* Scalar so no swizzle */
        nir_alu_src wrap = {
                .src = instr->src[0]
        };

        /* May or may not succeed but we're optimistic */
        bi_fuse_cond(&discard, wrap, &constants_left, &constant_shift, 1, true);

        bi_emit(ctx, discard);
}
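
/* Dispatches an intrinsic to the stage-appropriate emitter. */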
static void
emit_intrinsic(bi_context *ctx, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_load_barycentric_pixel:
                /* stub */
                break;
        case nir_intrinsic_load_interpolated_input:
        case nir_intrinsic_load_input:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_ld_vary(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit(ctx, bi_load_with_r61(BI_LOAD_ATTR, instr));
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_store_output:
                if (ctx->stage == MESA_SHADER_FRAGMENT)
                        bi_emit_frag_out(ctx, instr);
                else if (ctx->stage == MESA_SHADER_VERTEX)
                        bi_emit_st_vary(ctx, instr);
                else
                        unreachable("Unsupported shader stage");
                break;

        case nir_intrinsic_load_uniform:
                bi_emit_ld_uniform(ctx, instr);
                break;

        case nir_intrinsic_load_frag_coord:
                bi_emit_ld_frag_coord(ctx, instr);
                break;

        case nir_intrinsic_discard:
                bi_emit_discard(ctx, instr);
                break;

        case nir_intrinsic_discard_if:
                bi_emit_discard_if(ctx, instr);
                break;

        case nir_intrinsic_load_ssbo_address:
                bi_emit_sysval(ctx, &instr->instr, 1, 0);
                break;

        case nir_intrinsic_get_buffer_size:
                bi_emit_sysval(ctx, &instr->instr, 1, 8);
                break;

        case nir_intrinsic_load_viewport_scale:
        case nir_intrinsic_load_viewport_offset:
        case nir_intrinsic_load_num_work_groups:
        case nir_intrinsic_load_sampler_lod_parameters_pan:
                bi_emit_sysval(ctx, &instr->instr, 3, 0);
                break;

        default:
                unreachable("Unknown intrinsic");
        }
}
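
/* A scalar load_const becomes a MOV from an inlined constant. */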
static void
emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
{
        /* Make sure we've been lowered */
        assert(instr->def.num_components == 1);

        bi_instruction move = {
                .type = BI_MOV,
                .dest = pan_ssa_index(&instr->def),
                .dest_type = instr->def.bit_size | nir_type_uint,
                .src = {
                        BIR_INDEX_CONSTANT
                },
                .src_types = {
                        instr->def.bit_size | nir_type_uint,
                },
                .constant = {
                        .u64 = nir_const_value_as_uint(instr->value[0], instr->def.bit_size)
                }
        };

        bi_emit(ctx, move);
}
#define BI_CASE_CMP(op) \
        case op##8: \
        case op##16: \
        case op##32: \

static enum bi_class
bi_class_for_nir_alu(nir_op op)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_ine)
                return BI_CMP;

        case nir_op_vec8:
        case nir_op_vec16:
                unreachable("should've been lowered");

        case nir_op_fround_even:
        case nir_op_fceil:
        case nir_op_ffloor:
        case nir_op_ftrunc:
                return BI_ROUND;

        default:
                unreachable("Unknown ALU op");
        }
}
/* Gets a bi_cond for a given NIR comparison opcode. In soft mode, it will
 * return BI_COND_ALWAYS as a sentinel if it fails to do so (when used for
 * optimizations). Otherwise it will bail (when used for primary code
 * generation). */

static enum bi_cond
bi_cond_for_nir(nir_op op, bool soft)
{
        switch (op) {
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
                return BI_COND_LT;

        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
                return BI_COND_GE;

        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
                return BI_COND_EQ;

        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                return BI_COND_NE;

        default:
                if (soft)
                        return BI_COND_ALWAYS;
                else
                        unreachable("Invalid compare");
        }
}
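
/* Copies NIR ALU source i into BIR source slot 'to', inlining constants into
 * the instruction's 64-bit constant word (reusing identical constants where
 * possible) and replicating the last swizzle component to fill undersized
 * vectors. */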
static void
bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
            unsigned *constants_left, unsigned *constant_shift, unsigned comps)
{
        unsigned bits = nir_src_bit_size(instr->src[i].src);
        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);

        alu->src_types[to] = nir_op_infos[instr->op].input_types[i]
                | bits;

        /* Try to inline a constant */
        if (nir_src_is_const(instr->src[i].src) && *constants_left && (dest_bits == bits)) {
                uint64_t mask = (1ull << dest_bits) - 1;
                uint64_t cons = nir_src_as_uint(instr->src[i].src);

                /* Try to reuse a constant */
                for (unsigned i = 0; i < (*constant_shift); i += dest_bits) {
                        if (((alu->constant.u64 >> i) & mask) == cons) {
                                alu->src[to] = BIR_INDEX_CONSTANT | i;
                                return;
                        }
                }

                alu->constant.u64 |= cons << *constant_shift;
                alu->src[to] = BIR_INDEX_CONSTANT | (*constant_shift);
                --(*constants_left);
                (*constant_shift) += MAX2(dest_bits, 32); /* lo/hi */
                return;
        }

        alu->src[to] = pan_src_index(&instr->src[i].src);

        /* Copy swizzle for all vectored components, replicating last component
         * to fill undersized */

        unsigned vec = alu->type == BI_COMBINE ? 1 :
                MAX2(1, 32 / dest_bits);

        for (unsigned j = 0; j < vec; ++j)
                alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
}
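
/* Attempts to fuse the comparison generating a boolean condition directly
 * into a csel/discard, rather than materializing the boolean first. */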
static void
bi_fuse_cond(bi_instruction *csel, nir_alu_src cond,
             unsigned *constants_left, unsigned *constant_shift,
             unsigned comps, bool float_only)
{
        /* Bail for vector weirdness */
        if (cond.swizzle[0] != 0)
                return;

        if (!cond.src.is_ssa)
                return;

        nir_ssa_def *def = cond.src.ssa;
        nir_instr *parent = def->parent_instr;

        if (parent->type != nir_instr_type_alu)
                return;

        nir_alu_instr *alu = nir_instr_as_alu(parent);

        /* Try to match a condition */
        enum bi_cond bcond = bi_cond_for_nir(alu->op, true);

        if (bcond == BI_COND_ALWAYS)
                return;

        /* Some instructions can't compare ints */
        if (float_only) {
                nir_alu_type T = nir_op_infos[alu->op].input_types[0];
                T = nir_alu_type_get_base_type(T);

                if (T != nir_type_float)
                        return;
        }

        /* We found one, let's fuse it in */
        csel->cond = bcond;
        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
}
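
/* Emits an ALU instruction: special functions are handled separately, the
 * rest get sources copied in with constant inlining, followed by per-opcode
 * fixups (modifiers, round modes, condition codes). */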
static void
emit_alu(bi_context *ctx, nir_alu_instr *instr)
{
        /* Try some special functions */
        switch (instr->op) {
        case nir_op_fexp2:
                bi_emit_fexp2(ctx, instr);
                return;
        case nir_op_flog2:
                bi_emit_flog2(ctx, instr);
                return;
        default:
                break;
        }

        /* Otherwise, assume it's something we can handle normally */
        bi_instruction alu = {
                .type = bi_class_for_nir_alu(instr->op),
                .dest = pan_dest_index(&instr->dest.dest),
                .dest_type = nir_op_infos[instr->op].output_type
                        | nir_dest_bit_size(instr->dest.dest),
        };

        /* TODO: Implement lowering of special functions for older Bifrost */
        assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));

        unsigned comps = nir_dest_num_components(instr->dest.dest);

        if (alu.type != BI_COMBINE)
                assert(comps <= MAX2(1, 32 / comps));

        if (!instr->dest.dest.is_ssa) {
                for (unsigned i = 0; i < comps; ++i)
                        assert(instr->dest.write_mask);
        }

        /* We inline constants as we go. This tracks how many constants have
         * been inlined, since we're limited to 64-bits of constants per
         * instruction */

        unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
        unsigned constants_left = (64 / dest_bits);
        unsigned constant_shift = 0;

        if (alu.type == BI_COMBINE)
                constants_left = 0;

        unsigned num_inputs = nir_op_infos[instr->op].num_inputs;
        assert(num_inputs <= ARRAY_SIZE(alu.src));

        for (unsigned i = 0; i < num_inputs; ++i) {
                unsigned f = 0;

                if (i && alu.type == BI_CSEL)
                        f++;

                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
        }

        /* Op-specific fixup */
        switch (instr->op) {
        case nir_op_fmul:
                alu.src[2] = BIR_INDEX_ZERO; /* FMA */
                alu.src_types[2] = alu.src_types[1];
                break;
        case nir_op_fsat:
                alu.outmod = BIFROST_SAT; /* FMOV */
                break;
        case nir_op_fneg:
                alu.src_neg[0] = true; /* FMOV */
                break;
        case nir_op_fabs:
                alu.src_abs[0] = true; /* FMOV */
                break;
        case nir_op_fsub:
                alu.src_neg[1] = true; /* FADD */
                break;
        case nir_op_iadd:
                alu.op.imath = BI_IMATH_ADD;
                break;
        case nir_op_isub:
                alu.op.imath = BI_IMATH_SUB;
                break;
        case nir_op_fmax:
        case nir_op_imax:
        case nir_op_umax:
                alu.op.minmax = BI_MINMAX_MAX; /* MINMAX */
                break;
        case nir_op_frcp:
                alu.op.special = BI_SPECIAL_FRCP;
                break;
        case nir_op_frsq:
                alu.op.special = BI_SPECIAL_FRSQ;
                break;
        BI_CASE_CMP(nir_op_flt)
        BI_CASE_CMP(nir_op_ilt)
        BI_CASE_CMP(nir_op_fge)
        BI_CASE_CMP(nir_op_ige)
        BI_CASE_CMP(nir_op_feq)
        BI_CASE_CMP(nir_op_ieq)
        BI_CASE_CMP(nir_op_fne)
        BI_CASE_CMP(nir_op_ine)
                alu.cond = bi_cond_for_nir(instr->op, false);
                break;
        case nir_op_fround_even:
                alu.roundmode = BIFROST_RTE;
                break;
        case nir_op_fceil:
                alu.roundmode = BIFROST_RTP;
                break;
        case nir_op_ffloor:
                alu.roundmode = BIFROST_RTN;
                break;
        case nir_op_ftrunc:
                alu.roundmode = BIFROST_RTZ;
                break;
        case nir_op_iand:
                alu.op.bitwise = BI_BITWISE_AND;
                break;
        case nir_op_ior:
                alu.op.bitwise = BI_BITWISE_OR;
                break;
        case nir_op_ixor:
                alu.op.bitwise = BI_BITWISE_XOR;
                break;
        default:
                break;
        }

        if (alu.type == BI_CSEL) {
                /* Default to csel3 */
                alu.cond = BI_COND_NE;
                alu.src[1] = BIR_INDEX_ZERO;
                alu.src_types[1] = alu.src_types[0];

                /* TODO: Reenable cond fusing when we can split up registers
                 * when scheduling */
#if 0
                bi_fuse_cond(&alu, instr->src[0],
                             &constants_left, &constant_shift, comps, false);
#endif
        } else if (alu.type == BI_BITWISE) {
                /* Implicit shift argument... at some point we should fold */
                alu.src[2] = BIR_INDEX_ZERO;
                alu.src_types[2] = alu.src_types[1];
        }

        bi_emit(ctx, alu);
}
/* TEX_COMPACT instructions assume normal 2D f32 operation but are more
 * space-efficient and with simpler RA/scheduling requirements */

static void
emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
{
        bi_instruction tex = {
                .type = BI_TEX,
                .op = { .texture = BI_TEX_COMPACT },
                .texture = {
                        .texture_index = instr->texture_index,
                        .sampler_index = instr->sampler_index,
                },
                .dest = pan_dest_index(&instr->dest),
                .dest_type = instr->dest_type,
                .src_types = { nir_type_float32, nir_type_float32 },
                .vector_channels = 4
        };

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                int index = pan_src_index(&instr->src[i].src);

                /* We were checked ahead-of-time */
                if (instr->src[i].src_type == nir_tex_src_lod)
                        continue;

                assert (instr->src[i].src_type == nir_tex_src_coord);

                tex.src[0] = index;
                tex.src[1] = index;
                tex.swizzle[0][0] = 0;
                tex.swizzle[1][0] = 1;
        }

        bi_emit(ctx, tex);
}
static void
emit_tex_full(bi_context *ctx, nir_tex_instr *instr)
{
        unreachable("stub");
}
/* Normal texture ops are tex for frag shaders and txl for vertex shaders with
 * lod a constant 0. Anything else needs a full texture op. */

static bool
bi_is_normal_tex(gl_shader_stage stage, nir_tex_instr *instr)
{
        if (stage == MESA_SHADER_FRAGMENT)
                return instr->op == nir_texop_tex;

        if (instr->op != nir_texop_txl)
                return false;

        for (unsigned i = 0; i < instr->num_srcs; ++i) {
                if (instr->src[i].src_type != nir_tex_src_lod)
                        continue;

                nir_src src = instr->src[i].src;

                if (!nir_src_is_const(src))
                        return false;

                if (nir_src_as_uint(src) != 0)
                        return false;
        }

        return true;
}
static void
emit_tex(bi_context *ctx, nir_tex_instr *instr)
{
        nir_alu_type base = nir_alu_type_get_base_type(instr->dest_type);
        unsigned sz = nir_dest_bit_size(instr->dest);
        instr->dest_type = base | sz;

        bool is_normal = bi_is_normal_tex(ctx->stage, instr);
        bool is_2d = instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
                     instr->sampler_dim == GLSL_SAMPLER_DIM_EXTERNAL;
        bool is_f = base == nir_type_float && (sz == 16 || sz == 32);

        bool is_compact = is_normal && is_2d && is_f && !instr->is_shadow;

        if (is_compact)
                emit_tex_compact(ctx, instr);
        else
                emit_tex_full(ctx, instr);
}
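
/* Dispatches a single NIR instruction to the appropriate emitter. */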
static void
emit_instr(bi_context *ctx, struct nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_load_const:
                emit_load_const(ctx, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_intrinsic:
                emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_alu:
                emit_alu(ctx, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_tex:
                emit_tex(ctx, nir_instr_as_tex(instr));
                break;

        case nir_instr_type_jump:
                emit_jump(ctx, nir_instr_as_jump(instr));
                break;

        case nir_instr_type_ssa_undef:
                break;

        default:
                unreachable("Unhandled instruction type");
        }
}
static bi_block *
create_empty_block(bi_context *ctx)
{
        bi_block *blk = rzalloc(ctx, bi_block);

        blk->base.predecessors = _mesa_set_create(blk,
                        _mesa_hash_pointer,
                        _mesa_key_pointer_equal);

        blk->base.name = ctx->block_name_count++;

        return blk;
}
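
/* Emits a NIR block, reusing a pending after_block (left over from control
 * flow) if one exists, and returns the corresponding BIR block. */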
static bi_block *
emit_block(bi_context *ctx, nir_block *block)
{
        if (ctx->after_block) {
                ctx->current_block = ctx->after_block;
                ctx->after_block = NULL;
        } else {
                ctx->current_block = create_empty_block(ctx);
        }

        list_addtail(&ctx->current_block->base.link, &ctx->blocks);
        list_inithead(&ctx->current_block->base.instructions);

        nir_foreach_instr(instr, block) {
                emit_instr(ctx, instr);
                ++ctx->instruction_count;
        }

        return ctx->current_block;
}
/* Emits an unconditional branch to the end of the current block, returning a
 * pointer so the user can fill in details */

static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
        bi_instruction branch = {
                .type = BI_BRANCH,
                .cond = BI_COND_ALWAYS
        };

        return bi_emit(ctx, branch);
}
/* Sets a condition for a branch by examining the NIR condition. If we're
 * familiar with the condition, we unwrap it to fold it into the branch
 * instruction. Otherwise, we consume the condition directly. We
 * generally use 1-bit booleans which allows us to use small types for
 * the conditions.
 */

static void
bi_set_branch_cond(bi_instruction *branch, nir_src *cond, bool invert)
{
        /* TODO: Try to unwrap instead of always bailing */
        branch->src[0] = pan_src_index(cond);
        branch->src[1] = BIR_INDEX_ZERO;
        branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
        branch->cond = invert ? BI_COND_EQ : BI_COND_NE;
}
static void
emit_if(bi_context *ctx, nir_if *nif)
{
        bi_block *before_block = ctx->current_block;

        /* Speculatively emit the branch, but we can't fill it in until later */
        bi_instruction *then_branch = bi_emit_branch(ctx);
        bi_set_branch_cond(then_branch, &nif->condition, true);

        /* Emit the two subblocks. */
        bi_block *then_block = emit_cf_list(ctx, &nif->then_list);
        bi_block *end_then_block = ctx->current_block;

        /* Emit a jump from the end of the then block to the end of the else */
        bi_instruction *then_exit = bi_emit_branch(ctx);

        /* Emit second block, and check if it's empty */

        int count_in = ctx->instruction_count;
        bi_block *else_block = emit_cf_list(ctx, &nif->else_list);
        bi_block *end_else_block = ctx->current_block;
        ctx->after_block = create_empty_block(ctx);

        /* Now that we have the subblocks emitted, fix up the branches */

        assert(then_block);
        assert(else_block);

        if (ctx->instruction_count == count_in) {
                /* The else block is empty, so don't emit an exit jump */
                bi_remove_instruction(then_exit);
                then_branch->branch_target = ctx->after_block;
        } else {
                then_branch->branch_target = else_block;
                then_exit->branch_target = ctx->after_block;
                pan_block_add_successor(&end_then_block->base, &then_exit->branch_target->base);
        }

        /* Wire up the successors */

        pan_block_add_successor(&before_block->base, &then_branch->branch_target->base); /* then_branch */

        pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
        pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
}
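
/* Emits a loop: the continue block heads the body, the break block follows
 * the loop, and an unconditional back-branch closes the loop. */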
static void
emit_loop(bi_context *ctx, nir_loop *nloop)
{
        /* Remember where we are */
        bi_block *start_block = ctx->current_block;

        bi_block *saved_break = ctx->break_block;
        bi_block *saved_continue = ctx->continue_block;

        ctx->continue_block = create_empty_block(ctx);
        ctx->break_block = create_empty_block(ctx);
        ctx->after_block = ctx->continue_block;

        /* Emit the body itself */
        emit_cf_list(ctx, &nloop->body);

        /* Branch back to loop back */
        bi_instruction *br_back = bi_emit_branch(ctx);
        br_back->branch_target = ctx->continue_block;
        pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
        pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);

        ctx->after_block = ctx->break_block;

        ctx->break_block = saved_break;
        ctx->continue_block = saved_continue;
}
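
/* Emits a NIR control-flow list, returning the first emitted block. */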
static bi_block *
emit_cf_list(bi_context *ctx, struct exec_list *list)
{
        bi_block *start_block = NULL;

        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block: {
                        bi_block *block = emit_block(ctx, nir_cf_node_as_block(node));

                        if (!start_block)
                                start_block = block;

                        break;
                }

                case nir_cf_node_if:
                        emit_if(ctx, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        emit_loop(ctx, nir_cf_node_as_loop(node));
                        break;

                default:
                        unreachable("Unknown control flow");
                }
        }

        return start_block;
}
static int
glsl_type_size(const struct glsl_type *type, bool bindless)
{
        return glsl_count_attribute_slots(type, false);
}
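
/* Runs the standard NIR optimization loop, lowers Bifrost-specific algebraic
 * patterns, scalarizes, and finally takes the shader out of SSA. */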
static void
bi_optimize_nir(nir_shader *nir)
{
        bool progress;
        unsigned lower_flrp = 16 | 32 | 64;

        NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
        NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);

        nir_lower_tex_options lower_tex_options = {
                .lower_txs_lod = true,
                .lower_txp = ~0,
                .lower_tex_without_implicit_lod = true,
        };

        NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        do {
                progress = false;

                NIR_PASS(progress, nir, nir_lower_var_copies);
                NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

                NIR_PASS(progress, nir, nir_copy_prop);
                NIR_PASS(progress, nir, nir_opt_remove_phis);
                NIR_PASS(progress, nir, nir_opt_dce);
                NIR_PASS(progress, nir, nir_opt_dead_cf);
                NIR_PASS(progress, nir, nir_opt_cse);
                NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
                NIR_PASS(progress, nir, nir_opt_algebraic);
                NIR_PASS(progress, nir, nir_opt_constant_folding);

                if (lower_flrp != 0) {
                        bool lower_flrp_progress = false;
                        NIR_PASS(lower_flrp_progress,
                                 nir,
                                 nir_lower_flrp,
                                 lower_flrp,
                                 false /* always_precise */,
                                 nir->options->lower_ffma);
                        if (lower_flrp_progress) {
                                NIR_PASS(progress, nir,
                                         nir_opt_constant_folding);
                                progress = true;
                        }

                        /* Nothing should rematerialize any flrps, so we only
                         * need to do this lowering once.
                         */
                        lower_flrp = 0;
                }

                NIR_PASS(progress, nir, nir_opt_undef);
                NIR_PASS(progress, nir, nir_opt_loop_unroll,
                         nir_var_shader_in |
                         nir_var_shader_out |
                         nir_var_function_temp);
        } while (progress);

        NIR_PASS(progress, nir, nir_opt_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_bool_to_int32);
        NIR_PASS(progress, nir, bifrost_nir_lower_algebraic_late);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, nir, nir_lower_load_const_to_scalar);

        /* Take us out of SSA */
        NIR_PASS(progress, nir, nir_lower_locals_to_regs);
        NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
        NIR_PASS(progress, nir, nir_convert_from_ssa, true);
}
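
/* Compiles a NIR shader to packed Bifrost code, filling in program->compiled
 * and the sysval table. */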
void
bifrost_compile_shader_nir(nir_shader *nir, panfrost_program *program, unsigned product_id)
{
        bifrost_debug = debug_get_option_bifrost_debug();

        bi_context *ctx = rzalloc(NULL, bi_context);
        ctx->nir = nir;
        ctx->stage = nir->info.stage;
        ctx->quirks = bifrost_get_quirks(product_id);
        list_inithead(&ctx->blocks);

        /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
         * (so we don't accidentally duplicate the epilogue since mesa/st has
         * messed with our I/O quite a bit already) */

        NIR_PASS_V(nir, nir_lower_vars_to_ssa);

        if (ctx->stage == MESA_SHADER_VERTEX) {
                NIR_PASS_V(nir, nir_lower_viewport_transform);
                NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
        }

        NIR_PASS_V(nir, nir_split_var_copies);
        NIR_PASS_V(nir, nir_lower_global_vars_to_local);
        NIR_PASS_V(nir, nir_lower_var_copies);
        NIR_PASS_V(nir, nir_lower_vars_to_ssa);
        NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
        NIR_PASS_V(nir, nir_lower_ssbo);
        NIR_PASS_V(nir, nir_lower_mediump_outputs);

        bi_optimize_nir(nir);

        if (bifrost_debug & BIFROST_DBG_SHADERS) {
                nir_print_shader(nir, stdout);
        }

        panfrost_nir_assign_sysvals(&ctx->sysvals, nir);
        program->sysval_count = ctx->sysvals.sysval_count;
        memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
        ctx->blend_types = program->blend_types;

        nir_foreach_function(func, nir) {
                if (!func->impl)
                        continue;

                ctx->impl = func->impl;
                emit_cf_list(ctx, &func->impl->body);
                break; /* TODO: Multi-function shaders */
        }

        bi_foreach_block(ctx, _block) {
                bi_block *block = (bi_block *) _block;
                bi_lower_combine(ctx, block);
        }

        bool progress = false;

        do {
                progress = false;

                bi_foreach_block(ctx, _block) {
                        bi_block *block = (bi_block *) _block;
                        progress |= bi_opt_dead_code_eliminate(ctx, block);
                }
        } while (progress);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);
        bi_schedule(ctx);
        bi_register_allocate(ctx);
        if (bifrost_debug & BIFROST_DBG_SHADERS)
                bi_print_shader(ctx, stdout);
        bi_pack(ctx, &program->compiled);

        if (bifrost_debug & BIFROST_DBG_SHADERS)
                disassemble_bifrost(stdout, program->compiled.data, program->compiled.size, true);

        ralloc_free(ctx);
}