/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */
#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"
#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif

#if __cplusplus >= 201103L
using std::unordered_map;
#else
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}
class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr *, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);
   // returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value does not have a constant part, the Value is returned
   // through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
   void setInterpolate(nv50_ir_varying *,
                       uint8_t mode,
                       bool centroid,
                       unsigned semantic);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);
   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);
   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_deref_instr *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr *);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target &);

   uint16_t handleDeref(nir_deref_instr *, Value * &indirect, const nir_variable * &);
   CacheMode getCacheModeFromVar(const nir_variable *);
   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirArrayLMemOffsets regToLmemOffset;
   NirBlockMap blocks;
   unsigned int curLoopDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;
   Value *clipVtx[4];

   Value *outBase;

   union {
      struct {
         Value *position;
      } fp;
   };
};
Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}
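
// Returns the BasicBlock for a NIR block, creating and caching it on first
// use so that forward branches can refer to blocks not visited yet.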
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}
bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default: {
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
   }
}
DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}
std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}
operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   // ...
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   // ...
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   // ...
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   // ...
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   // ...
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   // ...
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   // ...
   case nir_texop_txf_ms:
      return OP_TXF;
   // ...
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}
operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_deref_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
   case nir_intrinsic_image_deref_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_deref_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}
operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   default:
      return 0;
   }
}
int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
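
   // note: NIR's finer-grained memory barrier variants all collapse to just
   // two scopes here: a GL (globally visible) or CTA (workgroup) level MEMBAR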
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}
CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   // ...
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}
Converter::LValues&
Converter::convert(nir_register *reg)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}
Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}
uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}
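
// The non-scalar variant below shifts the indirect value left by 4, i.e.
// multiplies it by 16, turning a vec4 slot index into a byte offset.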
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}
static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      return;
   }
}
void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = true;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = true;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = true;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = true;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->uniform_locations();
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->uniform_locations();
      else
         slots = type->fields.array->uniform_locations();
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}
bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;
   info->numOutputs = 0;

   // we have to fixup the uniform locations for arrays
   unsigned numImages = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      const glsl_type *type = var->type;
      if (!type->without_array()->is_image())
         continue;
      var->data.driver_location = numImages;
      numImages += type->is_array() ? type->arrays_of_arrays_size() : 1;
   }

   info->numSysVals = 0;
   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
      if (!(nir->info.system_values_read & 1ull << i))
         continue;

      info->sv[info->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info->sv[info->numSysVals].si = 0;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;
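
   // note on the mask math in the loops below: a 64-bit component occupies
   // two 32-bit slot components, so for 64-bit types the write mask is
   // computed at doubled width and split across two consecutive slots
   // (low half & 0xf, high half >> 4)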
   nir_foreach_variable(var, &nir->inputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->in[vary].id = vary;
         info->in[vary].patch = var->data.patch;
         info->in[vary].sn = name;
         info->in[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->in[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->in[vary].mask |= ((1 << comp) - 1) << frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }
   nir_foreach_variable(var, &nir->outputs) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t comp = type->is_array() ? type->without_array()->component_slots()
                                       : type->component_slots();
      uint32_t frac = var->data.location_frac;
      uint32_t vary = var->data.driver_location;

      if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
         if (comp > 2)
            slots *= 2;
      }

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;

            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info->prop.fp.separateFragData = true;

            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         info->out[vary].id = vary;
         info->out[vary].patch = var->data.patch;
         info->out[vary].sn = name;
         info->out[vary].si = index + i;
         if (glsl_base_type_is_64bit(type->without_array()->base_type))
            if (i & 0x1)
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) >> 0x4);
            else
               info->out[vary].mask |= (((1 << (comp * 2)) - 1) << (frac * 2) & 0xf);
         else
            info->out[vary].mask |= ((1 << comp) - 1) << frac;

         if (nir->info.outputs_read & 1ull << slot)
            info->out[vary].oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }
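
   // generated user clip planes are lowered to clip distances, which get
   // packed four per vec4 output appended after the regular outputs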
   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      // ...
   }

   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}
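
// 64-bit values can't be fetched in one go from const/buffer space (or
// through an indirect address), so loadFrom splits such loads into two
// 32-bit loads and merges the halves back together.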
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}
bool
Converter::parseNIR()
{
   info->bin.tlsSpace = 0;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize = nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      info->prop.fp.persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.inputPrim = nir->info.gs.input_primitive;
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}
bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }
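
   // stage specific setup: TCS keeps the base lane of the current patch
   // (SV_LANEID - SV_INVOCATION_ID) around for per-vertex output addressing,
   // and FS computes 1/gl_FragCoord.w once up front, since PINTERP needs it
   // for the perspective division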
   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }
   nir_foreach_register(reg, &function->impl->registers) {
      if (reg->num_array_elems) {
         // TODO: packed variables would be nice, but MemoryOpt fails
         // replace 4 with reg->num_components
         uint32_t size = 4 * reg->num_array_elems * (reg->bit_size / 8);
         regToLmemOffset[reg->index] = info->bin.tlsSpace;
         info->bin.tlsSpace += size;
      }
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}
bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}
bool
Converter::visit(nir_if *nif)
{
   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   assert(!lastThen->successors[1]);
   assert(!lastElse->successors[1]);

   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   // we only insert joinats if both branches end up at the end of the if
   // again. the reasons for this not to happen are breaks/continues/ret/...,
   // which have their own handling
   if (lastThen->successors[0] == lastElse->successors[0])
      bb->joinAt = mkFlow(OP_JOINAT, convert(lastThen->successors[0]),
                          CC_ALWAYS, NULL);

   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastThen), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastElse), true);
   if (!bb->getExit() ||
       !bb->getExit()->asFlow() ||
       bb->getExit()->asFlow()->op == OP_JOIN) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   }

   if (lastThen->successors[0] == lastElse->successors[0]) {
      setPosition(convert(lastThen->successors[0]), true);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB =
      convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));
   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   Instruction *insn = bb->getExit();
   if (bb->cfg.incidentCount() != 0) {
      if (!insn || !insn->asFlow()) {
         mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
         bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
      } else if (insn && insn->op == OP_BRA && !insn->getPredicate() &&
                 tailBB->cfg.incidentCount() == 0) {
         // RA doesn't like having blocks around with no incident edge,
         // so we create a fake one to make it happy
         bb->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);
      }
   }

   curLoopDepth -= 1;

   return true;
}
bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on the fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_deref:
      return visit(nir_instr_as_deref(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}
SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}
bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
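   // note: nir_intrinsic_load_uniform offsets are counted in vec4 slots,
   // hence the "16 *" byte scaling below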
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      uint8_t offset = 0;
      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR, TGSI stores that
               // value in the z component, NIR in X
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
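   // loading from outputs in a fragment shader is a framebuffer fetch; it is
   // implemented as a levelZero TXF from a 2D MS array texture addressed by
   // (gl_FragCoord.xy, layer, sample)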
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < dest_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }
      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         mode = translateInterpMode(&vary, nvirOp);
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode |= immMode.reg.data.u32;
         }
      }

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_kernel_input: {
      assert(prog->getType() == Program::TYPE_COMPUTE);
      assert(insn->num_components == 1);

      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t idx = getIndirect(insn, 0, 0, indirect, true);

      mkLoad(dType, newDefs[0], mkSymbol(FILE_SHADER_INPUT, 0, dType, idx), indirect);
      break;
   }
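   // for load_barycentric_at_offset the float offset is clamped to
   // [-0.5, 0.4375], scaled by 4096 (i.e. turned into signed fixed point
   // with 12 fraction bits, presumably what the interpolator's offset mode
   // expects) and x/y are packed into one register via INSBF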
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
         Value *offs[2];
         for (uint8_t c = 0; c < 2; c++) {
            offs[c] = getScratch();
            mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
            mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
            mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
            mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
         }
         mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);

         mode = NV50_IR_INTERP_OFFSET;
      } else if (op == nir_intrinsic_load_barycentric_pixel) {
         mode = NV50_IR_INTERP_DEFAULT;
      } else if (op == nir_intrinsic_load_barycentric_at_sample) {
         info->prop.fp.readsSampleLocations = true;
         mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
         mode = NV50_IR_INTERP_OFFSET;
      } else {
         unreachable("all intrinsics already handled above");
      }

      loadImm(newDefs[1], mode);
      break;
   }
   case nir_intrinsic_discard:
      mkOp(OP_DISCARD, TYPE_NONE, NULL);
      break;
   case nir_intrinsic_discard_if: {
      Value *pred = getSSA(1, FILE_PREDICATE);
      if (insn->num_components > 1) {
         ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
         assert(false);
         return false;
      }
      mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
      break;
   }
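   // system values are read via RDSV; a local invocation id along a
   // dimension of size 1 is known to be 0, and 64-bit destinations get
   // zero-extended with a MERGE against a 0 immediate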
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_work_group_id: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
      LValues &newDefs = convert(&insn->dest);

      for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
         Value *def;
         if (typeSizeof(dType) == 8)
            def = getSSA();
         else
            def = newDefs[i];

         if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
            loadImm(def, 0u);
         } else {
            Symbol *sym = mkSysVal(sv, i);
            Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
            if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
               rdsv->perPatch = 1;
         }

         if (typeSizeof(dType) == 8)
            mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
      }
      break;
   }
   case nir_intrinsic_load_subgroup_size: {
      LValues &newDefs = convert(&insn->dest);
      loadImm(newDefs[0], 32u);
      break;
   }
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq: {
      LValues &newDefs = convert(&insn->dest);
      Value *pred = getScratch(1, FILE_PREDICATE);
      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
      mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
      break;
   }
   case nir_intrinsic_ballot: {
      LValues &newDefs = convert(&insn->dest);
      Value *pred = getSSA(1, FILE_PREDICATE);
      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
      break;
   }
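   // read_first_invocation has no direct hardware op: vote.any(true) yields
   // the mask of active lanes, BREV + BFIND then locate the lowest active
   // lane, and SHFL.IDX broadcasts the value from that lane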
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_read_invocation: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *tmp = getScratch();

      if (op == nir_intrinsic_read_first_invocation) {
         mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
         mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
         mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      } else
         tmp = getSrc(&insn->src[1], 0);

      for (uint8_t i = 0; i < dest_components; ++i) {
         mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
            ->subOp = NV50_IR_SUBOP_SHFL_IDX;
      }
      break;
   }
   case nir_intrinsic_load_per_vertex_input: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
      uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);

      Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              mkImm(baseVertex), indirectVertex);
      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
                  indirectOffset, vtxBase, info->in[idx].patch);
      }
      break;
   }
   case nir_intrinsic_load_per_vertex_output: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
      uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
      Value *vtxBase = NULL;

      if (indirectVertex)
         vtxBase = indirectVertex;
      else
         vtxBase = loadImm(NULL, baseVertex);

      vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
                  indirectOffset, vtxBase, info->in[idx].patch);
      }
      break;
   }
   case nir_intrinsic_emit_vertex:
      if (info->io.genUserClip > 0)
         handleUserClipPlanes();
      /* fallthrough */
   case nir_intrinsic_end_primitive: {
      uint32_t idx = nir_intrinsic_stream_id(insn);
      mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
      break;
   }
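   // note: the UBO index is biased by 1 below because constbuf file 0 holds
   // the regular uniforms (see load_uniform above)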
   case nir_intrinsic_load_ubo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectIndex;
      Value *indirectOffset;
      uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
                  indirectOffset, indirectIndex);
      }
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirectBuffer;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
      mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
      break;
   }
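   // io.globalAccess appears to be a read/write bitmask: 0x1 marks reads and
   // 0x2 marks writes of globally visible memory (SSBOs, global atomics,
   // images)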
   case nir_intrinsic_store_ssbo: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
                                offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
            ->setIndirect(0, 1, indirectBuffer);
      }
      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_load_ssbo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i)
         loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
                  indirectOffset, indirectBuffer);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
      Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      if (op == nir_intrinsic_shared_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[2], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
                                getSrc(&insn->src[2], 0));
      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[3], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->setIndirect(0, 1, indirectBuffer);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *address;
      uint32_t offset = getIndirect(&insn->src[0], 0, address);

      Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
      Instruction *atom =
         mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      atom->setIndirect(0, 0, address);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
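   // Image intrinsics are translated to TexInstructions (the surface
   // load/store/atomic forms). For the bindless variants, component 0 of the
   // handle in src[0] is attached as the indirect resource source.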
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_bindless_image_store: {
      std::vector<Value*> srcs, defs;
      Value *indirect = getSrc(&insn->src[0], 0);
      DataType ty;

      uint32_t mask = 0;
      TexInstruction::Target target =
         convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
      unsigned int argCount = getNIRArgCount(target);
      uint16_t location = 0;

      if (opInfo.has_dest) {
         LValues &newDefs = convert(&insn->dest);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }
      }

      switch (op) {
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
         ty = getDType(insn);
         mask = 0x1;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_bindless_image_load:
         ty = TYPE_U32;
         info->io.globalAccess |= 0x1;
         break;
      case nir_intrinsic_bindless_image_store:
         ty = TYPE_U32;
         mask = 0xf;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_bindless_image_samples:
         mask = 0x8;
         ty = TYPE_U32;
         break;
      case nir_intrinsic_bindless_image_size:
         ty = TYPE_U32;
         break;
      default:
         unreachable("unhandled image opcode");
         break;
      }

      if (opInfo.num_srcs >= 2)
         for (unsigned int i = 0u; i < argCount; ++i)
            srcs.push_back(getSrc(&insn->src[1], i));

      // the sampler is just another src added after coords
      if (opInfo.num_srcs >= 3 && target.isMS())
         srcs.push_back(getSrc(&insn->src[2], 0));

      if (opInfo.num_srcs >= 4) {
         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
         for (uint8_t i = 0u; i < components; ++i)
            srcs.push_back(getSrc(&insn->src[3], i));
      }

      if (opInfo.num_srcs >= 5)
         // 1 for atomic swap
         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
            srcs.push_back(getSrc(&insn->src[4], i));

      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
      texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
      texi->tex.mask = mask;
      texi->tex.bindless = true;
      texi->cache = convert(nir_intrinsic_access(insn));
      texi->setType(ty);
      texi->subOp = getSubOp(op);

      if (indirect)
         texi->setIndirectR(indirect);

      break;
   }
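   // The deref image variants mirror the bindless ones above, except that the
   // location, image format and cache mode are taken from the nir_variable
   // that handleDeref() recovers from the deref chain.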
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_store: {
      const nir_variable *tex;
      std::vector<Value*> srcs, defs;
      Value *indirect;
      DataType ty;

      uint32_t mask = 0;
      nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
      const glsl_type *type = deref->type;
      TexInstruction::Target target =
         convert((glsl_sampler_dim)type->sampler_dimensionality,
                 type->sampler_array, type->sampler_shadow);
      unsigned int argCount = getNIRArgCount(target);
      uint16_t location = handleDeref(deref, indirect, tex);

      if (opInfo.has_dest) {
         LValues &newDefs = convert(&insn->dest);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }
      }

      switch (op) {
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_comp_swap:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
         ty = getDType(insn);
         mask = 0x1;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_image_deref_load:
         ty = TYPE_U32;
         info->io.globalAccess |= 0x1;
         break;
      case nir_intrinsic_image_deref_store:
         ty = TYPE_U32;
         mask = 0xf;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_image_deref_samples:
         mask = 0x8;
         ty = TYPE_U32;
         break;
      case nir_intrinsic_image_deref_size:
         ty = TYPE_U32;
         break;
      default:
         unreachable("unhandled image opcode");
         break;
      }

      if (opInfo.num_srcs >= 2)
         for (unsigned int i = 0u; i < argCount; ++i)
            srcs.push_back(getSrc(&insn->src[1], i));

      // the sampler is just another src added after coords
      if (opInfo.num_srcs >= 3 && target.isMS())
         srcs.push_back(getSrc(&insn->src[2], 0));

      if (opInfo.num_srcs >= 4) {
         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
         for (uint8_t i = 0u; i < components; ++i)
            srcs.push_back(getSrc(&insn->src[3], i));
      }

      if (opInfo.num_srcs >= 5)
         // 1 for atomic swap
         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
            srcs.push_back(getSrc(&insn->src[4], i));

      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
      texi->tex.bindless = false;
      texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(tex->data.image.format);
      texi->tex.mask = mask;
      texi->cache = getCacheModeFromVar(tex);
      texi->setType(ty);
      texi->subOp = getSubOp(op);

      if (indirect)
         texi->setIndirectR(indirect);

      break;
   }
   case nir_intrinsic_store_shared: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
      }
      break;
   }
   case nir_intrinsic_load_shared: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i)
         loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);

      break;
   }
   case nir_intrinsic_control_barrier: {
      // TODO: add flag to shader_info
      info->numBarriers = 1;
      Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
      bar->fixed = 1;
      bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
      break;
   }
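   // All NIR memory-barrier flavors become OP_MEMBAR; getSubOp() is expected
   // to pick the appropriate scope for each flavor.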
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_shared: {
      Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
      bar->fixed = 1;
      bar->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_shader_clock: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);

      loadImm(newDefs[0], 0u);
      mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
      break;
   }
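   // For global loads/stores the address itself is the indirect source; the
   // constant part that getIndirect() strips off becomes the symbol offset.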
   case nir_intrinsic_load_global: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (auto i = 0u; i < dest_components; ++i)
         loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_store_global: {
      DataType sType = getSType(insn->src[0], false, false);

      for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         if (typeSizeof(sType) == 8) {
            Value *split[2];
            mkSplit(split, 4, getSrc(&insn->src[0], i));

            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);

            sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
         } else {
            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
            mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
         }
      }

      info->io.globalAccess |= 0x2;
      break;
   }
   default:
      ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
      return false;
   }

   return true;
}
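// NIR jumps map onto nv50 ir flow ops; the CFG edge class records how the
// edge behaves (CROSS for forward break-style edges, BACK for continue-style
// loop edges).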
bool
Converter::visit(nir_jump_instr *insn)
{
   switch (insn->type) {
   case nir_jump_return:
      // TODO: this only works in the main function
      mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
      bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
      break;
   case nir_jump_break:
   case nir_jump_continue: {
      bool isBreak = insn->type == nir_jump_break;
      nir_block *block = insn->instr.block;
      assert(!block->successors[1]);
      BasicBlock *target = convert(block->successors[0]);
      mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
      bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
      break;
   }
   default:
      ERROR("unknown nir_jump_type %u\n", insn->type);
      return false;
   }

   return true;
}
Value*
Converter::convert(nir_load_const_instr *insn, uint8_t idx)
{
   Value *val;

   if (immInsertPos)
      setPosition(immInsertPos, true);
   else
      setPosition(bb, false);

   switch (insn->def.bit_size) {
   case 64:
      val = loadImm(getSSA(8), insn->value[idx].u64);
      break;
   case 32:
      val = loadImm(getSSA(4), insn->value[idx].u32);
      break;
   case 16:
      val = loadImm(getSSA(2), insn->value[idx].u16);
      break;
   case 8:
      val = loadImm(getSSA(1), insn->value[idx].u8);
      break;
   default:
      unreachable("unhandled bit size!\n");
   }
   setPosition(bb, true);
   return val;
}
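// Visiting a load_const emits no code: the instruction is only recorded here
// and materialized on demand by convert() above, so constants that get folded
// into their users never cost a separate mov.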
bool
Converter::visit(nir_load_const_instr *insn)
{
   assert(insn->def.bit_size <= 64);
   immediates[insn->def.index] = insn;
   return true;
}
#define DEFAULT_CHECKS \
   if (insn->dest.dest.ssa.num_components > 1) { \
      ERROR("nir_alu_instr only supported with 1 component!\n"); \
      return false; \
   } \
   if (insn->dest.write_mask != 1) { \
      ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
      return false; \
   }
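// The ALU visitor relies on the scalarizing passes run in Converter::run()
// (nir_lower_alu_to_scalar and friends), which is why DEFAULT_CHECKS can
// insist on single-component destinations.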
bool
Converter::visit(nir_alu_instr *insn)
{
   const nir_op op = insn->op;
   const nir_op_info &info = nir_op_infos[op];
   DataType dType = getDType(insn);
   const std::vector<DataType> sTypes = getSTypes(insn);

   Instruction *oldPos = this->bb->getExit();

   switch (op) {
   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fadd:
   case nir_op_iadd:
   case nir_op_iand:
   case nir_op_fceil:
   case nir_op_fcos:
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
   case nir_op_fexp2:
   case nir_op_ffloor:
   case nir_op_ffma:
   case nir_op_flog2:
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
   case nir_op_fneg:
   case nir_op_ineg:
   case nir_op_inot:
   case nir_op_ior:
   case nir_op_pack_64_2x32_split:
   case nir_op_fpow:
   case nir_op_frcp:
   case nir_op_frem:
   case nir_op_irem:
   case nir_op_frsq:
   case nir_op_fsat:
   case nir_op_ishr:
   case nir_op_ushr:
   case nir_op_fsin:
   case nir_op_fsqrt:
   case nir_op_ftrunc:
   case nir_op_ishl:
   case nir_op_ixor: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      operation preOp = preOperationNeeded(op);
      if (preOp != OP_NOP) {
         assert(info.num_inputs < 2);
         Value *tmp = getSSA(typeSizeof(dType));
         Instruction *i0 = mkOp(preOp, dType, tmp);
         Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
         if (info.num_inputs) {
            i0->setSrc(0, getSrc(&insn->src[0]));
            i1->setSrc(0, tmp);
         }
         i1->subOp = getSubOp(op);
      } else {
         Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
         for (unsigned s = 0u; s < info.num_inputs; ++s) {
            i->setSrc(s, getSrc(&insn->src[s]));
         }
         i->subOp = getSubOp(op);
      }
      break;
   }
   case nir_op_ifind_msb:
   case nir_op_ufind_msb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      dType = sTypes[0];
      mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
   case nir_op_fround_even: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
      break;
   }
   // convert instructions
   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
   case nir_op_i2f32:
   case nir_op_i2i32:
   case nir_op_u2f32:
   case nir_op_u2u32:
   case nir_op_f2f64:
   case nir_op_f2i64:
   case nir_op_f2u64:
   case nir_op_i2f64:
   case nir_op_i2i64:
   case nir_op_u2f64:
   case nir_op_u2u64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
         i->rnd = ROUND_Z;
      i->sType = sTypes[0];
      break;
   }
   // compare instructions
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkCmp(getOperation(op),
                             getCondCode(op),
                             dType,
                             newDefs[0],
                             sTypes[0],
                             getSrc(&insn->src[0]),
                             getSrc(&insn->src[1]));
      if (info.num_inputs == 3)
         i->setSrc(2, getSrc(&insn->src[2]));
      i->sType = sTypes[0];
      break;
   }
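   // Indexable temporaries survive as non-SSA registers with num_array_elems
   // set; moves touching them become local-memory (FILE_MEMORY_LOCAL)
   // accesses at the offset recorded in regToLmemOffset, plus an optional
   // computed indirect.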
   // those are weird ALU ops and need special handling, because
   //   1. they are always component-based
   //   2. they basically just merge multiple values into one data type
   case nir_op_mov:
      if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
         nir_reg_dest& reg = insn->dest.dest.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < comps; ++i) {
            if (!((1u << i) & insn->dest.write_mask))
               continue;

            Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
            mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
         }
         break;
      } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
         LValues &newDefs = convert(&insn->dest);
         nir_reg_src& reg = insn->src[0].src.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         // uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < newDefs.size(); ++i)
            loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);
         break;
      } else {
         LValues &newDefs = convert(&insn->dest);
         for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
            mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
         }
      }
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16: {
      LValues &newDefs = convert(&insn->dest);
      for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
         mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
      }
      break;
   }
   case nir_op_pack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
      merge->setSrc(0, getSrc(&insn->src[0], 0));
      merge->setSrc(1, getSrc(&insn->src[0], 1));
      break;
   }
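   // pack_half_2x16_split converts both halves to f16 and merges them with a
   // single INSBF; the 0x1010 descriptor encodes "insert 16 bits at bit
   // offset 16" (width in the second byte, offset in the low byte).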
   case nir_op_pack_half_2x16_split: {
      LValues &newDefs = convert(&insn->dest);
      Value *tmpH = getSSA();
      Value *tmpL = getSSA();

      mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
      mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
      mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
      break;
   }
   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
      if (op == nir_op_unpack_half_2x16_split_y)
         cvt->subOp = 1;
      break;
   }
   case nir_op_unpack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
      break;
   }
   // special instructions
   case nir_op_fsign:
   case nir_op_isign: {
      DEFAULT_CHECKS;
      DataType iType;
      if (::isFloatType(dType))
         iType = TYPE_F32;
      else
         iType = TYPE_S32;

      LValues &newDefs = convert(&insn->dest);
      LValue *val0 = getScratch();
      LValue *val1 = getScratch();
      mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
      mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);

      if (dType == TYPE_F64) {
         mkOp2(OP_SUB, iType, val0, val0, val1);
         mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
      } else if (dType == TYPE_S64 || dType == TYPE_U64) {
         mkOp2(OP_SUB, iType, val0, val1, val0);
         mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
         mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
      } else if (::isFloatType(dType))
         mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
      else
         mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
      break;
   }
   case nir_op_b32csel: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
      break;
   }
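   // EXTBF wants its {offset, width} operand packed into one register (offset
   // in byte 0, width in byte 1), so an INSBF with the 0x808 descriptor first
   // splices the width over the offset.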
   case nir_op_ibitfield_extract:
   case nir_op_ubitfield_extract: {
      DEFAULT_CHECKS;
      Value *tmp = getSSA();
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
      break;
   }
   case nir_op_bfm: {
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
      break;
   }
   case nir_op_bitfield_insert: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *temp = getSSA();
      mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bit_count: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bitfield_reverse: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
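   // There is no direct lsb-find op: the input is bit-reversed and then BFIND
   // is used; its SAMT ("shift amount") variant reports 31 minus the found
   // position, which after the reversal works out to the index of the
   // original lowest set bit.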
   case nir_op_find_lsb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA();
      mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
      mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      break;
   }
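   // The extract_{u,i}{8,16} ops use PERMT (byte permute): the selector is
   // computed from the byte/word index in src[1], and the signed variants set
   // the 0x8 "replicate sign" bits in the upper selector nibbles to
   // sign-extend the result.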
   case nir_op_extract_u8: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
   case nir_op_extract_i8: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
   case nir_op_extract_u16: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
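   // After nir_lower_bool_to_int32, booleans are 0/~0, so b2f/b2i just AND
   // with the bit pattern of 1 (for doubles: 0x3ff00000, the high word of
   // 1.0, merged with a zero low word).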
   // boolean conversions
   case nir_op_b2f32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
      break;
   }
   case nir_op_b2f64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA(4);
      mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
      mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
      break;
   }
   case nir_op_f2b32:
   case nir_op_i2b32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *src1;
      if (typeSizeof(sTypes[0]) == 8) {
         src1 = loadImm(getSSA(8), 0.0);
      } else {
         src1 = zero;
      }
      CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
      mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
      break;
   }
   case nir_op_b2i32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
      break;
   }
   case nir_op_b2i64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *def = getScratch();
      mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
      mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
      break;
   }
   default:
      ERROR("unknown nir_op %s\n", info.name);
      return false;
   }

   if (!oldPos) {
      oldPos = this->bb->getEntry();
      oldPos->precise = insn->exact;
   }

   if (unlikely(!oldPos))
      return true;

   while (oldPos->next) {
      oldPos = oldPos->next;
      oldPos->precise = insn->exact;
   }
   oldPos->saturate = insn->dest.saturate;

   return true;
}
#undef DEFAULT_CHECKS
bool
Converter::visit(nir_ssa_undef_instr *insn)
{
   LValues &newDefs = convert(&insn->def);
   for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
      mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
   }
   return true;
}
#define CASE_SAMPLER(ty) \
   case GLSL_SAMPLER_DIM_ ## ty : \
      if (isArray && !isShadow) \
         return TEX_TARGET_ ## ty ## _ARRAY; \
      else if (!isArray && isShadow) \
         return TEX_TARGET_## ty ## _SHADOW; \
      else if (isArray && isShadow) \
         return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
      else \
         return TEX_TARGET_ ## ty

TexTarget
Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
{
   switch (dim) {
   CASE_SAMPLER(1D);
   CASE_SAMPLER(2D);
   CASE_SAMPLER(CUBE);
   case GLSL_SAMPLER_DIM_3D:
      return TEX_TARGET_3D;
   case GLSL_SAMPLER_DIM_MS:
      if (isArray)
         return TEX_TARGET_2D_MS_ARRAY;
      return TEX_TARGET_2D_MS;
   case GLSL_SAMPLER_DIM_RECT:
      if (isShadow)
         return TEX_TARGET_RECT_SHADOW;
      return TEX_TARGET_RECT;
   case GLSL_SAMPLER_DIM_BUF:
      return TEX_TARGET_BUFFER;
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return TEX_TARGET_2D;
   default:
      ERROR("unknown glsl_sampler_dim %u\n", dim);
      assert(false);
      return TEX_TARGET_COUNT;
   }
}
#undef CASE_SAMPLER
Value*
Converter::applyProjection(Value *src, Value *proj)
{
   if (!proj)
      return src;
   return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}
unsigned int
Converter::getNIRArgCount(TexInstruction::Target& target)
{
   unsigned int result = target.getArgCount();
   if (target.isCube() && target.isArray())
      result--;
   if (target.isMS())
      result--;
   return result;
}
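// handleDeref walks an image deref chain from the tail up to its variable,
// folding the constant part of each index into the returned location and
// accumulating the dynamic parts into a single indirect Value.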
uint16_t
Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
{
   typedef std::pair<uint32_t, Value*> DerefPair;
   std::list<DerefPair> derefs;

   uint16_t result = 0;
   while (deref->deref_type != nir_deref_type_var) {
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         Value *indirect;
         uint8_t size = type_size(deref->type, true);
         result += size * getIndirect(&deref->arr.index, 0, indirect);

         if (indirect) {
            derefs.push_front(std::make_pair(size, indirect));
         }

         break;
      }
      case nir_deref_type_struct: {
         result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
         break;
      }
      case nir_deref_type_var:
      default:
         unreachable("nir_deref_type_var reached in handleDeref!");
         break;
      }
      deref = nir_deref_instr_parent(deref);
   }

   indirect = NULL;
   for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
      Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
      if (indirect)
         indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
      else
         indirect = offset;
   }

   tex = nir_deref_instr_get_variable(deref);
   assert(tex);

   return result + tex->data.driver_location;
}
CacheMode
Converter::convert(enum gl_access_qualifier access)
{
   switch (access) {
   case ACCESS_VOLATILE:
      return CACHE_CV;
   case ACCESS_COHERENT:
      return CACHE_CG;
   default:
      return CACHE_CA;
   }
}

CacheMode
Converter::getCacheModeFromVar(const nir_variable *var)
{
   return convert(var->data.access);
}
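// Texture fetches: all supported texops share one path that gathers NIR's
// named sources into codegen's positional source layout before building the
// TexInstruction.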
bool
Converter::visit(nir_tex_instr *insn)
{
   switch (insn->op) {
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_tex:
   case nir_texop_texture_samples:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txl:
   case nir_texop_txs: {
      LValues &newDefs = convert(&insn->dest);
      std::vector<Value*> srcs;
      std::vector<Value*> defs;
      std::vector<nir_src*> offsets;
      uint8_t mask = 0;
      bool lz = false;
      Value *proj = NULL;
      TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
      operation op = getOperation(insn->op);

      int r, s;
      int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
      int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
      int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
      int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
      int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
      int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
      int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
      int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
      int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
      int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
      int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
      int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
      int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);

      bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
      assert((sampHandleIdx != -1) == (texHandleIdx != -1));

      if (projIdx != -1)
         proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));

      srcs.resize(insn->coord_components);
      for (uint8_t i = 0u; i < insn->coord_components; ++i)
         srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);

      // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
      if (insn->coord_components) {
         uint32_t argCount = target.getArgCount();

         if (target.isMS())
            argCount -= 1;

         for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
            srcs.push_back(getSSA());
      }

      if (insn->op == nir_texop_texture_samples)
         srcs.push_back(zero);
      else if (!insn->num_srcs)
         srcs.push_back(loadImm(NULL, 0));
      if (biasIdx != -1)
         srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
      if (lodIdx != -1)
         srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
      else if (op == OP_TXF)
         lz = true;
      if (msIdx != -1)
         srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
      if (offsetIdx != -1)
         offsets.push_back(&insn->src[offsetIdx].src);
      if (compIdx != -1)
         srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
      if (texOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
         texOffIdx = srcs.size() - 1;
      }
      if (sampOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
         sampOffIdx = srcs.size() - 1;
      }
      if (bindless) {
         // currently we use the lower bits
         Value *split[2];
         Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);

         mkSplit(split, 4, handle);

         srcs.push_back(split[0]);
         texOffIdx = srcs.size() - 1;
      }

      r = bindless ? 0xff : insn->texture_index;
      s = bindless ? 0x1f : insn->sampler_index;

      defs.resize(newDefs.size());
      for (uint8_t d = 0u; d < newDefs.size(); ++d) {
         defs[d] = newDefs[d];
         mask |= 1 << d;
      }
      if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
         lz = true;

      TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
      texi->tex.levelZero = lz;
      texi->tex.mask = mask;
      texi->tex.bindless = bindless;

      if (texOffIdx != -1)
         texi->tex.rIndirectSrc = texOffIdx;
      if (sampOffIdx != -1)
         texi->tex.sIndirectSrc = sampOffIdx;

      switch (insn->op) {
      case nir_texop_tg4:
         if (!target.isShadow())
            texi->tex.gatherComp = insn->component;
         break;
      case nir_texop_txs:
         texi->tex.query = TXQ_DIMS;
         break;
      case nir_texop_texture_samples:
         texi->tex.mask = 0x4;
         texi->tex.query = TXQ_TYPE;
         break;
      case nir_texop_query_levels:
         texi->tex.mask = 0x8;
         texi->tex.query = TXQ_DIMS;
         break;
      default:
         break;
      }

      texi->tex.useOffsets = offsets.size();
      if (texi->tex.useOffsets) {
         for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
            for (uint32_t c = 0u; c < 3; ++c) {
               uint8_t s2 = std::min(c, target.getDim() - 1);
               texi->offset[s][c].set(getSrc(offsets[s], s2));
               texi->offset[s][c].setInsn(texi);
            }
         }
      }

      if (op == OP_TXG && offsetIdx == -1) {
         if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
            texi->tex.useOffsets = 4;
            setPosition(texi, false);
            for (uint8_t i = 0; i < 4; ++i) {
               for (uint8_t j = 0; j < 2; ++j) {
                  texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
                  texi->offset[i][j].setInsn(texi);
               }
            }
            setPosition(texi, true);
         }
      }

      if (ddxIdx != -1 && ddyIdx != -1) {
         for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
            texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
            texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
         }
      }

      break;
   }
   default:
      ERROR("unknown nir_texop %u\n", insn->op);
      return false;
   }
   return true;
}
bool
Converter::visit(nir_deref_instr *deref)
{
   // we just ignore those, because image intrinsics are the only place where
   // we should end up with deref sources and those have to backtrack anyway
   // to get the nir_variable. This code just exists to handle some special
   // cases.
   switch (deref->deref_type) {
   case nir_deref_type_array:
   case nir_deref_type_struct:
   case nir_deref_type_var:
      break;
   default:
      ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
      return false;
   }
   return true;
}
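// Converter::run() drives the NIR-side preparation: lower everything the
// backend wants scalar, run the usual optimization loop to a fixed point,
// then leave SSA form right before translation.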
bool
Converter::run()
{
   bool progress;

   if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
      nir_print_shader(nir, stderr);

   struct nir_lower_subgroups_options subgroup_options = {
      .subgroup_size = 32,
      .ballot_bit_size = 32,
   };

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa);
   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_lower_phis_to_scalar);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_trivial_continues);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
   } while (progress);

   NIR_PASS_V(nir, nir_lower_bool_to_int32);
   NIR_PASS_V(nir, nir_lower_locals_to_regs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);

   // Garbage collect dead instructions
   nir_sweep(nir);

   if (!parseNIR()) {
      ERROR("Couldn't parse NIR!\n");
      return false;
   }

   if (!assignSlots()) {
      ERROR("Couldn't assign slots!\n");
      return false;
   }

   if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
      nir_print_shader(nir, stderr);

   nir_foreach_function(function, nir) {
      if (!visit(function))
         return false;
   }

   return true;
}

} // unnamed namespace
namespace nv50_ir {

bool
Program::makeFromNIR(struct nv50_ir_prog_info *info)
{
   nir_shader *nir = (nir_shader*)info->bin.source;
   Converter converter(this, nir, info);
   bool result = converter.run();
   if (!result)
      return result;
   LoweringHelper lowering;
   lowering.run(this);
   tlsSize = info->bin.tlsSpace;
   return result;
}

} // namespace nv50_ir
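// The compiler options are currently chipset-independent (hence the single
// gf100 instance below); the chipset parameter is there so per-generation
// differences (e.g. the fsqrt lowering noted below) can be wired up later.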
static nir_shader_compiler_options
nvir_nir_shader_compiler_options(int chipset)
{
   return {
      .lower_fdiv = false,
      .lower_ffma = false,
      .fuse_ffma = false, /* nir doesn't track mad vs fma */
      .lower_flrp16 = false,
      .lower_flrp32 = true,
      .lower_flrp64 = true,
      .lower_fpow = false,
      .lower_fsat = false,
      .lower_fsqrt = false, // TODO: only before gm200
      .lower_sincos = false,

      .lower_bitfield_extract = false,
      .lower_bitfield_extract_to_shifts = false,
      .lower_bitfield_insert = false,
      .lower_bitfield_insert_to_shifts = false,
      .lower_bitfield_insert_to_bitfield_select = false,
      .lower_bitfield_reverse = false,
      .lower_bit_count = false,
      .lower_ifind_msb = false,
      .lower_find_lsb = false,
      .lower_uadd_carry = true, // TODO
      .lower_usub_borrow = true, // TODO
      .lower_mul_high = false,
      .lower_negate = false,

      .lower_scmp = true, // TODO: not implemented yet
      .lower_vector_cmp = false,

      .lower_bitops = false,
      .lower_isign = false, // TODO
      .lower_fsign = false,
      .lower_fdph = false,
      .lower_fdot = false,
      .fdot_replicates = false, // TODO
      .lower_ffloor = false, // TODO
      .lower_ffract = true,
      .lower_fceil = false, // TODO
      .lower_ftrunc = false,
      .lower_ldexp = true,
      .lower_pack_half_2x16 = true,
      .lower_pack_unorm_2x16 = true,
      .lower_pack_snorm_2x16 = true,
      .lower_pack_unorm_4x8 = true,
      .lower_pack_snorm_4x8 = true,
      .lower_unpack_half_2x16 = true,
      .lower_unpack_unorm_2x16 = true,
      .lower_unpack_snorm_2x16 = true,
      .lower_unpack_unorm_4x8 = true,
      .lower_unpack_snorm_4x8 = true,
      .lower_pack_split = false,
      .lower_extract_byte = true,
      .lower_extract_word = true,
      .lower_all_io_to_temps = false,
      .lower_all_io_to_elements = false,
      .vertex_id_zero_based = false,
      .lower_base_vertex = false,
      .lower_helper_invocation = false,
      .optimize_sample_mask_in = false,
      .lower_cs_local_index_from_id = true,
      .lower_cs_local_id_from_index = false,
      .lower_device_index_to_zero = false, // TODO
      .lower_wpos_pntc = false, // TODO
      .lower_hadd = true, // TODO
      .lower_add_sat = true, // TODO
      .vectorize_io = false,
      .lower_to_scalar = true,
      .unify_interfaces = false,
      .use_interpolated_input_intrinsics = true,
      .lower_mul_2x32_64 = true, // TODO
      .lower_rotate = true,
      .has_imul24 = false,
      .intel_vec4 = false,
      .max_unroll_iterations = 32,
      .lower_int64_options = (nir_lower_int64_options) ( // TODO
         nir_lower_divmod64 |
         nir_lower_ufind_msb64
      ),
      .lower_doubles_options = (nir_lower_doubles_options) ( // TODO
         nir_lower_dmod
      ),
   };
}

static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);

const nir_shader_compiler_options *
nv50_ir_nir_shader_compiler_options(int chipset)
{
   return &gf100_nir_shader_compiler_options;
}