/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */
#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"
#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif

#if __cplusplus >= 201103L
using std::unordered_map;
#else
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;
static int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}
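
// Size/align callback in the glsl_type_size_align_func style, presumably
// used when lowering function-temporary variables to explicit types: a
// vector-or-scalar type occupies comp_size * length bytes.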
static void
function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);

   *size = comp_size * length;
   *align = comp_size;
}
class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr *, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);
   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value does not have a constant part, the Value gets
   // returned through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise and the value is adjusted accordingly.
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);
   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *, uint8_t mode, bool centroid,
                       unsigned semantic);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   DataFile getFile(nir_intrinsic_op);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr *);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target &);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirBlockMap blocks;

   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;
   Value *clipVtx[4];
   Value *outBase;
};
Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     clipVertexOutput(-1)
{
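   // cache a single zero immediate; it is reused as the right-hand side of
   // the compare-against-zero patterns further down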
   zero = mkImm((uint32_t)0);
}
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}
bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}
bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return false;
}
bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
      return false;
   default: {
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return false;
   }
   }
}
DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}
DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}
DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}
DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}
std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}
DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}
DataFile
Converter::getFile(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
      return FILE_MEMORY_GLOBAL;
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_scratch:
      return FILE_MEMORY_LOCAL;
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      return FILE_MEMORY_SHARED;
   case nir_intrinsic_load_kernel_input:
      return FILE_SHADER_INPUT;
   default:
      ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
      assert(false);
   }
   return FILE_NULL;
}
operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}
operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}
operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}
operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}
int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}
int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;
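
   // memory barriers become MEMBAR with a scope argument: globally visible
   // (GL) for buffer/image barriers, CTA-local for shared-memory barriers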
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}
CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}
Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}
Converter::LValues&
Converter::convert(nir_register *reg)
{
   assert(!reg->num_array_elems);

   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}
Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}
Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}
Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}
Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}
Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}
uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}
uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
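   // vec4 slots are 16 bytes apart, so a non-scalar indirect index is
   // shifted left by 4 (multiplied by 16) to turn it into a byte offset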
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}
static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}
void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}
static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}
bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info->numInputs = 0;
   info->numOutputs = 0;
   info->numSysVals = 0;

   for (uint8_t i = 0; i < SYSTEM_VALUE_MAX; ++i) {
      if (!(nir->info.system_values_read & 1ull << i))
         continue;

      info->sv[info->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info->sv[info->numSysVals].si = 0;
      info->sv[info->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info->io.instanceId = info->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info->sv[info->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info->io.vertexId = info->numSysVals;
         break;
      default:
         break;
      }

      info->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;
   nir_foreach_shader_in_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info->numInputs = std::max<uint8_t>(info->numInputs, vary);
   }
   nir_foreach_shader_out_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info->prop.fp.numColourResults++;

            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info->prop.fp.separateFragData = true;

            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info->io.fragDepth = vary;
            info->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info->numPatchConstants = MAX2(info->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info->numOutputs = std::max<uint8_t>(info->numOutputs, vary);
   }
   if (info->io.genUserClip > 0) {
      info->io.clipDistances = info->io.genUserClip;

      const unsigned int nOut = (info->io.genUserClip + 3) / 4;
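
      // up to four clip distances are packed into each vec4 CLIPDIST
      // output slot, hence the mask shifted by n * 4 below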
      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info->numOutputs++;
         info->out[i].id = i;
         info->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info->out[i].si = n;
         info->out[i].mask = ((1 << info->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info) == 0;
}
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot %= 4;
      }
   } else {
      slot += offset;
   }

   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info->in : info->out;
   return vary[idx].slot[slot] * 4;
}
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);
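
   // 64-bit values from const/buffer memory (or any indirect access) are
   // loaded as two 32-bit halves and merged back into one value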
   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   }

   Instruction *ld =
      mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
   ld->setIndirect(0, 1, indirect1);
   ld->perPatch = patch;
   return ld;
}
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);
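
   // 64-bit indirect stores are likewise split into two 32-bit stores of
   // the low and high words; exported values get copied through a mov first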
   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info->out[idx].patch;
   }
}
bool
Converter::parseNIR()
{
   info->bin.tlsSpace = nir->scratch_size;
   info->io.clipDistances = nir->info.clip_distance_array_size;
   info->io.cullDistances = nir->info.cull_distance_array_size;
   info->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info->bin.smemSize += nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_ID) ||
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info->prop.fp.readsSampleLocations =
         (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
      info->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info->prop.fp.usesSampleMaskIn =
         !!(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info->prop.gp.instanceCount = nir->info.gs.invocations;
      info->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info->prop.tp.domain = GL_LINES;
      else
         info->prop.tp.domain = nir->info.tess.primitive_mode;
      info->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info->prop.vp.usesDrawParameters =
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE)) ||
         (nir->info.system_values_read & BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID));
      break;
   default:
      break;
   }

   return true;
}
bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non main function this needs to be a OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}
bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}
bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}
bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   setPosition(ifBB, true);
   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   setPosition(elseBB, true);
   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }
   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the most outer if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}
// TODO: add convergence
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);
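
   // PREBREAK points breaks at the block after the loop, PRECONT points
   // continues back at the loop header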
   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}
bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on the fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
}
SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   case nir_intrinsic_load_work_dim:
      return SV_WORK_DIM;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}
bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR, TGSI stores that
               // value in the z component, NIR in X
               offset += 2;
               src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
            }
            break;
         }
         case Program::TYPE_GEOMETRY:
         case Program::TYPE_TESSELLATION_EVAL:
         case Program::TYPE_VERTEX: {
            if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
               mkMov(clipVtx[i], src);
               src = clipVtx[i];
            }
            break;
         }
         default:
            break;
         }

         storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
      }
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output: {
      LValues &newDefs = convert(&insn->dest);

      if (prog->getType() == Program::TYPE_FRAGMENT &&
          op == nir_intrinsic_load_output) {
         std::vector<Value*> defs, srcs;
         uint8_t mask = 0;

         srcs.push_back(getSSA());
         srcs.push_back(getSSA());
         Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
         Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
         mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
         mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;

         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
         srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));

         for (uint8_t i = 0u; i < dest_components; ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }

         TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
         texi->tex.levelZero = 1;
         texi->tex.mask = mask;
         texi->tex.useOffsets = 0;
         texi->tex.r = 0xffff;
         texi->tex.s = 0xffff;

         info->prop.fp.readsFramebuffer = true;
         break;
      }
      const DataType dType = getDType(insn);
      Value *indirect;
      bool input = op != nir_intrinsic_load_output;
      operation nvirOp = OP_NOP;
      uint32_t mode = 0;

      uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
      nv50_ir_varying& vary = input ? info->in[idx] : info->out[idx];

      // see load_barycentric_* handling
      if (prog->getType() == Program::TYPE_FRAGMENT) {
         if (op == nir_intrinsic_load_interpolated_input) {
            ImmediateValue immMode;
            if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
               mode = immMode.reg.data.u32;
         }

         if (mode == NV50_IR_INTERP_DEFAULT)
            mode |= translateInterpMode(&vary, nvirOp);
         else {
            if (vary.linear) {
               nvirOp = OP_LINTERP;
               mode |= NV50_IR_INTERP_LINEAR;
            } else {
               nvirOp = OP_PINTERP;
               mode |= NV50_IR_INTERP_PERSPECTIVE;
            }
         }
      }
      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
         if (prog->getType() == Program::TYPE_FRAGMENT) {
            int s = 1;
            if (typeSizeof(dType) == 8) {
               Value *lo = getSSA();
               Value *hi = getSSA();
               Instruction *interp;

               interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               s = 1;
               Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
               interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);

               mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
            } else {
               Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
               if (nvirOp == OP_PINTERP)
                  interp->setSrc(s++, fp.position);
               if (mode & NV50_IR_INTERP_OFFSET)
                  interp->setSrc(s++, getSrc(&insn->src[0], 0));
               interp->setInterpolate(mode);
               interp->setIndirect(0, 0, indirect);
            }
         } else {
            mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
         }
      }
      break;
   }
   case nir_intrinsic_load_barycentric_at_offset:
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_centroid:
   case nir_intrinsic_load_barycentric_pixel:
   case nir_intrinsic_load_barycentric_sample: {
      LValues &newDefs = convert(&insn->dest);
      uint32_t mode;

      if (op == nir_intrinsic_load_barycentric_centroid ||
          op == nir_intrinsic_load_barycentric_sample) {
         mode = NV50_IR_INTERP_CENTROID;
      } else if (op == nir_intrinsic_load_barycentric_at_offset) {
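         // clamp the offset into the representable range, convert it to
         // signed fixed point (scale by 4096 = 2^12) and pack the x/y
         // offsets into one register, 16 bits each, via INSBF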
         Value *offs[2];
         for (uint8_t c = 0; c < 2; c++) {
            offs[c] = getScratch();
            mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
            mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
            mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
            mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
         }
         mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);

         mode = NV50_IR_INTERP_OFFSET;
      } else if (op == nir_intrinsic_load_barycentric_pixel) {
         mode = NV50_IR_INTERP_DEFAULT;
      } else if (op == nir_intrinsic_load_barycentric_at_sample) {
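         // PIXLD with the OFFSET subop turns the sample index into the
         // pixel-offset encoding consumed by the interpolation path above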
         info->prop.fp.readsSampleLocations = true;
         mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
         mode = NV50_IR_INTERP_OFFSET;
      } else {
         unreachable("all intrinsics already handled above");
      }

      loadImm(newDefs[1], mode);
      break;
   }
   case nir_intrinsic_demote:
   case nir_intrinsic_discard:
      mkOp(OP_DISCARD, TYPE_NONE, NULL);
      break;
   case nir_intrinsic_demote_if:
   case nir_intrinsic_discard_if: {
      Value *pred = getSSA(1, FILE_PREDICATE);
      if (insn->num_components > 1) {
         ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
         assert(false);
         return false;
      }
      mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_patch_vertices_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_subgroup_eq_mask:
   case nir_intrinsic_load_subgroup_ge_mask:
   case nir_intrinsic_load_subgroup_gt_mask:
   case nir_intrinsic_load_subgroup_le_mask:
   case nir_intrinsic_load_subgroup_lt_mask:
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_work_dim: {
      const DataType dType = getDType(insn);
      SVSemantic sv = convert(op);
      LValues &newDefs = convert(&insn->dest);

      for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
         Value *def;
         if (typeSizeof(dType) == 8)
            def = getSSA();
         else
            def = newDefs[i];

         if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
            loadImm(def, 0u);
         } else {
            Symbol *sym = mkSysVal(sv, i);
            Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
            if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
               rdsv->perPatch = 1;
         }

         if (typeSizeof(dType) == 8)
            mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
      }
      break;
   }
   case nir_intrinsic_load_subgroup_size: {
      LValues &newDefs = convert(&insn->dest);
      loadImm(newDefs[0], 32u);
      break;
   }
   case nir_intrinsic_vote_all:
   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_ieq: {
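      // votes work on predicates: compare the source against zero, vote on
      // the predicate, then convert the result back to a 32-bit boolean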
      LValues &newDefs = convert(&insn->dest);
      Value *pred = getScratch(1, FILE_PREDICATE);
      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
      mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
      break;
   }
   case nir_intrinsic_ballot: {
      LValues &newDefs = convert(&insn->dest);
      Value *pred = getSSA(1, FILE_PREDICATE);
      mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
      mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
      break;
   }
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_read_invocation: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *tmp = getScratch();

      if (op == nir_intrinsic_read_first_invocation) {
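         // find the lowest active lane: vote yields the active-lane mask,
         // which is bit-reversed so BFIND returns the first set bit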
         mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
         mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
         mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      } else
         tmp = getSrc(&insn->src[1], 0);

      for (uint8_t i = 0; i < dest_components; ++i) {
         mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
            ->subOp = NV50_IR_SUBOP_SHFL_IDX;
      }
      break;
   }
   case nir_intrinsic_load_per_vertex_input: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
      uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);

      Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              mkImm(baseVertex), indirectVertex);
      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
                  indirectOffset, vtxBase, info->in[idx].patch);
      }
      break;
   }
   case nir_intrinsic_load_per_vertex_output: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectVertex;
      Value *indirectOffset;
      uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
      uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
      Value *vtxBase = NULL;

      if (indirectVertex)
         vtxBase = indirectVertex;
      else
         vtxBase = loadImm(NULL, baseVertex);

      vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);

      for (uint8_t i = 0u; i < dest_components; ++i) {
         uint32_t address = getSlotAddress(insn, idx, i);
         loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
                  indirectOffset, vtxBase, info->in[idx].patch);
      }
      break;
   }
   case nir_intrinsic_emit_vertex: {
      if (info->io.genUserClip > 0)
         handleUserClipPlanes();
      uint32_t idx = nir_intrinsic_stream_id(insn);
      mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
      break;
   }
   case nir_intrinsic_end_primitive: {
      uint32_t idx = nir_intrinsic_stream_id(insn);
      if (idx)
         break;
      mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
      break;
   }
   case nir_intrinsic_load_ubo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectIndex;
      Value *indirectOffset;
      uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
                  indirectOffset, indirectIndex);
      }
      break;
   }
   case nir_intrinsic_get_buffer_size: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirectBuffer;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
      mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
      break;
   }
   case nir_intrinsic_store_ssbo: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
                                offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
            ->setIndirect(0, 1, indirectBuffer);
      }
      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_load_ssbo: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i)
         loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
                  indirectOffset, indirectBuffer);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
      Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      if (op == nir_intrinsic_shared_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[2], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
                                getSrc(&insn->src[2], 0));
      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[3], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->setIndirect(0, 1, indirectBuffer);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *address;
      uint32_t offset = getIndirect(&insn->src[0], 0, address);

      Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
      Instruction *atom =
         mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      if (op == nir_intrinsic_global_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[2], 0));
      atom->setIndirect(0, 0, address);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_image_size:
   case nir_intrinsic_image_store: {
      std::vector<Value*> srcs, defs;
      Value *indirect;
      DataType ty;

      uint32_t mask = 0;
      TexInstruction::Target target =
         convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
      unsigned int argCount = getNIRArgCount(target);
      uint16_t location = 0;

      if (opInfo.has_dest) {
         LValues &newDefs = convert(&insn->dest);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }
      }

      int lod_src = -1;
      bool bindless = false;
      switch (op) {
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
      case nir_intrinsic_bindless_image_atomic_inc_wrap:
      case nir_intrinsic_bindless_image_atomic_dec_wrap:
         ty = getDType(insn);
         bindless = true;
         info->io.globalAccess |= 0x2;
         mask = 0x1;
         break;
      case nir_intrinsic_image_atomic_add:
      case nir_intrinsic_image_atomic_and:
      case nir_intrinsic_image_atomic_comp_swap:
      case nir_intrinsic_image_atomic_exchange:
      case nir_intrinsic_image_atomic_imax:
      case nir_intrinsic_image_atomic_umax:
      case nir_intrinsic_image_atomic_imin:
      case nir_intrinsic_image_atomic_umin:
      case nir_intrinsic_image_atomic_or:
      case nir_intrinsic_image_atomic_xor:
      case nir_intrinsic_image_atomic_inc_wrap:
      case nir_intrinsic_image_atomic_dec_wrap:
         ty = getDType(insn);
         bindless = false;
         info->io.globalAccess |= 0x2;
         mask = 0x1;
         break;
      case nir_intrinsic_bindless_image_load:
      case nir_intrinsic_image_load:
         ty = TYPE_U32;
         bindless = op == nir_intrinsic_bindless_image_load;
         info->io.globalAccess |= 0x1;
         lod_src = 4;
         break;
      case nir_intrinsic_bindless_image_store:
      case nir_intrinsic_image_store:
         ty = TYPE_U32;
         bindless = op == nir_intrinsic_bindless_image_store;
         info->io.globalAccess |= 0x2;
         mask = 0xf;
         lod_src = 5;
         break;
      case nir_intrinsic_bindless_image_samples:
      case nir_intrinsic_image_samples:
         ty = TYPE_U32;
         bindless = op == nir_intrinsic_bindless_image_samples;
         mask = 0x8;
         break;
      case nir_intrinsic_bindless_image_size:
      case nir_intrinsic_image_size:
         assert(nir_src_as_uint(insn->src[1]) == 0);
         ty = TYPE_U32;
         bindless = op == nir_intrinsic_bindless_image_size;
         break;
      default:
         unreachable("unhandled image opcode");
      }
      if (bindless)
         indirect = getSrc(&insn->src[0], 0);
      else
         location = getIndirect(&insn->src[0], 0, indirect);
      // coords
      if (opInfo.num_srcs >= 2)
         for (unsigned int i = 0u; i < argCount; ++i)
            srcs.push_back(getSrc(&insn->src[1], i));

      // the sampler is just another src added after coords
      if (opInfo.num_srcs >= 3 && target.isMS())
         srcs.push_back(getSrc(&insn->src[2], 0));

      if (opInfo.num_srcs >= 4 && lod_src != 4) {
         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
         for (uint8_t i = 0u; i < components; ++i)
            srcs.push_back(getSrc(&insn->src[3], i));
      }

      if (opInfo.num_srcs >= 5 && lod_src != 5)
         // 1 for atomic swap
         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
            srcs.push_back(getSrc(&insn->src[4], i));
      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
      texi->tex.bindless = bindless;
      texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
      texi->tex.mask = mask;
      texi->cache = convert(nir_intrinsic_access(insn));
      texi->setType(ty);
      texi->subOp = getSubOp(op);

      if (indirect)
         texi->setIndirectR(indirect);

      break;
   }
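   // As a sketch of the operand layout built above (assuming a 2D MS image
   // load; the exact shape depends on the target): srcs holds the x/y
   // coordinates from insn->src[1], then the sample index from insn->src[2],
   // then any data/atomic operands from insn->src[3] and insn->src[4], while
   // 'location' selects the image slot and 'indirect' is attached via
   // setIndirectR() when the slot is computed at run time.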
   case nir_intrinsic_store_scratch:
   case nir_intrinsic_store_shared: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
      }
      break;
   }
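   // The write mask only skips components, it does not compact them: for a
   // 32-bit sType and write_mask == 0x5 the loop above emits stores at
   // offset + 0 (component 0) and offset + 8 (component 2), leaving the
   // slot at offset + 4 untouched.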
   case nir_intrinsic_load_kernel_input:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_shared: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (uint8_t i = 0u; i < dest_components; ++i)
         loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);

      break;
   }
   case nir_intrinsic_control_barrier: {
      // TODO: add flag to shader_info
      info->numBarriers = 1;
      Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
      bar->fixed = 1;
      bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
      break;
   }
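   // OP_BAR with NV50_IR_SUBOP_BAR_SYNC is the workgroup-wide execution
   // barrier (the __syncthreads()/barrier() equivalent); the memory barrier
   // intrinsics below instead become OP_MEMBAR, with getSubOp(op) selecting
   // the scope (e.g. shared vs. global) of the ordering.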
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_shared: {
      Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
      bar->fixed = 1;
      bar->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_shader_clock: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);

      loadImm(newDefs[0], 0u);
      mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
      break;
   }
   case nir_intrinsic_load_global: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (auto i = 0u; i < dest_components; ++i)
         loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_store_global: {
      DataType sType = getSType(insn->src[0], false, false);

      for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         if (typeSizeof(sType) == 8) {
            Value *split[2];
            mkSplit(split, 4, getSrc(&insn->src[0], i));

            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);

            sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
         } else {
            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
            mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
         }
      }

      info->io.globalAccess |= 0x2;
      break;
   }
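   // 64-bit components are split because this store path works on 32-bit
   // words: a TYPE_U64 component i becomes two TYPE_U32 stores at
   // i * 8 and i * 8 + 4 (low word first), both sharing the same address
   // source from insn->src[1].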
2367 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos
[op
].name
);
bool
Converter::visit(nir_jump_instr *insn)
{
   switch (insn->type) {
   case nir_jump_return:
      // TODO: this only works in the main function
      mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
      bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
      break;
   case nir_jump_break:
   case nir_jump_continue: {
      bool isBreak = insn->type == nir_jump_break;
      nir_block *block = insn->instr.block;
      BasicBlock *target = convert(block->successors[0]);
      mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
      bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
      break;
   }
   default:
      ERROR("unknown nir_jump_type %u\n", insn->type);
      return false;
   }

   return true;
}
Value*
Converter::convert(nir_load_const_instr *insn, uint8_t idx)
{
   Value *val;

   if (immInsertPos)
      setPosition(immInsertPos, true);
   else
      setPosition(bb, false);

   switch (insn->def.bit_size) {
   case 64:
      val = loadImm(getSSA(8), insn->value[idx].u64);
      break;
   case 32:
      val = loadImm(getSSA(4), insn->value[idx].u32);
      break;
   case 16:
      val = loadImm(getSSA(2), insn->value[idx].u16);
      break;
   case 8:
      val = loadImm(getSSA(1), insn->value[idx].u8);
      break;
   default:
      unreachable("unhandled bit size!\n");
   }
   setPosition(bb, true);
   return val;
}
bool
Converter::visit(nir_load_const_instr *insn)
{
   assert(insn->def.bit_size <= 64);
   immediates[insn->def.index] = insn;
   return true;
}
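// No code is emitted while visiting a load_const: the instruction is only
// recorded in 'immediates', and convert(nir_load_const_instr *, uint8_t)
// above materializes the actual loadImm on demand when a use is translated,
// inserting it at immInsertPos (when set) instead of the current position.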
#define DEFAULT_CHECKS \
      if (insn->dest.dest.ssa.num_components > 1) { \
         ERROR("nir_alu_instr only supported with 1 component!\n"); \
         return false; \
      } \
      if (insn->dest.write_mask != 1) { \
         ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
         return false; \
      }
bool
Converter::visit(nir_alu_instr *insn)
{
   const nir_op op = insn->op;
   const nir_op_info &info = nir_op_infos[op];
   DataType dType = getDType(insn);
   const std::vector<DataType> sTypes = getSTypes(insn);

   Instruction *oldPos = this->bb->getExit();

   switch (op) {
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
   case nir_op_imul_high:
   case nir_op_umul_high:
   case nir_op_pack_64_2x32_split: {
      LValues &newDefs = convert(&insn->dest);
      operation preOp = preOperationNeeded(op);
      if (preOp != OP_NOP) {
         assert(info.num_inputs < 2);
         Value *tmp = getSSA(typeSizeof(dType));
         Instruction *i0 = mkOp(preOp, dType, tmp);
         Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
         if (info.num_inputs) {
            i0->setSrc(0, getSrc(&insn->src[0]));
            i1->setSrc(0, tmp);
         }
         i1->subOp = getSubOp(op);
      } else {
         Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
         for (unsigned s = 0u; s < info.num_inputs; ++s) {
            i->setSrc(s, getSrc(&insn->src[s]));
         }
         i->subOp = getSubOp(op);
      }
      break;
   }
   case nir_op_ifind_msb:
   case nir_op_ufind_msb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      dType = sTypes[0];
      mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
   case nir_op_fround_even: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
      break;
   }
   // convert instructions
   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
   case nir_op_i2f32:
   case nir_op_i2i32:
   case nir_op_u2f32:
   case nir_op_u2u32:
   case nir_op_f2f64:
   case nir_op_f2i64:
   case nir_op_f2u64:
   case nir_op_i2f64:
   case nir_op_i2i64:
   case nir_op_u2f64:
   case nir_op_u2u64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
         i->rnd = ROUND_Z;
      i->sType = sTypes[0];
      break;
   }
   // compare instructions
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fneu32:
   case nir_op_ine32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkCmp(getOperation(op),
                             getCondCode(op),
                             dType,
                             newDefs[0],
                             dType,
                             getSrc(&insn->src[0]),
                             getSrc(&insn->src[1]));
      if (info.num_inputs == 3)
         i->setSrc(2, getSrc(&insn->src[2]));
      i->sType = sTypes[0];
      break;
   }
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16: {
      LValues &newDefs = convert(&insn->dest);
      for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
         mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
      }
      break;
   }
   // (un)pack
   case nir_op_pack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
      merge->setSrc(0, getSrc(&insn->src[0], 0));
      merge->setSrc(1, getSrc(&insn->src[0], 1));
      break;
   }
   case nir_op_pack_half_2x16_split: {
      LValues &newDefs = convert(&insn->dest);
      Value *tmpH = getSSA();
      Value *tmpL = getSSA();

      mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
      mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
      mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
      break;
   }
   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
      if (op == nir_op_unpack_half_2x16_split_y)
         cvt->subOp = 1;
      break;
   }
   case nir_op_unpack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
      break;
   }
   // special instructions
   case nir_op_fsign:
   case nir_op_isign: {
      DEFAULT_CHECKS;
      DataType iType;
      if (::isFloatType(dType))
         iType = TYPE_F32;
      else
         iType = TYPE_S32;

      LValues &newDefs = convert(&insn->dest);
      LValue *val0 = getScratch();
      LValue *val1 = getScratch();
      mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
      mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);

      if (dType == TYPE_F64) {
         mkOp2(OP_SUB, iType, val0, val0, val1);
         mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
      } else if (dType == TYPE_S64 || dType == TYPE_U64) {
         mkOp2(OP_SUB, iType, val0, val1, val0);
         mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
         mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
      } else if (::isFloatType(dType))
         mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
      else
         mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
      break;
   }
   case nir_op_b32csel: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_ibitfield_extract:
   case nir_op_ubitfield_extract: {
      DEFAULT_CHECKS;
      Value *tmp = getSSA();
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
      break;
   }
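   // EXTBF expects its bitfield as a packed descriptor, offset in bits [7:0]
   // and width in bits [15:8]. The INSBF with the 0x808 immediate builds that
   // descriptor by inserting the width (src[2]) as an 8-bit field at bit 8
   // over the offset (src[1]): e.g. offset = 4, width = 5 gives tmp = 0x0504.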
   case nir_op_bfm: {
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
      break;
   }
   case nir_op_bitfield_insert: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *temp = getSSA();
      mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bit_count: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bitfield_reverse: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
   case nir_op_find_lsb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA();
      mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
      mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      break;
   }
   case nir_op_extract_u8: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
   case nir_op_extract_i8: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
   case nir_op_extract_u16: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
   case nir_op_extract_i16: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *prmt = getSSA();
      mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
      mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
      break;
   }
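   // OP_PERMT is NVIDIA's PRMT byte permute: each result byte is chosen by a
   // selector nibble indexing the eight input bytes (0-3 from the value, 4-7
   // from the zero immediate passed as the third operand), and a nibble with
   // bit 3 set replicates that byte's sign bit instead. Hence, for index i:
   //   extract_u8:  0x4440 | i          -> [byte i, 0, 0, 0]
   //   extract_i8:  0x1111 * i + 0x8880 -> [byte i, sign, sign, sign]
   //   extract_u16: 0x22 * i + 0x4410   -> [bytes 2i, 2i+1, 0, 0]
   //   extract_i16: 0x2222 * i + 0x9910 -> [bytes 2i, 2i+1, sign, sign]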
   case nir_op_urol: {
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
            getSrc(&insn->src[1]), getSrc(&insn->src[0]))
         ->subOp = NV50_IR_SUBOP_SHF_L |
                   NV50_IR_SUBOP_SHF_W |
                   NV50_IR_SUBOP_SHF_HI;
      break;
   }
   case nir_op_uror: {
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
            getSrc(&insn->src[1]), getSrc(&insn->src[0]))
         ->subOp = NV50_IR_SUBOP_SHF_R |
                   NV50_IR_SUBOP_SHF_W |
                   NV50_IR_SUBOP_SHF_LO;
      break;
   }
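   // SHF is a funnel shift over the 64-bit concatenation of two 32-bit
   // sources; feeding the same register in as both halves turns it into a
   // rotate, i.e. urol(x, n) == (x << n) | (x >> (32 - n)) in one
   // instruction. The _W flag selects wrapping of the shift amount and
   // _HI/_LO select which half of the shifted pair is written back.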
   // boolean conversions
   case nir_op_b2f32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
      break;
   }
   case nir_op_b2f64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA(4);
      mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
      mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
      break;
   }
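   // NIR's 32-bit booleans are 0 or ~0, so ANDing with a constant selects
   // the result's bit pattern directly: 0x3ff00000 is the high word of the
   // IEEE-754 double 1.0, and merging it with a zero low word produces
   // exactly 0.0 or 1.0 with no conversion (b2f32 above does the same with
   // the 32-bit pattern of 1.0f, and b2i32/b2i64 below with integer 1).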
   case nir_op_f2b32:
   case nir_op_i2b32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *src1;
      if (typeSizeof(sTypes[0]) == 8) {
         src1 = loadImm(getSSA(8), 0.0);
      } else {
         src1 = zero;
      }
      CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
      mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
      break;
   }
   case nir_op_b2i32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
      break;
   }
   case nir_op_b2i64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *def = getScratch();
      mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
      mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
      break;
   }
2821 ERROR("unknown nir_op %s\n", info
.name
);
2827 oldPos
= this->bb
->getEntry();
2828 oldPos
->precise
= insn
->exact
;
2831 if (unlikely(!oldPos
))
2834 while (oldPos
->next
) {
2835 oldPos
= oldPos
->next
;
2836 oldPos
->precise
= insn
->exact
;
2838 oldPos
->saturate
= insn
->dest
.saturate
;
#undef DEFAULT_CHECKS
bool
Converter::visit(nir_ssa_undef_instr *insn)
{
   LValues &newDefs = convert(&insn->def);
   for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
      mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
   }
   return true;
}
#define CASE_SAMPLER(ty) \
   case GLSL_SAMPLER_DIM_ ## ty : \
      if (isArray && !isShadow) \
         return TEX_TARGET_ ## ty ## _ARRAY; \
      else if (!isArray && isShadow) \
         return TEX_TARGET_## ty ## _SHADOW; \
      else if (isArray && isShadow) \
         return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
      else \
         return TEX_TARGET_ ## ty
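// For reference, CASE_SAMPLER(1D) expands to
//    case GLSL_SAMPLER_DIM_1D:
//       if (isArray && !isShadow)      return TEX_TARGET_1D_ARRAY;
//       else if (!isArray && isShadow) return TEX_TARGET_1D_SHADOW;
//       else if (isArray && isShadow)  return TEX_TARGET_1D_ARRAY_SHADOW;
//       else                           return TEX_TARGET_1D;
// so every dim routed through the macro gets its array/shadow variants.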
TexTarget
Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
{
   switch (dim) {
   CASE_SAMPLER(1D);
   CASE_SAMPLER(2D);
   CASE_SAMPLER(CUBE);
   case GLSL_SAMPLER_DIM_3D:
      return TEX_TARGET_3D;
   case GLSL_SAMPLER_DIM_MS:
      if (isArray)
         return TEX_TARGET_2D_MS_ARRAY;
      return TEX_TARGET_2D_MS;
   case GLSL_SAMPLER_DIM_RECT:
      if (isShadow)
         return TEX_TARGET_RECT_SHADOW;
      return TEX_TARGET_RECT;
   case GLSL_SAMPLER_DIM_BUF:
      return TEX_TARGET_BUFFER;
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return TEX_TARGET_2D;
   default:
      ERROR("unknown glsl_sampler_dim %u\n", dim);
      assert(false);
      return TEX_TARGET_COUNT;
   }
}
#undef CASE_SAMPLER
Value*
Converter::applyProjection(Value *src, Value *proj)
{
   if (!proj)
      return src;
   return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}
unsigned int
Converter::getNIRArgCount(TexInstruction::Target& target)
{
   unsigned int result = target.getArgCount();
   if (target.isCube() && target.isArray())
      result--;
   if (target.isMS())
      result--;
   return result;
}
CacheMode
Converter::convert(enum gl_access_qualifier access)
{
   if (access & ACCESS_VOLATILE)
      return CACHE_CV;
   if (access & ACCESS_COHERENT)
      return CACHE_CG;
   return CACHE_CA;
}
bool
Converter::visit(nir_tex_instr *insn)
{
   switch (insn->op) {
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_tex:
   case nir_texop_texture_samples:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txl:
   case nir_texop_txs: {
      LValues &newDefs = convert(&insn->dest);
      std::vector<Value*> srcs;
      std::vector<Value*> defs;
      std::vector<nir_src*> offsets;
      uint8_t mask = 0;
      bool lz = false;
      Value *proj = NULL;
      TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
      operation op = getOperation(insn->op);

      int r, s;
      int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
      int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
      int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
      int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
      int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
      int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
      int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
      int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
      int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
      int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
      int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
      int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
      int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);

      bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
      assert((sampHandleIdx != -1) == (texHandleIdx != -1));

      if (projIdx != -1)
         proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
      srcs.resize(insn->coord_components);
      for (uint8_t i = 0u; i < insn->coord_components; ++i)
         srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);

      // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
      if (insn->coord_components) {
         uint32_t argCount = target.getArgCount();

         if (target.isMS())
            argCount -= 1;

         for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
            srcs.push_back(getSSA());
      }
      if (insn->op == nir_texop_texture_samples)
         srcs.push_back(zero);
      else if (!insn->num_srcs)
         srcs.push_back(loadImm(NULL, 0));
      if (biasIdx != -1)
         srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
      if (lodIdx != -1)
         srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
      else if (op == OP_TXF)
         lz = true;
      if (msIdx != -1)
         srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
      if (offsetIdx != -1)
         offsets.push_back(&insn->src[offsetIdx].src);
      if (compIdx != -1)
         srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
      if (texOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
         texOffIdx = srcs.size() - 1;
      }
      if (sampOffIdx != -1) {
         srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
         sampOffIdx = srcs.size() - 1;
      }
      if (bindless) {
         // currently we use the lower bits
         Value *split[2];
         Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);

         mkSplit(split, 4, handle);

         srcs.push_back(split[0]);
         texOffIdx = srcs.size() - 1;
      }
= bindless
? 0xff : insn
->texture_index
;
3020 s
= bindless
? 0x1f : insn
->sampler_index
;
3022 defs
.resize(newDefs
.size());
3023 for (uint8_t d
= 0u; d
< newDefs
.size(); ++d
) {
3024 defs
[d
] = newDefs
[d
];
3027 if (target
.isMS() || (op
== OP_TEX
&& prog
->getType() != Program::TYPE_FRAGMENT
))
      TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
      texi->tex.levelZero = lz;
      texi->tex.mask = mask;
      texi->tex.bindless = bindless;

      if (texOffIdx != -1)
         texi->tex.rIndirectSrc = texOffIdx;
      if (sampOffIdx != -1)
         texi->tex.sIndirectSrc = sampOffIdx;
      switch (insn->op) {
      case nir_texop_tg4:
         if (!target.isShadow())
            texi->tex.gatherComp = insn->component;
         break;
      case nir_texop_txs:
         texi->tex.query = TXQ_DIMS;
         break;
      case nir_texop_texture_samples:
         texi->tex.mask = 0x4;
         texi->tex.query = TXQ_TYPE;
         break;
      case nir_texop_query_levels:
         texi->tex.mask = 0x8;
         texi->tex.query = TXQ_DIMS;
         break;
      default:
         break;
      }
      texi->tex.useOffsets = offsets.size();
      if (texi->tex.useOffsets) {
         for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
            for (uint32_t c = 0u; c < 3; ++c) {
               uint8_t s2 = std::min(c, target.getDim() - 1);
               texi->offset[s][c].set(getSrc(offsets[s], s2));
               texi->offset[s][c].setInsn(texi);
            }
         }
      }
      if (op == OP_TXG && offsetIdx == -1) {
         if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
            texi->tex.useOffsets = 4;
            setPosition(texi, false);
            for (uint8_t i = 0; i < 4; ++i) {
               for (uint8_t j = 0; j < 2; ++j) {
                  texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
                  texi->offset[i][j].setInsn(texi);
               }
            }
            setPosition(texi, true);
         }
      }
      if (ddxIdx != -1 && ddyIdx != -1) {
         for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
            texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
            texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
         }
      }

      break;
   }
   default:
      ERROR("unknown nir_texop %u\n", insn->op);
      return false;
   }

   return true;
}
bool
Converter::run()
{
   bool progress;

   if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
      nir_print_shader(nir, stderr);

   struct nir_lower_subgroups_options subgroup_options = {
      .subgroup_size = 32,
      .ballot_bit_size = 32,
   };
   /* prepare for IO lowering */
   NIR_PASS_V(nir, nir_opt_deref);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa);
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);

   /* codegen assumes vec4 alignment for memory */
   NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);

   NIR_PASS_V(nir, nir_lower_io,
              (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out),
              type_size, (nir_lower_io_options)0);

   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);

   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_lower_phis_to_scalar);
   /*TODO: improve this lowering/optimisation loop so that we can use
    * nir_opt_idiv_const effectively before this.
    */
   NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_precise);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_trivial_continues);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
   } while (progress);
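   // A standard fixed-point optimization loop: every NIR_PASS ORs whether it
   // changed anything into 'progress', and the pipeline reruns until a full
   // iteration is quiet, since e.g. constant folding exposes new CSE and
   // copy-propagation opportunities and vice versa.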
   NIR_PASS_V(nir, nir_lower_bool_to_int32);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);

   // Garbage collect dead instructions
   nir_sweep(nir);

   if (!parseNIR()) {
      ERROR("Couldn't parse NIR!\n");
      return false;
   }

   if (!assignSlots()) {
      ERROR("Couldn't assign slots!\n");
      return false;
   }

   if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
      nir_print_shader(nir, stderr);

   nir_foreach_function(function, nir) {
      if (!visit(function))
         return false;
   }

   return true;
}
} // unnamed namespace
namespace nv50_ir {

bool
Program::makeFromNIR(struct nv50_ir_prog_info *info)
{
   nir_shader *nir = (nir_shader*)info->bin.source;
   Converter converter(this, nir, info);
   bool result = converter.run();
   if (!result)
      return result;
   LoweringHelper lowering;
   lowering.run(this);
   tlsSize = info->bin.tlsSpace;
   return result;
}

} // namespace nv50_ir
static nir_shader_compiler_options
nvir_nir_shader_compiler_options(int chipset)
{
   nir_shader_compiler_options op = {};
   op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_ffma = false;
   op.fuse_ffma = false; /* nir doesn't track mad vs fma */
   op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_flrp32 = true;
   op.lower_flrp64 = true;
   op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
   op.lower_fsat = false;
   op.lower_fsqrt = false; // TODO: only before gm200
   op.lower_sincos = false;
   op.lower_fmod = true;
   op.lower_bitfield_extract = false;
   op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_bitfield_insert = false;
   op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_bitfield_insert_to_bitfield_select = false;
   op.lower_bitfield_reverse = false;
   op.lower_bit_count = false;
   op.lower_ifind_msb = false;
   op.lower_find_lsb = false;
   op.lower_uadd_carry = true; // TODO
   op.lower_usub_borrow = true; // TODO
   op.lower_mul_high = false;
   op.lower_negate = false;
   op.lower_sub = true;
   op.lower_scmp = true; // TODO: not implemented yet
   op.lower_vector_cmp = false;
   op.lower_idiv = true;
   op.lower_bitops = false;
   op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
   op.lower_fdph = false;
   op.lower_fdot = false;
   op.fdot_replicates = false; // TODO
   op.lower_ffloor = false; // TODO
   op.lower_ffract = true;
   op.lower_fceil = false; // TODO
   op.lower_ftrunc = false;
   op.lower_ldexp = true;
   op.lower_pack_half_2x16 = true;
   op.lower_pack_unorm_2x16 = true;
   op.lower_pack_snorm_2x16 = true;
   op.lower_pack_unorm_4x8 = true;
   op.lower_pack_snorm_4x8 = true;
   op.lower_unpack_half_2x16 = true;
   op.lower_unpack_unorm_2x16 = true;
   op.lower_unpack_snorm_2x16 = true;
   op.lower_unpack_unorm_4x8 = true;
   op.lower_unpack_snorm_4x8 = true;
   op.lower_pack_split = false;
   op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
   op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
   op.lower_all_io_to_temps = false;
   op.lower_all_io_to_elements = false;
   op.vertex_id_zero_based = false;
   op.lower_base_vertex = false;
   op.lower_helper_invocation = false;
   op.optimize_sample_mask_in = false;
   op.lower_cs_local_index_from_id = true;
   op.lower_cs_local_id_from_index = false;
   op.lower_device_index_to_zero = false; // TODO
   op.lower_wpos_pntc = false; // TODO
   op.lower_hadd = true; // TODO
   op.lower_add_sat = true; // TODO
   op.vectorize_io = false;
   op.lower_to_scalar = false;
   op.unify_interfaces = false;
   op.use_interpolated_input_intrinsics = true;
   op.lower_mul_2x32_64 = true; // TODO
   op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
   op.has_imul24 = false;
   op.intel_vec4 = false;
   op.max_unroll_iterations = 32;
   op.lower_int64_options = (nir_lower_int64_options) (
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
      nir_lower_divmod64 |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
      ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
      ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
      nir_lower_ufind_msb64
   );
.lower_doubles_options
= (nir_lower_doubles_options
) (
3293 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_drcp
: 0) |
3294 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_dsqrt
: 0) |
3295 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_drsq
: 0) |
3296 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_dfract
: 0) |
3298 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_dsub
: 0) |
3299 ((chipset
>= NVISA_GV100_CHIPSET
) ? nir_lower_ddiv
: 0)
static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
   nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
const nir_shader_compiler_options *
nv50_ir_nir_shader_compiler_options(int chipset)
{
   if (chipset >= NVISA_GV100_CHIPSET)
      return &gv100_nir_shader_compiler_options;
   if (chipset >= NVISA_GM107_CHIPSET)
      return &gm107_nir_shader_compiler_options;
   return &gf100_nir_shader_compiler_options;
}