/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */
#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif

#if __cplusplus >= 201103L
using std::unordered_map;
#else
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;
type_size(const struct glsl_type *type, bool bindless)
   return glsl_count_attribute_slots(type, false);
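// Converter walks a single nir_shader and emits the equivalent nv50 ir,
// dispatching on NIR blocks, instructions and intrinsics through the
// visit()/convert() overloads declared below.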
class Converter : public ConverterCommon
   Converter(Program *, nir_shader *, nv50_ir_prog_info *);

   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, uint32_t> NirArrayLMemOffsets;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;
   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr *, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or set some fields on
   // instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, the Value gets returned
   // through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar; vec4 addressing is
   // assumed otherwise.
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);
   void setInterpolate(nv50_ir_varying *,

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);
   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);
   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_deref_instr *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr *);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target &);

   uint16_t handleDeref(nir_deref_instr *, Value * &indirect, const nir_variable * &);
   CacheMode getCacheModeFromVar(const nir_variable *);

   ImmediateMap immediates;
   NirArrayLMemOffsets regToLmemOffset;

   unsigned int curLoopDepth;

   Instruction *immInsertPos;

   int clipVertexOutput;
Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info)
   : ConverterCommon(prog, info),
   zero = mkImm((uint32_t)0);

Converter::convert(nir_block *block)
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;

Converter::isFloatType(nir_alu_type type)
   return nir_alu_type_get_base_type(type) == nir_type_float;

Converter::isSignedType(nir_alu_type type)
   return nir_alu_type_get_base_type(type) == nir_type_int;
Converter::isResultFloat(nir_op op)
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);

Converter::isResultSigned(nir_op op)
   // there is no umul and we get wrong results if we treat all muls as signed
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isSignedType(info.output_type);
   ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
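// The getDType()/getSType() helpers below derive an nv50 ir DataType for a
// NIR destination or source: the bit size comes from the SSA def or register,
// while float-ness/signedness comes from nir_op_infos (signed atomics are
// special-cased in the nir_intrinsic_instr overload).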
Converter::getDType(nir_alu_instr *insn)
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);

Converter::getDType(nir_intrinsic_instr *insn)
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:

   return getDType(insn, isSigned);

Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);

Converter::getDType(nir_op op, uint8_t bitSize)
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);

Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
      bitSize = src.ssa->bit_size;
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
Converter::getOperation(nir_op op)
   // basic ops with float and int variants
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
   case nir_op_pack_64_2x32_split:
   case nir_op_imul_high:
   case nir_op_umul_high:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);

Converter::getOperation(nir_texop op)
   case nir_texop_txf_ms:
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
      ERROR("couldn't get operation for nir_texop %u\n", op);
Converter::getOperation(nir_intrinsic_op op)
   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_end_primitive:
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_deref_store:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);

Converter::preOperationNeeded(nir_op op)

Converter::getSubOp(nir_op op)
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
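// getSubOp(nir_intrinsic_op) folds all memory-space variants of an atomic
// (bindless image, image, image_deref, global, shared, SSBO) onto the same
// NV50_IR_SUBOP_ATOM_* value, and also picks the membar/vote sub-ops.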
Converter::getSubOp(nir_intrinsic_op op)
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;

Converter::getCondCode(nir_op op)
   ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
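// The convert() overloads below allocate one LValue per component for a NIR
// register or SSA def and cache the result in regDefs/ssaDefs, so every use
// of the same def maps back to the same nv50 ir values.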
Converter::convert(nir_alu_dest *dest)
   return convert(&dest->dest);

Converter::convert(nir_dest *dest)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
   return convert(dest->reg.reg);

Converter::convert(nir_register *reg)
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;

Converter::convert(nir_ssa_def *def)
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
Converter::getSrc(nir_alu_src *src, uint8_t component)
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
   return getSrc(&src->src, src->swizzle[component]);

Converter::getSrc(nir_register *reg, uint8_t idx)
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];

Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
      return getSrc(src->ssa, idx);
   if (src->reg.indirect) {
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
   return getSrc(src->reg.reg, idx);

Converter::getSrc(nir_ssa_def *src, uint8_t idx)
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
   return it->second[idx];
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
   nir_const_value *offset = nir_src_as_const_value(*src);
      return offset[0].u32;
   indirect = getSrc(src, idx, true);

Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect,
                       bool isScalar)
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
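// The three helpers below translate gl_vert_attrib, gl_varying_slot and
// gl_frag_result locations into the TGSI semantic name/index pairs that the
// nv50_ir_varying slots in nv50_ir_prog_info expect.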
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;

   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;

      ERROR("unknown vert attrib slot %u\n", slot);
varying_slot_to_tgsi_semantic(gl_varying_slot slot, unsigned *name, unsigned *index)
   assert(name && index);

   if (slot >= VARYING_SLOT_TESS_MAX) {
      ERROR("invalid varying slot %u\n", slot);

   if (slot >= VARYING_SLOT_PATCH0) {
      *name = TGSI_SEMANTIC_PATCH;
      *index = slot - VARYING_SLOT_PATCH0;

   if (slot >= VARYING_SLOT_VAR0) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VARYING_SLOT_VAR0;

   if (slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VARYING_SLOT_TEX0;

   case VARYING_SLOT_BFC0:
      *name = TGSI_SEMANTIC_BCOLOR;
   case VARYING_SLOT_BFC1:
      *name = TGSI_SEMANTIC_BCOLOR;
   case VARYING_SLOT_CLIP_DIST0:
      *name = TGSI_SEMANTIC_CLIPDIST;
   case VARYING_SLOT_CLIP_DIST1:
      *name = TGSI_SEMANTIC_CLIPDIST;
   case VARYING_SLOT_CLIP_VERTEX:
      *name = TGSI_SEMANTIC_CLIPVERTEX;
   case VARYING_SLOT_COL0:
      *name = TGSI_SEMANTIC_COLOR;
   case VARYING_SLOT_COL1:
      *name = TGSI_SEMANTIC_COLOR;
   case VARYING_SLOT_EDGE:
      *name = TGSI_SEMANTIC_EDGEFLAG;
   case VARYING_SLOT_FACE:
      *name = TGSI_SEMANTIC_FACE;
   case VARYING_SLOT_FOGC:
      *name = TGSI_SEMANTIC_FOG;
   case VARYING_SLOT_LAYER:
      *name = TGSI_SEMANTIC_LAYER;
   case VARYING_SLOT_PNTC:
      *name = TGSI_SEMANTIC_PCOORD;
   case VARYING_SLOT_POS:
      *name = TGSI_SEMANTIC_POSITION;
   case VARYING_SLOT_PRIMITIVE_ID:
      *name = TGSI_SEMANTIC_PRIMID;
   case VARYING_SLOT_PSIZ:
      *name = TGSI_SEMANTIC_PSIZE;
   case VARYING_SLOT_TESS_LEVEL_INNER:
      *name = TGSI_SEMANTIC_TESSINNER;
   case VARYING_SLOT_TESS_LEVEL_OUTER:
      *name = TGSI_SEMANTIC_TESSOUTER;
   case VARYING_SLOT_VIEWPORT:
      *name = TGSI_SEMANTIC_VIEWPORT_INDEX;

      ERROR("unknown varying slot %u\n", slot);
frag_result_to_tgsi_semantic(unsigned slot, unsigned *name, unsigned *index)
   if (slot >= FRAG_RESULT_DATA0) {
      *name = TGSI_SEMANTIC_COLOR;
      *index = slot - FRAG_RESULT_COLOR - 2; // intentional

   case FRAG_RESULT_COLOR:
      *name = TGSI_SEMANTIC_COLOR;
   case FRAG_RESULT_DEPTH:
      *name = TGSI_SEMANTIC_POSITION;
   case FRAG_RESULT_SAMPLE_MASK:
      *name = TGSI_SEMANTIC_SAMPLEMASK;

      ERROR("unknown frag result slot %u\n", slot);
1016 Converter::setInterpolate(nv50_ir_varying
*var
,
1022 case INTERP_MODE_FLAT
:
1025 case INTERP_MODE_NONE
:
1026 if (semantic
== TGSI_SEMANTIC_COLOR
)
1028 else if (semantic
== TGSI_SEMANTIC_POSITION
)
1031 case INTERP_MODE_NOPERSPECTIVE
:
1034 case INTERP_MODE_SMOOTH
:
1037 var
->centroid
= centroid
;
1041 calcSlots(const glsl_type
*type
, Program::Type stage
, const shader_info
&info
,
1042 bool input
, const nir_variable
*var
)
1044 if (!type
->is_array())
1045 return type
->count_attribute_slots(false);
1049 case Program::TYPE_GEOMETRY
:
1050 slots
= type
->uniform_locations();
1052 slots
/= info
.gs
.vertices_in
;
1054 case Program::TYPE_TESSELLATION_CONTROL
:
1055 case Program::TYPE_TESSELLATION_EVAL
:
1056 // remove first dimension
1057 if (var
->data
.patch
|| (!input
&& stage
== Program::TYPE_TESSELLATION_EVAL
))
1058 slots
= type
->uniform_locations();
1060 slots
= type
->fields
.array
->uniform_locations();
1063 slots
= type
->count_attribute_slots(false);
1070 bool Converter::assignSlots() {
1074 info
->io
.viewportId
= -1;
1075 info
->numInputs
= 0;
1076 info
->numOutputs
= 0;
1078 // we have to fixup the uniform locations for arrays
1079 unsigned numImages
= 0;
1080 nir_foreach_variable(var
, &nir
->uniforms
) {
1081 const glsl_type
*type
= var
->type
;
1082 if (!type
->without_array()->is_image())
1084 var
->data
.driver_location
= numImages
;
1085 numImages
+= type
->is_array() ? type
->arrays_of_arrays_size() : 1;
1088 info
->numSysVals
= 0;
1089 for (uint8_t i
= 0; i
< SYSTEM_VALUE_MAX
; ++i
) {
1090 if (!(nir
->info
.system_values_read
& 1ull << i
))
1093 info
->sv
[info
->numSysVals
].sn
= tgsi_get_sysval_semantic(i
);
1094 info
->sv
[info
->numSysVals
].si
= 0;
1095 info
->sv
[info
->numSysVals
].input
= 0; // TODO inferSysValDirection(sn);
1098 case SYSTEM_VALUE_INSTANCE_ID
:
1099 info
->io
.instanceId
= info
->numSysVals
;
1101 case SYSTEM_VALUE_TESS_LEVEL_INNER
:
1102 case SYSTEM_VALUE_TESS_LEVEL_OUTER
:
1103 info
->sv
[info
->numSysVals
].patch
= 1;
1105 case SYSTEM_VALUE_VERTEX_ID
:
1106 info
->io
.vertexId
= info
->numSysVals
;
1112 info
->numSysVals
+= 1;
1115 if (prog
->getType() == Program::TYPE_COMPUTE
)
1118 nir_foreach_variable(var
, &nir
->inputs
) {
1119 const glsl_type
*type
= var
->type
;
1120 int slot
= var
->data
.location
;
1121 uint16_t slots
= calcSlots(type
, prog
->getType(), nir
->info
, true, var
);
1122 uint32_t comp
= type
->is_array() ? type
->without_array()->component_slots()
1123 : type
->component_slots();
1124 uint32_t frac
= var
->data
.location_frac
;
1125 uint32_t vary
= var
->data
.driver_location
;
1127 if (glsl_base_type_is_64bit(type
->without_array()->base_type
)) {
1132 assert(vary
+ slots
<= PIPE_MAX_SHADER_INPUTS
);
1134 switch(prog
->getType()) {
1135 case Program::TYPE_FRAGMENT
:
1136 varying_slot_to_tgsi_semantic((gl_varying_slot
)slot
, &name
, &index
);
1137 for (uint16_t i
= 0; i
< slots
; ++i
) {
1138 setInterpolate(&info
->in
[vary
+ i
], var
->data
.interpolation
,
1139 var
->data
.centroid
| var
->data
.sample
, name
);
1142 case Program::TYPE_GEOMETRY
:
1143 varying_slot_to_tgsi_semantic((gl_varying_slot
)slot
, &name
, &index
);
1145 case Program::TYPE_TESSELLATION_CONTROL
:
1146 case Program::TYPE_TESSELLATION_EVAL
:
1147 varying_slot_to_tgsi_semantic((gl_varying_slot
)slot
, &name
, &index
);
1148 if (var
->data
.patch
&& name
== TGSI_SEMANTIC_PATCH
)
1149 info
->numPatchConstants
= MAX2(info
->numPatchConstants
, index
+ slots
);
1151 case Program::TYPE_VERTEX
:
1152 vert_attrib_to_tgsi_semantic((gl_vert_attrib
)slot
, &name
, &index
);
1154 case TGSI_SEMANTIC_EDGEFLAG
:
1155 info
->io
.edgeFlagIn
= vary
;
1162 ERROR("unknown shader type %u in assignSlots\n", prog
->getType());
1166 for (uint16_t i
= 0u; i
< slots
; ++i
, ++vary
) {
1167 info
->in
[vary
].id
= vary
;
1168 info
->in
[vary
].patch
= var
->data
.patch
;
1169 info
->in
[vary
].sn
= name
;
1170 info
->in
[vary
].si
= index
+ i
;
1171 if (glsl_base_type_is_64bit(type
->without_array()->base_type
))
1173 info
->in
[vary
].mask
|= (((1 << (comp
* 2)) - 1) << (frac
* 2) >> 0x4);
1175 info
->in
[vary
].mask
|= (((1 << (comp
* 2)) - 1) << (frac
* 2) & 0xf);
1177 info
->in
[vary
].mask
|= ((1 << comp
) - 1) << frac
;
1179 info
->numInputs
= std::max
<uint8_t>(info
->numInputs
, vary
);
1182 nir_foreach_variable(var
, &nir
->outputs
) {
1183 const glsl_type
*type
= var
->type
;
1184 int slot
= var
->data
.location
;
1185 uint16_t slots
= calcSlots(type
, prog
->getType(), nir
->info
, false, var
);
1186 uint32_t comp
= type
->is_array() ? type
->without_array()->component_slots()
1187 : type
->component_slots();
1188 uint32_t frac
= var
->data
.location_frac
;
1189 uint32_t vary
= var
->data
.driver_location
;
1191 if (glsl_base_type_is_64bit(type
->without_array()->base_type
)) {
1196 assert(vary
< PIPE_MAX_SHADER_OUTPUTS
);
1198 switch(prog
->getType()) {
1199 case Program::TYPE_FRAGMENT
:
1200 frag_result_to_tgsi_semantic((gl_frag_result
)slot
, &name
, &index
);
1202 case TGSI_SEMANTIC_COLOR
:
1203 if (!var
->data
.fb_fetch_output
)
1204 info
->prop
.fp
.numColourResults
++;
1205 info
->prop
.fp
.separateFragData
= true;
1206 // sometimes we get FRAG_RESULT_DATAX with data.index 0
1207 // sometimes we get FRAG_RESULT_DATA0 with data.index X
1208 index
= index
== 0 ? var
->data
.index
: index
;
1210 case TGSI_SEMANTIC_POSITION
:
1211 info
->io
.fragDepth
= vary
;
1212 info
->prop
.fp
.writesDepth
= true;
1214 case TGSI_SEMANTIC_SAMPLEMASK
:
1215 info
->io
.sampleMask
= vary
;
1221 case Program::TYPE_GEOMETRY
:
1222 case Program::TYPE_TESSELLATION_CONTROL
:
1223 case Program::TYPE_TESSELLATION_EVAL
:
1224 case Program::TYPE_VERTEX
:
1225 varying_slot_to_tgsi_semantic((gl_varying_slot
)slot
, &name
, &index
);
1227 if (var
->data
.patch
&& name
!= TGSI_SEMANTIC_TESSINNER
&&
1228 name
!= TGSI_SEMANTIC_TESSOUTER
)
1229 info
->numPatchConstants
= MAX2(info
->numPatchConstants
, index
+ slots
);
1232 case TGSI_SEMANTIC_CLIPDIST
:
1233 info
->io
.genUserClip
= -1;
1235 case TGSI_SEMANTIC_CLIPVERTEX
:
1236 clipVertexOutput
= vary
;
1238 case TGSI_SEMANTIC_EDGEFLAG
:
1239 info
->io
.edgeFlagOut
= vary
;
1241 case TGSI_SEMANTIC_POSITION
:
1242 if (clipVertexOutput
< 0)
1243 clipVertexOutput
= vary
;
1250 ERROR("unknown shader type %u in assignSlots\n", prog
->getType());
1254 for (uint16_t i
= 0u; i
< slots
; ++i
, ++vary
) {
1255 info
->out
[vary
].id
= vary
;
1256 info
->out
[vary
].patch
= var
->data
.patch
;
1257 info
->out
[vary
].sn
= name
;
1258 info
->out
[vary
].si
= index
+ i
;
1259 if (glsl_base_type_is_64bit(type
->without_array()->base_type
))
1261 info
->out
[vary
].mask
|= (((1 << (comp
* 2)) - 1) << (frac
* 2) >> 0x4);
1263 info
->out
[vary
].mask
|= (((1 << (comp
* 2)) - 1) << (frac
* 2) & 0xf);
1265 info
->out
[vary
].mask
|= ((1 << comp
) - 1) << frac
;
1267 if (nir
->info
.outputs_read
& 1ull << slot
)
1268 info
->out
[vary
].oread
= 1;
1270 info
->numOutputs
= std::max
<uint8_t>(info
->numOutputs
, vary
);
1273 if (info
->io
.genUserClip
> 0) {
1274 info
->io
.clipDistances
= info
->io
.genUserClip
;
1276 const unsigned int nOut
= (info
->io
.genUserClip
+ 3) / 4;
1278 for (unsigned int n
= 0; n
< nOut
; ++n
) {
1279 unsigned int i
= info
->numOutputs
++;
1280 info
->out
[i
].id
= i
;
1281 info
->out
[i
].sn
= TGSI_SEMANTIC_CLIPDIST
;
1282 info
->out
[i
].si
= n
;
1283 info
->out
[i
].mask
= ((1 << info
->io
.clipDistances
) - 1) >> (n
* 4);
1287 return info
->assignSlots(info
) == 0;
1291 Converter::getSlotAddress(nir_intrinsic_instr
*insn
, uint8_t idx
, uint8_t slot
)
1294 int offset
= nir_intrinsic_component(insn
);
1297 if (nir_intrinsic_infos
[insn
->intrinsic
].has_dest
)
1298 ty
= getDType(insn
);
1300 ty
= getSType(insn
->src
[0], false, false);
1302 switch (insn
->intrinsic
) {
1303 case nir_intrinsic_load_input
:
1304 case nir_intrinsic_load_interpolated_input
:
1305 case nir_intrinsic_load_per_vertex_input
:
1308 case nir_intrinsic_load_output
:
1309 case nir_intrinsic_load_per_vertex_output
:
1310 case nir_intrinsic_store_output
:
1311 case nir_intrinsic_store_per_vertex_output
:
1315 ERROR("unknown intrinsic in getSlotAddress %s",
1316 nir_intrinsic_infos
[insn
->intrinsic
].name
);
1322 if (typeSizeof(ty
) == 8) {
1334 assert(!input
|| idx
< PIPE_MAX_SHADER_INPUTS
);
1335 assert(input
|| idx
< PIPE_MAX_SHADER_OUTPUTS
);
1337 const nv50_ir_varying
*vary
= input
? info
->in
: info
->out
;
1338 return vary
[idx
].slot
[slot
] * 4;
1342 Converter::loadFrom(DataFile file
, uint8_t i
, DataType ty
, Value
*def
,
1343 uint32_t base
, uint8_t c
, Value
*indirect0
,
1344 Value
*indirect1
, bool patch
)
1346 unsigned int tySize
= typeSizeof(ty
);
1349 (file
== FILE_MEMORY_CONST
|| file
== FILE_MEMORY_BUFFER
|| indirect0
)) {
1350 Value
*lo
= getSSA();
1351 Value
*hi
= getSSA();
1354 mkLoad(TYPE_U32
, lo
,
1355 mkSymbol(file
, i
, TYPE_U32
, base
+ c
* tySize
),
1357 loi
->setIndirect(0, 1, indirect1
);
1358 loi
->perPatch
= patch
;
1361 mkLoad(TYPE_U32
, hi
,
1362 mkSymbol(file
, i
, TYPE_U32
, base
+ c
* tySize
+ 4),
1364 hii
->setIndirect(0, 1, indirect1
);
1365 hii
->perPatch
= patch
;
1367 return mkOp2(OP_MERGE
, ty
, def
, lo
, hi
);
1370 mkLoad(ty
, def
, mkSymbol(file
, i
, ty
, base
+ c
* tySize
), indirect0
);
1371 ld
->setIndirect(0, 1, indirect1
);
1372 ld
->perPatch
= patch
;
1378 Converter::storeTo(nir_intrinsic_instr
*insn
, DataFile file
, operation op
,
1379 DataType ty
, Value
*src
, uint8_t idx
, uint8_t c
,
1380 Value
*indirect0
, Value
*indirect1
)
1382 uint8_t size
= typeSizeof(ty
);
1383 uint32_t address
= getSlotAddress(insn
, idx
, c
);
1385 if (size
== 8 && indirect0
) {
1387 mkSplit(split
, 4, src
);
1389 if (op
== OP_EXPORT
) {
1390 split
[0] = mkMov(getSSA(), split
[0], ty
)->getDef(0);
1391 split
[1] = mkMov(getSSA(), split
[1], ty
)->getDef(0);
1394 mkStore(op
, TYPE_U32
, mkSymbol(file
, 0, TYPE_U32
, address
), indirect0
,
1395 split
[0])->perPatch
= info
->out
[idx
].patch
;
1396 mkStore(op
, TYPE_U32
, mkSymbol(file
, 0, TYPE_U32
, address
+ 4), indirect0
,
1397 split
[1])->perPatch
= info
->out
[idx
].patch
;
1399 if (op
== OP_EXPORT
)
1400 src
= mkMov(getSSA(size
), src
, ty
)->getDef(0);
1401 mkStore(op
, ty
, mkSymbol(file
, 0, ty
, address
), indirect0
,
1402 src
)->perPatch
= info
->out
[idx
].patch
;
1407 Converter::parseNIR()
1409 info
->bin
.tlsSpace
= 0;
1410 info
->io
.clipDistances
= nir
->info
.clip_distance_array_size
;
1411 info
->io
.cullDistances
= nir
->info
.cull_distance_array_size
;
1413 switch(prog
->getType()) {
1414 case Program::TYPE_COMPUTE
:
1415 info
->prop
.cp
.numThreads
[0] = nir
->info
.cs
.local_size
[0];
1416 info
->prop
.cp
.numThreads
[1] = nir
->info
.cs
.local_size
[1];
1417 info
->prop
.cp
.numThreads
[2] = nir
->info
.cs
.local_size
[2];
1418 info
->bin
.smemSize
= nir
->info
.cs
.shared_size
;
1420 case Program::TYPE_FRAGMENT
:
1421 info
->prop
.fp
.earlyFragTests
= nir
->info
.fs
.early_fragment_tests
;
1422 info
->prop
.fp
.persampleInvocation
=
1423 (nir
->info
.system_values_read
& SYSTEM_BIT_SAMPLE_ID
) ||
1424 (nir
->info
.system_values_read
& SYSTEM_BIT_SAMPLE_POS
);
1425 info
->prop
.fp
.postDepthCoverage
= nir
->info
.fs
.post_depth_coverage
;
1426 info
->prop
.fp
.readsSampleLocations
=
1427 (nir
->info
.system_values_read
& SYSTEM_BIT_SAMPLE_POS
);
1428 info
->prop
.fp
.usesDiscard
= nir
->info
.fs
.uses_discard
;
1429 info
->prop
.fp
.usesSampleMaskIn
=
1430 !!(nir
->info
.system_values_read
& SYSTEM_BIT_SAMPLE_MASK_IN
);
1432 case Program::TYPE_GEOMETRY
:
1433 info
->prop
.gp
.inputPrim
= nir
->info
.gs
.input_primitive
;
1434 info
->prop
.gp
.instanceCount
= nir
->info
.gs
.invocations
;
1435 info
->prop
.gp
.maxVertices
= nir
->info
.gs
.vertices_out
;
1436 info
->prop
.gp
.outputPrim
= nir
->info
.gs
.output_primitive
;
1438 case Program::TYPE_TESSELLATION_CONTROL
:
1439 case Program::TYPE_TESSELLATION_EVAL
:
1440 if (nir
->info
.tess
.primitive_mode
== GL_ISOLINES
)
1441 info
->prop
.tp
.domain
= GL_LINES
;
1443 info
->prop
.tp
.domain
= nir
->info
.tess
.primitive_mode
;
1444 info
->prop
.tp
.outputPatchSize
= nir
->info
.tess
.tcs_vertices_out
;
1445 info
->prop
.tp
.outputPrim
=
1446 nir
->info
.tess
.point_mode
? PIPE_PRIM_POINTS
: PIPE_PRIM_TRIANGLES
;
1447 info
->prop
.tp
.partitioning
= (nir
->info
.tess
.spacing
+ 1) % 3;
1448 info
->prop
.tp
.winding
= !nir
->info
.tess
.ccw
;
1450 case Program::TYPE_VERTEX
:
1451 info
->prop
.vp
.usesDrawParameters
=
1452 (nir
->info
.system_values_read
& BITFIELD64_BIT(SYSTEM_VALUE_BASE_VERTEX
)) ||
1453 (nir
->info
.system_values_read
& BITFIELD64_BIT(SYSTEM_VALUE_BASE_INSTANCE
)) ||
1454 (nir
->info
.system_values_read
& BITFIELD64_BIT(SYSTEM_VALUE_DRAW_ID
));
1464 Converter::visit(nir_function
*function
)
1466 assert(function
->impl
);
1468 // usually the blocks will set everything up, but main is special
1469 BasicBlock
*entry
= new BasicBlock(prog
->main
);
1470 exit
= new BasicBlock(prog
->main
);
1471 blocks
[nir_start_block(function
->impl
)->index
] = entry
;
1472 prog
->main
->setEntry(entry
);
1473 prog
->main
->setExit(exit
);
1475 setPosition(entry
, true);
1477 if (info
->io
.genUserClip
> 0) {
1478 for (int c
= 0; c
< 4; ++c
)
1479 clipVtx
[c
] = getScratch();
1482 switch (prog
->getType()) {
1483 case Program::TYPE_TESSELLATION_CONTROL
:
1485 OP_SUB
, TYPE_U32
, getSSA(),
1486 mkOp1v(OP_RDSV
, TYPE_U32
, getSSA(), mkSysVal(SV_LANEID
, 0)),
1487 mkOp1v(OP_RDSV
, TYPE_U32
, getSSA(), mkSysVal(SV_INVOCATION_ID
, 0)));
1489 case Program::TYPE_FRAGMENT
: {
1490 Symbol
*sv
= mkSysVal(SV_POSITION
, 3);
1491 fragCoord
[3] = mkOp1v(OP_RDSV
, TYPE_F32
, getSSA(), sv
);
1492 fp
.position
= mkOp1v(OP_RCP
, TYPE_F32
, fragCoord
[3], fragCoord
[3]);
1499 nir_foreach_register(reg
, &function
->impl
->registers
) {
1500 if (reg
->num_array_elems
) {
1501 // TODO: packed variables would be nice, but MemoryOpt fails
1502 // replace 4 with reg->num_components
1503 uint32_t size
= 4 * reg
->num_array_elems
* (reg
->bit_size
/ 8);
1504 regToLmemOffset
[reg
->index
] = info
->bin
.tlsSpace
;
1505 info
->bin
.tlsSpace
+= size
;
1509 nir_index_ssa_defs(function
->impl
);
1510 foreach_list_typed(nir_cf_node
, node
, node
, &function
->impl
->body
) {
1515 bb
->cfg
.attach(&exit
->cfg
, Graph::Edge::TREE
);
1516 setPosition(exit
, true);
1518 if ((prog
->getType() == Program::TYPE_VERTEX
||
1519 prog
->getType() == Program::TYPE_TESSELLATION_EVAL
)
1520 && info
->io
.genUserClip
> 0)
1521 handleUserClipPlanes();
1523 // TODO: for non main function this needs to be a OP_RETURN
1524 mkOp(OP_EXIT
, TYPE_NONE
, NULL
)->terminator
= 1;
Converter::visit(nir_cf_node *node)
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
      ERROR("unknown nir_cf_node type %u\n", node->type);
1545 Converter::visit(nir_block
*block
)
1547 if (!block
->predecessors
->entries
&& block
->instr_list
.is_empty())
1550 BasicBlock
*bb
= convert(block
);
1552 setPosition(bb
, true);
1553 nir_foreach_instr(insn
, block
) {
1561 Converter::visit(nir_if
*nif
)
1563 DataType sType
= getSType(nif
->condition
, false, false);
1564 Value
*src
= getSrc(&nif
->condition
, 0);
1566 nir_block
*lastThen
= nir_if_last_then_block(nif
);
1567 nir_block
*lastElse
= nir_if_last_else_block(nif
);
1569 assert(!lastThen
->successors
[1]);
1570 assert(!lastElse
->successors
[1]);
1572 BasicBlock
*ifBB
= convert(nir_if_first_then_block(nif
));
1573 BasicBlock
*elseBB
= convert(nir_if_first_else_block(nif
));
1575 bb
->cfg
.attach(&ifBB
->cfg
, Graph::Edge::TREE
);
1576 bb
->cfg
.attach(&elseBB
->cfg
, Graph::Edge::TREE
);
1578 // we only insert joinats, if both nodes end up at the end of the if again.
1579 // the reason for this to not happens are breaks/continues/ret/... which
1580 // have their own handling
1581 if (lastThen
->successors
[0] == lastElse
->successors
[0])
1582 bb
->joinAt
= mkFlow(OP_JOINAT
, convert(lastThen
->successors
[0]),
1585 mkFlow(OP_BRA
, elseBB
, CC_EQ
, src
)->setType(sType
);
1587 foreach_list_typed(nir_cf_node
, node
, node
, &nif
->then_list
) {
1591 setPosition(convert(lastThen
), true);
1592 if (!bb
->getExit() ||
1593 !bb
->getExit()->asFlow() ||
1594 bb
->getExit()->asFlow()->op
== OP_JOIN
) {
1595 BasicBlock
*tailBB
= convert(lastThen
->successors
[0]);
1596 mkFlow(OP_BRA
, tailBB
, CC_ALWAYS
, NULL
);
1597 bb
->cfg
.attach(&tailBB
->cfg
, Graph::Edge::FORWARD
);
1600 foreach_list_typed(nir_cf_node
, node
, node
, &nif
->else_list
) {
1604 setPosition(convert(lastElse
), true);
1605 if (!bb
->getExit() ||
1606 !bb
->getExit()->asFlow() ||
1607 bb
->getExit()->asFlow()->op
== OP_JOIN
) {
1608 BasicBlock
*tailBB
= convert(lastElse
->successors
[0]);
1609 mkFlow(OP_BRA
, tailBB
, CC_ALWAYS
, NULL
);
1610 bb
->cfg
.attach(&tailBB
->cfg
, Graph::Edge::FORWARD
);
1613 if (lastThen
->successors
[0] == lastElse
->successors
[0]) {
1614 setPosition(convert(lastThen
->successors
[0]), true);
1615 mkFlow(OP_JOIN
, NULL
, CC_ALWAYS
, NULL
)->fixed
= 1;
1622 Converter::visit(nir_loop
*loop
)
1625 func
->loopNestingBound
= std::max(func
->loopNestingBound
, curLoopDepth
);
1627 BasicBlock
*loopBB
= convert(nir_loop_first_block(loop
));
1628 BasicBlock
*tailBB
=
1629 convert(nir_cf_node_as_block(nir_cf_node_next(&loop
->cf_node
)));
1630 bb
->cfg
.attach(&loopBB
->cfg
, Graph::Edge::TREE
);
1632 mkFlow(OP_PREBREAK
, tailBB
, CC_ALWAYS
, NULL
);
1633 setPosition(loopBB
, false);
1634 mkFlow(OP_PRECONT
, loopBB
, CC_ALWAYS
, NULL
);
1636 foreach_list_typed(nir_cf_node
, node
, node
, &loop
->body
) {
1640 Instruction
*insn
= bb
->getExit();
1641 if (bb
->cfg
.incidentCount() != 0) {
1642 if (!insn
|| !insn
->asFlow()) {
1643 mkFlow(OP_CONT
, loopBB
, CC_ALWAYS
, NULL
);
1644 bb
->cfg
.attach(&loopBB
->cfg
, Graph::Edge::BACK
);
1645 } else if (insn
&& insn
->op
== OP_BRA
&& !insn
->getPredicate() &&
1646 tailBB
->cfg
.incidentCount() == 0) {
1647 // RA doesn't like having blocks around with no incident edge,
1648 // so we create a fake one to make it happy
1649 bb
->cfg
.attach(&tailBB
->cfg
, Graph::Edge::TREE
);
Converter::visit(nir_instr *insn)
   // we need an insertion point for on the fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_deref:
      return visit(nir_instr_as_deref(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
      ERROR("unknown nir_instr type %u\n", insn->type);
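// Translates system-value load intrinsics into the corresponding nv50 ir
// SVSemantic, which the intrinsic handler below reads through OP_RDSV.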
Converter::convert(nir_intrinsic_op intr)
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
1750 Converter::visit(nir_intrinsic_instr
*insn
)
1752 nir_intrinsic_op op
= insn
->intrinsic
;
1753 const nir_intrinsic_info
&opInfo
= nir_intrinsic_infos
[op
];
1756 case nir_intrinsic_load_uniform
: {
1757 LValues
&newDefs
= convert(&insn
->dest
);
1758 const DataType dType
= getDType(insn
);
1760 uint32_t coffset
= getIndirect(insn
, 0, 0, indirect
);
1761 for (uint8_t i
= 0; i
< insn
->num_components
; ++i
) {
1762 loadFrom(FILE_MEMORY_CONST
, 0, dType
, newDefs
[i
], 16 * coffset
, i
, indirect
);
1766 case nir_intrinsic_store_output
:
1767 case nir_intrinsic_store_per_vertex_output
: {
1769 DataType dType
= getSType(insn
->src
[0], false, false);
1770 uint32_t idx
= getIndirect(insn
, op
== nir_intrinsic_store_output
? 1 : 2, 0, indirect
);
1772 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
1773 if (!((1u << i
) & nir_intrinsic_write_mask(insn
)))
1777 Value
*src
= getSrc(&insn
->src
[0], i
);
1778 switch (prog
->getType()) {
1779 case Program::TYPE_FRAGMENT
: {
1780 if (info
->out
[idx
].sn
== TGSI_SEMANTIC_POSITION
) {
1781 // TGSI uses a different interface than NIR, TGSI stores that
1782 // value in the z component, NIR in X
1784 src
= mkOp1v(OP_SAT
, TYPE_F32
, getScratch(), src
);
1788 case Program::TYPE_GEOMETRY
:
1789 case Program::TYPE_VERTEX
: {
1790 if (info
->io
.genUserClip
> 0 && idx
== (uint32_t)clipVertexOutput
) {
1791 mkMov(clipVtx
[i
], src
);
1800 storeTo(insn
, FILE_SHADER_OUTPUT
, OP_EXPORT
, dType
, src
, idx
, i
+ offset
, indirect
);
1804 case nir_intrinsic_load_input
:
1805 case nir_intrinsic_load_interpolated_input
:
1806 case nir_intrinsic_load_output
: {
1807 LValues
&newDefs
= convert(&insn
->dest
);
1810 if (prog
->getType() == Program::TYPE_FRAGMENT
&&
1811 op
== nir_intrinsic_load_output
) {
1812 std::vector
<Value
*> defs
, srcs
;
1815 srcs
.push_back(getSSA());
1816 srcs
.push_back(getSSA());
1817 Value
*x
= mkOp1v(OP_RDSV
, TYPE_F32
, getSSA(), mkSysVal(SV_POSITION
, 0));
1818 Value
*y
= mkOp1v(OP_RDSV
, TYPE_F32
, getSSA(), mkSysVal(SV_POSITION
, 1));
1819 mkCvt(OP_CVT
, TYPE_U32
, srcs
[0], TYPE_F32
, x
)->rnd
= ROUND_Z
;
1820 mkCvt(OP_CVT
, TYPE_U32
, srcs
[1], TYPE_F32
, y
)->rnd
= ROUND_Z
;
1822 srcs
.push_back(mkOp1v(OP_RDSV
, TYPE_U32
, getSSA(), mkSysVal(SV_LAYER
, 0)));
1823 srcs
.push_back(mkOp1v(OP_RDSV
, TYPE_U32
, getSSA(), mkSysVal(SV_SAMPLE_INDEX
, 0)));
1825 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
1826 defs
.push_back(newDefs
[i
]);
1830 TexInstruction
*texi
= mkTex(OP_TXF
, TEX_TARGET_2D_MS_ARRAY
, 0, 0, defs
, srcs
);
1831 texi
->tex
.levelZero
= 1;
1832 texi
->tex
.mask
= mask
;
1833 texi
->tex
.useOffsets
= 0;
1834 texi
->tex
.r
= 0xffff;
1835 texi
->tex
.s
= 0xffff;
1837 info
->prop
.fp
.readsFramebuffer
= true;
1841 const DataType dType
= getDType(insn
);
1843 bool input
= op
!= nir_intrinsic_load_output
;
1847 uint32_t idx
= getIndirect(insn
, op
== nir_intrinsic_load_interpolated_input
? 1 : 0, 0, indirect
);
1848 nv50_ir_varying
& vary
= input
? info
->in
[idx
] : info
->out
[idx
];
1850 // see load_barycentric_* handling
1851 if (prog
->getType() == Program::TYPE_FRAGMENT
) {
1852 mode
= translateInterpMode(&vary
, nvirOp
);
1853 if (op
== nir_intrinsic_load_interpolated_input
) {
1854 ImmediateValue immMode
;
1855 if (getSrc(&insn
->src
[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode
))
1856 mode
|= immMode
.reg
.data
.u32
;
1860 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
1861 uint32_t address
= getSlotAddress(insn
, idx
, i
);
1862 Symbol
*sym
= mkSymbol(input
? FILE_SHADER_INPUT
: FILE_SHADER_OUTPUT
, 0, dType
, address
);
1863 if (prog
->getType() == Program::TYPE_FRAGMENT
) {
1865 if (typeSizeof(dType
) == 8) {
1866 Value
*lo
= getSSA();
1867 Value
*hi
= getSSA();
1868 Instruction
*interp
;
1870 interp
= mkOp1(nvirOp
, TYPE_U32
, lo
, sym
);
1871 if (nvirOp
== OP_PINTERP
)
1872 interp
->setSrc(s
++, fp
.position
);
1873 if (mode
& NV50_IR_INTERP_OFFSET
)
1874 interp
->setSrc(s
++, getSrc(&insn
->src
[0], 0));
1875 interp
->setInterpolate(mode
);
1876 interp
->setIndirect(0, 0, indirect
);
1878 Symbol
*sym1
= mkSymbol(input
? FILE_SHADER_INPUT
: FILE_SHADER_OUTPUT
, 0, dType
, address
+ 4);
1879 interp
= mkOp1(nvirOp
, TYPE_U32
, hi
, sym1
);
1880 if (nvirOp
== OP_PINTERP
)
1881 interp
->setSrc(s
++, fp
.position
);
1882 if (mode
& NV50_IR_INTERP_OFFSET
)
1883 interp
->setSrc(s
++, getSrc(&insn
->src
[0], 0));
1884 interp
->setInterpolate(mode
);
1885 interp
->setIndirect(0, 0, indirect
);
1887 mkOp2(OP_MERGE
, dType
, newDefs
[i
], lo
, hi
);
1889 Instruction
*interp
= mkOp1(nvirOp
, dType
, newDefs
[i
], sym
);
1890 if (nvirOp
== OP_PINTERP
)
1891 interp
->setSrc(s
++, fp
.position
);
1892 if (mode
& NV50_IR_INTERP_OFFSET
)
1893 interp
->setSrc(s
++, getSrc(&insn
->src
[0], 0));
1894 interp
->setInterpolate(mode
);
1895 interp
->setIndirect(0, 0, indirect
);
1898 mkLoad(dType
, newDefs
[i
], sym
, indirect
)->perPatch
= vary
.patch
;
1903 case nir_intrinsic_load_kernel_input
: {
1904 assert(prog
->getType() == Program::TYPE_COMPUTE
);
1905 assert(insn
->num_components
== 1);
1907 LValues
&newDefs
= convert(&insn
->dest
);
1908 const DataType dType
= getDType(insn
);
1910 uint32_t idx
= getIndirect(insn
, 0, 0, indirect
, true);
1912 mkLoad(dType
, newDefs
[0], mkSymbol(FILE_SHADER_INPUT
, 0, dType
, idx
), indirect
);
1915 case nir_intrinsic_load_barycentric_at_offset
:
1916 case nir_intrinsic_load_barycentric_at_sample
:
1917 case nir_intrinsic_load_barycentric_centroid
:
1918 case nir_intrinsic_load_barycentric_pixel
:
1919 case nir_intrinsic_load_barycentric_sample
: {
1920 LValues
&newDefs
= convert(&insn
->dest
);
1923 if (op
== nir_intrinsic_load_barycentric_centroid
||
1924 op
== nir_intrinsic_load_barycentric_sample
) {
1925 mode
= NV50_IR_INTERP_CENTROID
;
1926 } else if (op
== nir_intrinsic_load_barycentric_at_offset
) {
1928 for (uint8_t c
= 0; c
< 2; c
++) {
1929 offs
[c
] = getScratch();
1930 mkOp2(OP_MIN
, TYPE_F32
, offs
[c
], getSrc(&insn
->src
[0], c
), loadImm(NULL
, 0.4375f
));
1931 mkOp2(OP_MAX
, TYPE_F32
, offs
[c
], offs
[c
], loadImm(NULL
, -0.5f
));
1932 mkOp2(OP_MUL
, TYPE_F32
, offs
[c
], offs
[c
], loadImm(NULL
, 4096.0f
));
1933 mkCvt(OP_CVT
, TYPE_S32
, offs
[c
], TYPE_F32
, offs
[c
]);
1935 mkOp3v(OP_INSBF
, TYPE_U32
, newDefs
[0], offs
[1], mkImm(0x1010), offs
[0]);
1937 mode
= NV50_IR_INTERP_OFFSET
;
1938 } else if (op
== nir_intrinsic_load_barycentric_pixel
) {
1939 mode
= NV50_IR_INTERP_DEFAULT
;
1940 } else if (op
== nir_intrinsic_load_barycentric_at_sample
) {
1941 info
->prop
.fp
.readsSampleLocations
= true;
1942 mkOp1(OP_PIXLD
, TYPE_U32
, newDefs
[0], getSrc(&insn
->src
[0], 0))->subOp
= NV50_IR_SUBOP_PIXLD_OFFSET
;
1943 mode
= NV50_IR_INTERP_OFFSET
;
1945 unreachable("all intrinsics already handled above");
1948 loadImm(newDefs
[1], mode
);
1951 case nir_intrinsic_discard
:
1952 mkOp(OP_DISCARD
, TYPE_NONE
, NULL
);
1954 case nir_intrinsic_discard_if
: {
1955 Value
*pred
= getSSA(1, FILE_PREDICATE
);
1956 if (insn
->num_components
> 1) {
1957 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
1961 mkCmp(OP_SET
, CC_NE
, TYPE_U8
, pred
, TYPE_U32
, getSrc(&insn
->src
[0], 0), zero
);
1962 mkOp(OP_DISCARD
, TYPE_NONE
, NULL
)->setPredicate(CC_P
, pred
);
1965 case nir_intrinsic_load_base_vertex
:
1966 case nir_intrinsic_load_base_instance
:
1967 case nir_intrinsic_load_draw_id
:
1968 case nir_intrinsic_load_front_face
:
1969 case nir_intrinsic_load_helper_invocation
:
1970 case nir_intrinsic_load_instance_id
:
1971 case nir_intrinsic_load_invocation_id
:
1972 case nir_intrinsic_load_local_group_size
:
1973 case nir_intrinsic_load_local_invocation_id
:
1974 case nir_intrinsic_load_num_work_groups
:
1975 case nir_intrinsic_load_patch_vertices_in
:
1976 case nir_intrinsic_load_primitive_id
:
1977 case nir_intrinsic_load_sample_id
:
1978 case nir_intrinsic_load_sample_mask_in
:
1979 case nir_intrinsic_load_sample_pos
:
1980 case nir_intrinsic_load_subgroup_eq_mask
:
1981 case nir_intrinsic_load_subgroup_ge_mask
:
1982 case nir_intrinsic_load_subgroup_gt_mask
:
1983 case nir_intrinsic_load_subgroup_le_mask
:
1984 case nir_intrinsic_load_subgroup_lt_mask
:
1985 case nir_intrinsic_load_subgroup_invocation
:
1986 case nir_intrinsic_load_tess_coord
:
1987 case nir_intrinsic_load_tess_level_inner
:
1988 case nir_intrinsic_load_tess_level_outer
:
1989 case nir_intrinsic_load_vertex_id
:
1990 case nir_intrinsic_load_work_group_id
: {
1991 const DataType dType
= getDType(insn
);
1992 SVSemantic sv
= convert(op
);
1993 LValues
&newDefs
= convert(&insn
->dest
);
1995 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
1997 if (typeSizeof(dType
) == 8)
2002 if (sv
== SV_TID
&& info
->prop
.cp
.numThreads
[i
] == 1) {
2005 Symbol
*sym
= mkSysVal(sv
, i
);
2006 Instruction
*rdsv
= mkOp1(OP_RDSV
, TYPE_U32
, def
, sym
);
2007 if (sv
== SV_TESS_OUTER
|| sv
== SV_TESS_INNER
)
2011 if (typeSizeof(dType
) == 8)
2012 mkOp2(OP_MERGE
, dType
, newDefs
[i
], def
, loadImm(getSSA(), 0u));
2017 case nir_intrinsic_load_subgroup_size
: {
2018 LValues
&newDefs
= convert(&insn
->dest
);
2019 loadImm(newDefs
[0], 32u);
2022 case nir_intrinsic_vote_all
:
2023 case nir_intrinsic_vote_any
:
2024 case nir_intrinsic_vote_ieq
: {
2025 LValues
&newDefs
= convert(&insn
->dest
);
2026 Value
*pred
= getScratch(1, FILE_PREDICATE
);
2027 mkCmp(OP_SET
, CC_NE
, TYPE_U32
, pred
, TYPE_U32
, getSrc(&insn
->src
[0], 0), zero
);
2028 mkOp1(OP_VOTE
, TYPE_U32
, pred
, pred
)->subOp
= getSubOp(op
);
2029 mkCvt(OP_CVT
, TYPE_U32
, newDefs
[0], TYPE_U8
, pred
);
2032 case nir_intrinsic_ballot
: {
2033 LValues
&newDefs
= convert(&insn
->dest
);
2034 Value
*pred
= getSSA(1, FILE_PREDICATE
);
2035 mkCmp(OP_SET
, CC_NE
, TYPE_U32
, pred
, TYPE_U32
, getSrc(&insn
->src
[0], 0), zero
);
2036 mkOp1(OP_VOTE
, TYPE_U32
, newDefs
[0], pred
)->subOp
= NV50_IR_SUBOP_VOTE_ANY
;
2039 case nir_intrinsic_read_first_invocation
:
2040 case nir_intrinsic_read_invocation
: {
2041 LValues
&newDefs
= convert(&insn
->dest
);
2042 const DataType dType
= getDType(insn
);
2043 Value
*tmp
= getScratch();
2045 if (op
== nir_intrinsic_read_first_invocation
) {
2046 mkOp1(OP_VOTE
, TYPE_U32
, tmp
, mkImm(1))->subOp
= NV50_IR_SUBOP_VOTE_ANY
;
2047 mkOp2(OP_EXTBF
, TYPE_U32
, tmp
, tmp
, mkImm(0x2000))->subOp
= NV50_IR_SUBOP_EXTBF_REV
;
2048 mkOp1(OP_BFIND
, TYPE_U32
, tmp
, tmp
)->subOp
= NV50_IR_SUBOP_BFIND_SAMT
;
2050 tmp
= getSrc(&insn
->src
[1], 0);
2052 for (uint8_t i
= 0; i
< insn
->num_components
; ++i
) {
2053 mkOp3(OP_SHFL
, dType
, newDefs
[i
], getSrc(&insn
->src
[0], i
), tmp
, mkImm(0x1f))
2054 ->subOp
= NV50_IR_SUBOP_SHFL_IDX
;
2058 case nir_intrinsic_load_per_vertex_input
: {
2059 const DataType dType
= getDType(insn
);
2060 LValues
&newDefs
= convert(&insn
->dest
);
2061 Value
*indirectVertex
;
2062 Value
*indirectOffset
;
2063 uint32_t baseVertex
= getIndirect(&insn
->src
[0], 0, indirectVertex
);
2064 uint32_t idx
= getIndirect(insn
, 1, 0, indirectOffset
);
2066 Value
*vtxBase
= mkOp2v(OP_PFETCH
, TYPE_U32
, getSSA(4, FILE_ADDRESS
),
2067 mkImm(baseVertex
), indirectVertex
);
2068 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
2069 uint32_t address
= getSlotAddress(insn
, idx
, i
);
2070 loadFrom(FILE_SHADER_INPUT
, 0, dType
, newDefs
[i
], address
, 0,
2071 indirectOffset
, vtxBase
, info
->in
[idx
].patch
);
2075 case nir_intrinsic_load_per_vertex_output
: {
2076 const DataType dType
= getDType(insn
);
2077 LValues
&newDefs
= convert(&insn
->dest
);
2078 Value
*indirectVertex
;
2079 Value
*indirectOffset
;
2080 uint32_t baseVertex
= getIndirect(&insn
->src
[0], 0, indirectVertex
);
2081 uint32_t idx
= getIndirect(insn
, 1, 0, indirectOffset
);
2082 Value
*vtxBase
= NULL
;
2085 vtxBase
= indirectVertex
;
2087 vtxBase
= loadImm(NULL
, baseVertex
);
2089 vtxBase
= mkOp2v(OP_ADD
, TYPE_U32
, getSSA(4, FILE_ADDRESS
), outBase
, vtxBase
);
2091 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
2092 uint32_t address
= getSlotAddress(insn
, idx
, i
);
2093 loadFrom(FILE_SHADER_OUTPUT
, 0, dType
, newDefs
[i
], address
, 0,
2094 indirectOffset
, vtxBase
, info
->in
[idx
].patch
);
2098 case nir_intrinsic_emit_vertex
:
2099 if (info
->io
.genUserClip
> 0)
2100 handleUserClipPlanes();
2102 case nir_intrinsic_end_primitive
: {
2103 uint32_t idx
= nir_intrinsic_stream_id(insn
);
2104 mkOp1(getOperation(op
), TYPE_U32
, NULL
, mkImm(idx
))->fixed
= 1;
2107 case nir_intrinsic_load_ubo
: {
2108 const DataType dType
= getDType(insn
);
2109 LValues
&newDefs
= convert(&insn
->dest
);
2110 Value
*indirectIndex
;
2111 Value
*indirectOffset
;
2112 uint32_t index
= getIndirect(&insn
->src
[0], 0, indirectIndex
) + 1;
2113 uint32_t offset
= getIndirect(&insn
->src
[1], 0, indirectOffset
);
2115 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
2116 loadFrom(FILE_MEMORY_CONST
, index
, dType
, newDefs
[i
], offset
, i
,
2117 indirectOffset
, indirectIndex
);
2121 case nir_intrinsic_get_buffer_size
: {
2122 LValues
&newDefs
= convert(&insn
->dest
);
2123 const DataType dType
= getDType(insn
);
2124 Value
*indirectBuffer
;
2125 uint32_t buffer
= getIndirect(&insn
->src
[0], 0, indirectBuffer
);
2127 Symbol
*sym
= mkSymbol(FILE_MEMORY_BUFFER
, buffer
, dType
, 0);
2128 mkOp1(OP_BUFQ
, dType
, newDefs
[0], sym
)->setIndirect(0, 0, indirectBuffer
);
2131 case nir_intrinsic_store_ssbo
: {
2132 DataType sType
= getSType(insn
->src
[0], false, false);
2133 Value
*indirectBuffer
;
2134 Value
*indirectOffset
;
2135 uint32_t buffer
= getIndirect(&insn
->src
[1], 0, indirectBuffer
);
2136 uint32_t offset
= getIndirect(&insn
->src
[2], 0, indirectOffset
);
2138 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
) {
2139 if (!((1u << i
) & nir_intrinsic_write_mask(insn
)))
2141 Symbol
*sym
= mkSymbol(FILE_MEMORY_BUFFER
, buffer
, sType
,
2142 offset
+ i
* typeSizeof(sType
));
2143 mkStore(OP_STORE
, sType
, sym
, indirectOffset
, getSrc(&insn
->src
[0], i
))
2144 ->setIndirect(0, 1, indirectBuffer
);
2146 info
->io
.globalAccess
|= 0x2;
2149 case nir_intrinsic_load_ssbo
: {
2150 const DataType dType
= getDType(insn
);
2151 LValues
&newDefs
= convert(&insn
->dest
);
2152 Value
*indirectBuffer
;
2153 Value
*indirectOffset
;
2154 uint32_t buffer
= getIndirect(&insn
->src
[0], 0, indirectBuffer
);
2155 uint32_t offset
= getIndirect(&insn
->src
[1], 0, indirectOffset
);
2157 for (uint8_t i
= 0u; i
< insn
->num_components
; ++i
)
2158 loadFrom(FILE_MEMORY_BUFFER
, buffer
, dType
, newDefs
[i
], offset
, i
,
2159 indirectOffset
, indirectBuffer
);
2161 info
->io
.globalAccess
|= 0x1;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
      Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      if (op == nir_intrinsic_shared_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[2], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectBuffer;
      Value *indirectOffset;
      uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
      Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
                                getSrc(&insn->src[2], 0));
      if (op == nir_intrinsic_ssbo_atomic_comp_swap)
         atom->setSrc(2, getSrc(&insn->src[3], 0));
      atom->setIndirect(0, 0, indirectOffset);
      atom->setIndirect(0, 1, indirectBuffer);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *address;
      uint32_t offset = getIndirect(&insn->src[0], 0, address);

      Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
      Instruction *atom =
         mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
      atom->setIndirect(0, 0, address);
      atom->subOp = getSubOp(op);

      info->io.globalAccess |= 0x2;
      break;
   }
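   // Image intrinsics (bindless and deref-based) are lowered to texture-style
   // instructions: coordinates, the sample index and the data/compare sources
   // are gathered into 'srcs' in the order codegen expects, and the format,
   // write mask and cache mode are attached to the resulting TexInstruction.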
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_bindless_image_store: {
      std::vector<Value*> srcs, defs;
      Value *indirect = getSrc(&insn->src[0], 0);
      DataType ty;

      uint32_t mask = 0;
      TexInstruction::Target target =
         convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
      unsigned int argCount = getNIRArgCount(target);
      uint16_t location = 0;

      if (opInfo.has_dest) {
         LValues &newDefs = convert(&insn->dest);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }
      }

      switch (op) {
      case nir_intrinsic_bindless_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_and:
      case nir_intrinsic_bindless_image_atomic_comp_swap:
      case nir_intrinsic_bindless_image_atomic_exchange:
      case nir_intrinsic_bindless_image_atomic_imax:
      case nir_intrinsic_bindless_image_atomic_umax:
      case nir_intrinsic_bindless_image_atomic_imin:
      case nir_intrinsic_bindless_image_atomic_umin:
      case nir_intrinsic_bindless_image_atomic_or:
      case nir_intrinsic_bindless_image_atomic_xor:
         ty = getDType(insn);
         mask = 0x1;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_bindless_image_load:
         ty = TYPE_U32;
         info->io.globalAccess |= 0x1;
         break;
      case nir_intrinsic_bindless_image_store:
         ty = TYPE_U32;
         mask = 0xf;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_bindless_image_samples:
         mask = 0x8;
         ty = TYPE_U32;
         break;
      case nir_intrinsic_bindless_image_size:
         ty = TYPE_U32;
         break;
      default:
         unreachable("unhandled image opcode");
      }

      // coords
      if (opInfo.num_srcs >= 2)
         for (unsigned int i = 0u; i < argCount; ++i)
            srcs.push_back(getSrc(&insn->src[1], i));

      // the sampler is just another src added after coords
      if (opInfo.num_srcs >= 3 && target.isMS())
         srcs.push_back(getSrc(&insn->src[2], 0));

      if (opInfo.num_srcs >= 4) {
         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
         for (uint8_t i = 0u; i < components; ++i)
            srcs.push_back(getSrc(&insn->src[3], i));
      }

      if (opInfo.num_srcs >= 5)
         // 1 for atomic swap
         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
            srcs.push_back(getSrc(&insn->src[4], i));

      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
      texi->tex.bindless = false;
      texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
      texi->tex.mask = mask;
      texi->tex.bindless = true;
      texi->cache = convert(nir_intrinsic_access(insn));
      texi->setType(ty);
      texi->subOp = getSubOp(op);

      if (indirect)
         texi->setIndirectR(indirect);

      break;
   }
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_store: {
      const nir_variable *tex;
      std::vector<Value*> srcs, defs;
      Value *indirect;
      DataType ty;

      uint32_t mask = 0;
      nir_deref_instr *deref = nir_src_as_deref(insn->src[0]);
      const glsl_type *type = deref->type;
      TexInstruction::Target target =
         convert((glsl_sampler_dim)type->sampler_dimensionality,
                 type->sampler_array, type->sampler_shadow);
      unsigned int argCount = getNIRArgCount(target);
      uint16_t location = handleDeref(deref, indirect, tex);

      if (opInfo.has_dest) {
         LValues &newDefs = convert(&insn->dest);
         for (uint8_t i = 0u; i < newDefs.size(); ++i) {
            defs.push_back(newDefs[i]);
            mask |= 1 << i;
         }
      }

      switch (op) {
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_comp_swap:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
         ty = getDType(insn);
         mask = 0x1;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_image_deref_load:
         ty = TYPE_U32;
         info->io.globalAccess |= 0x1;
         break;
      case nir_intrinsic_image_deref_store:
         ty = TYPE_U32;
         mask = 0xf;
         info->io.globalAccess |= 0x2;
         break;
      case nir_intrinsic_image_deref_samples:
         mask = 0x8;
         ty = TYPE_U32;
         break;
      case nir_intrinsic_image_deref_size:
         ty = TYPE_U32;
         break;
      default:
         unreachable("unhandled image opcode");
      }

      // coords
      if (opInfo.num_srcs >= 2)
         for (unsigned int i = 0u; i < argCount; ++i)
            srcs.push_back(getSrc(&insn->src[1], i));

      // the sampler is just another src added after coords
      if (opInfo.num_srcs >= 3 && target.isMS())
         srcs.push_back(getSrc(&insn->src[2], 0));

      if (opInfo.num_srcs >= 4) {
         unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
         for (uint8_t i = 0u; i < components; ++i)
            srcs.push_back(getSrc(&insn->src[3], i));
      }

      if (opInfo.num_srcs >= 5)
         // 1 for atomic swap
         for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
            srcs.push_back(getSrc(&insn->src[4], i));

      TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
      texi->tex.bindless = false;
      texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(tex->data.image.format);
      texi->tex.mask = mask;
      texi->cache = getCacheModeFromVar(tex);
      texi->setType(ty);
      texi->subOp = getSubOp(op);

      if (indirect)
         texi->setIndirectR(indirect);

      break;
   }
   case nir_intrinsic_store_shared: {
      DataType sType = getSType(insn->src[0], false, false);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);

      for (uint8_t i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, sType, offset + i * typeSizeof(sType));
         mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
      }
      break;
   }
   case nir_intrinsic_load_shared: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (uint8_t i = 0u; i < insn->num_components; ++i)
         loadFrom(FILE_MEMORY_SHARED, 0, dType, newDefs[i], offset, i, indirectOffset);

      break;
   }
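   // Barriers: a control barrier becomes a BAR.SYNC (OP_BAR), memory barriers
   // become OP_MEMBAR with the scope selected via getSubOp(); the TCS patch
   // barrier needs no code of its own.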
   case nir_intrinsic_control_barrier: {
      // TODO: add flag to shader_info
      info->numBarriers = 1;
      Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
      bar->fixed = 1;
      bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
      break;
   }
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_shared: {
      Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
      bar->fixed = 1;
      bar->subOp = getSubOp(op);
      break;
   }
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_shader_clock: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);

      loadImm(newDefs[0], 0u);
      mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
      break;
   }
   case nir_intrinsic_load_global: {
      const DataType dType = getDType(insn);
      LValues &newDefs = convert(&insn->dest);
      Value *indirectOffset;
      uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);

      for (auto i = 0u; i < insn->num_components; ++i)
         loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);

      info->io.globalAccess |= 0x1;
      break;
   }
   case nir_intrinsic_store_global: {
      DataType sType = getSType(insn->src[0], false, false);

      for (auto i = 0u; i < insn->num_components; ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;
         if (typeSizeof(sType) == 8) {
            Value *split[2];
            mkSplit(split, 4, getSrc(&insn->src[0], i));

            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);

            sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
            mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
         } else {
            Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
            mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
         }
      }

      info->io.globalAccess |= 0x2;
      break;
   }
   default:
      ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
      return false;
   }

   return true;
}
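// NIR jumps: a return branches to the exit block, break/continue branch to the
// block's single successor; the matching CFG edges are attached so codegen
// sees well-formed control flow.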
bool
Converter::visit(nir_jump_instr *insn)
{
   switch (insn->type) {
   case nir_jump_return:
      // TODO: this only works in the main function
      mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
      bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
      break;
   case nir_jump_break:
   case nir_jump_continue: {
      bool isBreak = insn->type == nir_jump_break;
      nir_block *block = insn->instr.block;
      assert(!block->successors[1]);
      BasicBlock *target = convert(block->successors[0]);
      mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
      bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
      break;
   }
   default:
      ERROR("unknown nir_jump_type %u\n", insn->type);
      return false;
   }

   return true;
}
Value*
Converter::convert(nir_load_const_instr *insn, uint8_t idx)
{
   Value *val;

   if (immInsertPos)
      setPosition(immInsertPos, true);
   else
      setPosition(bb, false);

   switch (insn->def.bit_size) {
   case 64:
      val = loadImm(getSSA(8), insn->value[idx].u64);
      break;
   case 32:
      val = loadImm(getSSA(4), insn->value[idx].u32);
      break;
   case 16:
      val = loadImm(getSSA(2), insn->value[idx].u16);
      break;
   case 8:
      val = loadImm(getSSA(1), insn->value[idx].u8);
      break;
   default:
      unreachable("unhandled bit size!\n");
   }
   setPosition(bb, true);
   return val;
}

bool
Converter::visit(nir_load_const_instr *insn)
{
   assert(insn->def.bit_size <= 64);
   immediates[insn->def.index] = insn;
   return true;
}
#define DEFAULT_CHECKS \
      if (insn->dest.dest.ssa.num_components > 1) { \
         ERROR("nir_alu_instr only supported with 1 component!\n"); \
         return false; \
      } \
      if (insn->dest.write_mask != 1) { \
         ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
         return false; \
      }
bool
Converter::visit(nir_alu_instr *insn)
{
   const nir_op op = insn->op;
   const nir_op_info &info = nir_op_infos[op];
   DataType dType = getDType(insn);
   const std::vector<DataType> sTypes = getSTypes(insn);

   Instruction *oldPos = this->bb->getExit();

   switch (op) {
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
   case nir_op_imul_high:
   case nir_op_umul_high:
   case nir_op_pack_64_2x32_split:
   // ... plus the remaining simple one-result ALU opcodes ...
   {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      operation preOp = preOperationNeeded(op);
      if (preOp != OP_NOP) {
         assert(info.num_inputs < 2);
         Value *tmp = getSSA(typeSizeof(dType));
         Instruction *i0 = mkOp(preOp, dType, tmp);
         Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
         if (info.num_inputs) {
            i0->setSrc(0, getSrc(&insn->src[0]));
            i1->setSrc(0, tmp);
         }
         i1->subOp = getSubOp(op);
      } else {
         Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
         for (unsigned s = 0u; s < info.num_inputs; ++s) {
            i->setSrc(s, getSrc(&insn->src[s]));
         }
         i->subOp = getSubOp(op);
      }
      break;
   }
   case nir_op_ifind_msb:
   case nir_op_ufind_msb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      dType = sTypes[0];
      mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      break;
   }
   case nir_op_fround_even: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
      break;
   }
   // convert instructions
   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
   case nir_op_i2f32:
   case nir_op_i2i32:
   case nir_op_u2f32:
   case nir_op_u2u32:
   case nir_op_f2f64:
   case nir_op_f2i64:
   case nir_op_f2u64:
   case nir_op_i2f64:
   case nir_op_i2i64:
   case nir_op_u2f64:
   case nir_op_u2u64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
      if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
         i->rnd = ROUND_Z;
      i->sType = sTypes[0];
      break;
   }
   // compare instructions
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fne32:
   case nir_op_ine32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Instruction *i = mkCmp(getOperation(op),
                             getCondCode(op),
                             dType,
                             newDefs[0],
                             dType,
                             getSrc(&insn->src[0]),
                             getSrc(&insn->src[1]));
      if (info.num_inputs == 3)
         i->setSrc(2, getSrc(&insn->src[2]));
      i->sType = sTypes[0];
      break;
   }
   // those are weird ALU ops and need special handling, because
   //   1. they are always component based
   //   2. they basically just merge multiple values into one data type
   case nir_op_mov:
      if (!insn->dest.dest.is_ssa && insn->dest.dest.reg.reg->num_array_elems) {
         nir_reg_dest& reg = insn->dest.dest.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS),
                              getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < comps; ++i) {
            if (!((1u << i) & insn->dest.write_mask))
               continue;

            Symbol *sym = mkSymbol(FILE_MEMORY_LOCAL, 0, dType, goffset + aoffset + i * size);
            mkStore(OP_STORE, dType, sym, indirect, getSrc(&insn->src[0], i));
         }
         break;
      } else if (!insn->src[0].src.is_ssa && insn->src[0].src.reg.reg->num_array_elems) {
         LValues &newDefs = convert(&insn->dest);
         nir_reg_src& reg = insn->src[0].src.reg;
         uint32_t goffset = regToLmemOffset[reg.reg->index];
         // uint8_t comps = reg.reg->num_components;
         uint8_t size = reg.reg->bit_size / 8;
         uint8_t csize = 4 * size; // TODO after fixing MemoryOpts: comps * size;
         uint32_t aoffset = csize * reg.base_offset;
         Value *indirect = NULL;

         if (reg.indirect)
            indirect = mkOp2v(OP_MUL, TYPE_U32, getSSA(4, FILE_ADDRESS), getSrc(reg.indirect, 0), mkImm(csize));

         for (uint8_t i = 0u; i < newDefs.size(); ++i)
            loadFrom(FILE_MEMORY_LOCAL, 0, dType, newDefs[i], goffset + aoffset, i, indirect);

         break;
      } else {
         LValues &newDefs = convert(&insn->dest);
         for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
            mkMov(newDefs[c], getSrc(&insn->src[0], c), dType);
         }
      }
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16: {
      LValues &newDefs = convert(&insn->dest);
      for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
         mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
      }
      break;
   }
   case nir_op_pack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
      merge->setSrc(0, getSrc(&insn->src[0], 0));
      merge->setSrc(1, getSrc(&insn->src[0], 1));
      break;
   }
   case nir_op_pack_half_2x16_split: {
      LValues &newDefs = convert(&insn->dest);
      Value *tmpH = getSSA();
      Value *tmpL = getSSA();

      mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
      mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
      mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
      break;
   }
   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y: {
      LValues &newDefs = convert(&insn->dest);
      Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
      if (op == nir_op_unpack_half_2x16_split_y)
         cvt->subOp = 1;
      break;
   }
   case nir_op_unpack_64_2x32: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
      break;
   }
   case nir_op_unpack_64_2x32_split_x: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      LValues &newDefs = convert(&insn->dest);
      mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
      break;
   }
   // special instructions
   case nir_op_fsign:
   case nir_op_isign: {
      DEFAULT_CHECKS;
      DataType iType;
      if (::isFloatType(dType))
         iType = TYPE_F32;
      else
         iType = TYPE_S32;

      LValues &newDefs = convert(&insn->dest);
      LValue *val0 = getScratch();
      LValue *val1 = getScratch();
      mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
      mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);

      if (dType == TYPE_F64) {
         mkOp2(OP_SUB, iType, val0, val0, val1);
         mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
      } else if (dType == TYPE_S64 || dType == TYPE_U64) {
         mkOp2(OP_SUB, iType, val0, val1, val0);
         mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
         mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
      } else if (::isFloatType(dType))
         mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
      else
         mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
      break;
   }
   case nir_op_fcsel:
   case nir_op_b32csel: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_ibitfield_extract:
   case nir_op_ubitfield_extract: {
      DEFAULT_CHECKS;
      Value *tmp = getSSA();
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
      break;
   }
   case nir_op_bfm: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
      break;
   }
   case nir_op_bitfield_insert: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *temp = getSSA();
      mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
      mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bit_count: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
      break;
   }
   case nir_op_bitfield_reverse: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_EXTBF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
      break;
   }
   case nir_op_find_lsb: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA();
      mkOp2(OP_EXTBF, TYPE_U32, tmp, getSrc(&insn->src[0]), mkImm(0x2000))->subOp = NV50_IR_SUBOP_EXTBF_REV;
      mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
      break;
   }
   // boolean conversions
   case nir_op_b2f32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
      break;
   }
   case nir_op_b2f64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *tmp = getSSA(4);
      mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
      mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
      break;
   }
   case nir_op_f2b32:
   case nir_op_i2b32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      Value *src1 = zero;
      if (typeSizeof(sTypes[0]) == 8) {
         src1 = loadImm(getSSA(8), 0.0);
      }

      CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
      mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
      break;
   }
   case nir_op_b2i32: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
      break;
   }
   case nir_op_b2i64: {
      DEFAULT_CHECKS;
      LValues &newDefs = convert(&insn->dest);
      LValue *def = getScratch();
      mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
      mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
      break;
   }
2986 ERROR("unknown nir_op %s\n", info
.name
);
2991 oldPos
= this->bb
->getEntry();
2992 oldPos
->precise
= insn
->exact
;
2995 if (unlikely(!oldPos
))
2998 while (oldPos
->next
) {
2999 oldPos
= oldPos
->next
;
3000 oldPos
->precise
= insn
->exact
;
3002 oldPos
->saturate
= insn
->dest
.saturate
;
3006 #undef DEFAULT_CHECKS
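// Undefined SSA values simply get fresh registers defined by OP_NOP; no real
// value is ever computed for them.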
bool
Converter::visit(nir_ssa_undef_instr *insn)
{
   LValues &newDefs = convert(&insn->def);
   for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
      mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
   }
   return true;
}
#define CASE_SAMPLER(ty) \
   case GLSL_SAMPLER_DIM_ ## ty : \
      if (isArray && !isShadow) \
         return TEX_TARGET_ ## ty ## _ARRAY; \
      else if (!isArray && isShadow) \
         return TEX_TARGET_## ty ## _SHADOW; \
      else if (isArray && isShadow) \
         return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
      else \
         return TEX_TARGET_ ## ty

TexTarget
Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
{
   switch (dim) {
   CASE_SAMPLER(1D);
   CASE_SAMPLER(2D);
   CASE_SAMPLER(CUBE);
   case GLSL_SAMPLER_DIM_3D:
      return TEX_TARGET_3D;
   case GLSL_SAMPLER_DIM_MS:
      if (isArray)
         return TEX_TARGET_2D_MS_ARRAY;
      return TEX_TARGET_2D_MS;
   case GLSL_SAMPLER_DIM_RECT:
      if (isShadow)
         return TEX_TARGET_RECT_SHADOW;
      return TEX_TARGET_RECT;
   case GLSL_SAMPLER_DIM_BUF:
      return TEX_TARGET_BUFFER;
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return TEX_TARGET_2D;
   default:
      ERROR("unknown glsl_sampler_dim %u\n", dim);
      assert(false);
      return TEX_TARGET_COUNT;
   }
}
#undef CASE_SAMPLER
Value*
Converter::applyProjection(Value *src, Value *proj)
{
   if (!proj)
      return src;
   return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
}

unsigned int
Converter::getNIRArgCount(TexInstruction::Target& target)
{
   unsigned int result = target.getArgCount();
   if (target.isCube() && target.isArray())
      result--;
   if (target.isMS())
      result--;
   return result;
}
uint16_t
Converter::handleDeref(nir_deref_instr *deref, Value * &indirect, const nir_variable * &tex)
{
   typedef std::pair<uint32_t,Value*> DerefPair;
   std::list<DerefPair> derefs;

   uint16_t result = 0;
   while (deref->deref_type != nir_deref_type_var) {
      switch (deref->deref_type) {
      case nir_deref_type_array: {
         Value *indirect;
         uint8_t size = type_size(deref->type, true);
         result += size * getIndirect(&deref->arr.index, 0, indirect);

         if (indirect)
            derefs.push_front(std::make_pair(size, indirect));

         break;
      }
      case nir_deref_type_struct: {
         result += nir_deref_instr_parent(deref)->type->struct_location_offset(deref->strct.index);
         break;
      }
      case nir_deref_type_var:
      default:
         unreachable("nir_deref_type_var reached in handleDeref!");
         break;
      }
      deref = nir_deref_instr_parent(deref);
   }

   indirect = NULL;
   for (std::list<DerefPair>::const_iterator it = derefs.begin(); it != derefs.end(); ++it) {
      Value *offset = mkOp2v(OP_MUL, TYPE_U32, getSSA(), loadImm(getSSA(), it->first), it->second);
      if (indirect)
         indirect = mkOp2v(OP_ADD, TYPE_U32, getSSA(), indirect, offset);
      else
         indirect = offset;
   }

   tex = nir_deref_instr_get_variable(deref);
   assert(tex);

   return result + tex->data.driver_location;
}
CacheMode
Converter::convert(enum gl_access_qualifier access)
{
   switch (access) {
   case ACCESS_VOLATILE:
      return CACHE_CV;
   case ACCESS_COHERENT:
      return CACHE_CG;
   default:
      return CACHE_CA;
   }
}

CacheMode
Converter::getCacheModeFromVar(const nir_variable *var)
{
   return convert(var->data.access);
}
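// Texture instructions: NIR identifies tex sources by type, so they are looked
// up by index here and re-packed into the fixed source layout codegen expects
// (coordinates first, then sample index, LOD/bias, compare value and finally
// the offset/handle indirections).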
bool
Converter::visit(nir_tex_instr *insn)
{
   switch (insn->op) {
   case nir_texop_lod:
   case nir_texop_query_levels:
   case nir_texop_tex:
   case nir_texop_texture_samples:
   case nir_texop_tg4:
   case nir_texop_txb:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txl:
   case nir_texop_txs: {
      LValues &newDefs = convert(&insn->dest);
      std::vector<Value*> srcs;
      std::vector<Value*> defs;
      std::vector<nir_src*> offsets;
      uint8_t mask = 0;
      bool lz = false;
      Value *proj = NULL;
      TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
      operation op = getOperation(insn->op);

      int r, s;
      int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
      int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
      int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
      int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
      int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
      int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
      int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
      int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
      int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
      int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
      int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
      int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
      int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);

      bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
      assert((sampHandleIdx != -1) == (texHandleIdx != -1));
= mkOp1v(OP_RCP
, TYPE_F32
, getScratch(), getSrc(&insn
->src
[projIdx
].src
, 0));
3189 srcs
.resize(insn
->coord_components
);
3190 for (uint8_t i
= 0u; i
< insn
->coord_components
; ++i
)
3191 srcs
[i
] = applyProjection(getSrc(&insn
->src
[coordsIdx
].src
, i
), proj
);
3193 // sometimes we get less args than target.getArgCount, but codegen expects the latter
3194 if (insn
->coord_components
) {
3195 uint32_t argCount
= target
.getArgCount();
3200 for (uint32_t i
= 0u; i
< (argCount
- insn
->coord_components
); ++i
)
3201 srcs
.push_back(getSSA());
3204 if (insn
->op
== nir_texop_texture_samples
)
3205 srcs
.push_back(zero
);
3206 else if (!insn
->num_srcs
)
3207 srcs
.push_back(loadImm(NULL
, 0));
3209 srcs
.push_back(getSrc(&insn
->src
[biasIdx
].src
, 0));
3211 srcs
.push_back(getSrc(&insn
->src
[lodIdx
].src
, 0));
3212 else if (op
== OP_TXF
)
3215 srcs
.push_back(getSrc(&insn
->src
[msIdx
].src
, 0));
3216 if (offsetIdx
!= -1)
3217 offsets
.push_back(&insn
->src
[offsetIdx
].src
);
3219 srcs
.push_back(applyProjection(getSrc(&insn
->src
[compIdx
].src
, 0), proj
));
3220 if (texOffIdx
!= -1) {
3221 srcs
.push_back(getSrc(&insn
->src
[texOffIdx
].src
, 0));
3222 texOffIdx
= srcs
.size() - 1;
3224 if (sampOffIdx
!= -1) {
3225 srcs
.push_back(getSrc(&insn
->src
[sampOffIdx
].src
, 0));
3226 sampOffIdx
= srcs
.size() - 1;
3229 // currently we use the lower bits
3231 Value
*handle
= getSrc(&insn
->src
[sampHandleIdx
].src
, 0);
3233 mkSplit(split
, 4, handle
);
3235 srcs
.push_back(split
[0]);
3236 texOffIdx
= srcs
.size() - 1;
3239 r
= bindless
? 0xff : insn
->texture_index
;
3240 s
= bindless
? 0x1f : insn
->sampler_index
;
3242 defs
.resize(newDefs
.size());
3243 for (uint8_t d
= 0u; d
< newDefs
.size(); ++d
) {
3244 defs
[d
] = newDefs
[d
];
3247 if (target
.isMS() || (op
== OP_TEX
&& prog
->getType() != Program::TYPE_FRAGMENT
))
3250 TexInstruction
*texi
= mkTex(op
, target
.getEnum(), r
, s
, defs
, srcs
);
3251 texi
->tex
.levelZero
= lz
;
3252 texi
->tex
.mask
= mask
;
3253 texi
->tex
.bindless
= bindless
;
3255 if (texOffIdx
!= -1)
3256 texi
->tex
.rIndirectSrc
= texOffIdx
;
3257 if (sampOffIdx
!= -1)
3258 texi
->tex
.sIndirectSrc
= sampOffIdx
;
3262 if (!target
.isShadow())
3263 texi
->tex
.gatherComp
= insn
->component
;
3266 texi
->tex
.query
= TXQ_DIMS
;
3268 case nir_texop_texture_samples
:
3269 texi
->tex
.mask
= 0x4;
3270 texi
->tex
.query
= TXQ_TYPE
;
3272 case nir_texop_query_levels
:
3273 texi
->tex
.mask
= 0x8;
3274 texi
->tex
.query
= TXQ_DIMS
;
3280 texi
->tex
.useOffsets
= offsets
.size();
3281 if (texi
->tex
.useOffsets
) {
3282 for (uint8_t s
= 0; s
< texi
->tex
.useOffsets
; ++s
) {
3283 for (uint32_t c
= 0u; c
< 3; ++c
) {
3284 uint8_t s2
= std::min(c
, target
.getDim() - 1);
3285 texi
->offset
[s
][c
].set(getSrc(offsets
[s
], s2
));
3286 texi
->offset
[s
][c
].setInsn(texi
);
3291 if (op
== OP_TXG
&& offsetIdx
== -1) {
3292 if (nir_tex_instr_has_explicit_tg4_offsets(insn
)) {
3293 texi
->tex
.useOffsets
= 4;
3294 setPosition(texi
, false);
3295 for (uint8_t i
= 0; i
< 4; ++i
) {
3296 for (uint8_t j
= 0; j
< 2; ++j
) {
3297 texi
->offset
[i
][j
].set(loadImm(NULL
, insn
->tg4_offsets
[i
][j
]));
3298 texi
->offset
[i
][j
].setInsn(texi
);
3301 setPosition(texi
, true);
3305 if (ddxIdx
!= -1 && ddyIdx
!= -1) {
3306 for (uint8_t c
= 0u; c
< target
.getDim() + target
.isCube(); ++c
) {
3307 texi
->dPdx
[c
].set(getSrc(&insn
->src
[ddxIdx
].src
, c
));
3308 texi
->dPdy
[c
].set(getSrc(&insn
->src
[ddyIdx
].src
, c
));
3315 ERROR("unknown nir_texop %u\n", insn
->op
);
bool
Converter::visit(nir_deref_instr *deref)
{
   // we just ignore those, because images intrinsics are the only place where
   // we should end up with deref sources and those have to backtrack anyway
   // to get the nir_variable. This code just exists to handle some special
   // cases.
   switch (deref->deref_type) {
   case nir_deref_type_array:
   case nir_deref_type_struct:
   case nir_deref_type_var:
      break;
   default:
      ERROR("unknown nir_deref_instr %u\n", deref->deref_type);
      return false;
   }
   return true;
}
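// run() drives the NIR lowering and optimization pipeline before translation:
// IO, subgroup and register lowering first, then a copy-prop/CSE/algebraic/DCE
// loop until no further progress is made, and finally bool/locals lowering and
// out-of-SSA so the visitors above can consume the result.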
bool
Converter::run()
{
   bool progress;

   if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
      nir_print_shader(nir, stderr);

   struct nir_lower_subgroups_options subgroup_options = {
      .subgroup_size = 32,
      .ballot_bit_size = 32,
   };

   NIR_PASS_V(nir, nir_lower_io, nir_var_all, type_size, (nir_lower_io_options)0);
   NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
   NIR_PASS_V(nir, nir_lower_regs_to_ssa);
   NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
   NIR_PASS_V(nir, nir_lower_phis_to_scalar);

   do {
      progress = false;
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_remove_phis);
      NIR_PASS(progress, nir, nir_opt_trivial_continues);
      NIR_PASS(progress, nir, nir_opt_cse);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);
      NIR_PASS(progress, nir, nir_copy_prop);
      NIR_PASS(progress, nir, nir_opt_dce);
      NIR_PASS(progress, nir, nir_opt_dead_cf);
   } while (progress);

   NIR_PASS_V(nir, nir_lower_bool_to_int32);
   NIR_PASS_V(nir, nir_lower_locals_to_regs);
   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
   NIR_PASS_V(nir, nir_convert_from_ssa, true);

   // Garbage collect dead instructions
   nir_sweep(nir);

   if (!parseNIR()) {
      ERROR("Couldn't parse NIR!\n");
      return false;
   }

   if (!assignSlots()) {
      ERROR("Couldn't assign slots!\n");
      return false;
   }

   if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
      nir_print_shader(nir, stderr);

   nir_foreach_function(function, nir) {
      if (!visit(function))
         return false;
   }

   return true;
}

} // unnamed namespace
namespace nv50_ir {

bool
Program::makeFromNIR(struct nv50_ir_prog_info *info)
{
   nir_shader *nir = (nir_shader*)info->bin.source;
   Converter converter(this, nir, info);
   bool result = converter.run();
   if (!result)
      return result;
   LoweringHelper lowering;
   lowering.run(this);
   tlsSize = info->bin.tlsSpace;
   return result;
}

} // namespace nv50_ir
3421 } // namespace nv50_ir