/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "sid.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"

struct ac_nir_context {
	struct ac_llvm_context ac;
	struct ac_shader_abi *abi;
	const struct ac_shader_args *args;

	gl_shader_stage stage;
	shader_info *info;

	LLVMValueRef *ssa_defs;

	LLVMValueRef constant_data;

	struct hash_table *defs;
	struct hash_table *phis;
	struct hash_table *vars;

	LLVMValueRef main_function;
	LLVMBasicBlockRef continue_block;
	LLVMBasicBlockRef break_block;
};

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     bool image, bool write);

static void
build_store_values_extended(struct ac_llvm_context *ac,
			    LLVMValueRef *values,
			    unsigned value_count,
			    unsigned value_stride,
			    LLVMValueRef vec)
{
	LLVMBuilderRef builder = ac->builder;
	unsigned i;

	for (i = 0; i < value_count; i++) {
		LLVMValueRef ptr = values[i * value_stride];
		LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
		LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
		LLVMBuildStore(builder, value, ptr);
	}
}

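/* Returns the LLVM type for a NIR SSA def: an iN scalar, or an <N x iM>
 * vector when the def has more than one component. */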
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
				const nir_ssa_def *def)
{
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
	if (def->num_components > 1) {
		type = LLVMVectorType(type, def->num_components);
	}
	return type;
}

static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
	assert(src.is_ssa);
	return nir->ssa_defs[src.ssa->index];
}

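/* Turns a NIR source holding an LDS offset into a typed pointer into the
 * shared-memory block, cast to an integer type of the requested bit size. */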
static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
	LLVMValueRef ptr = get_src(ctx, src);
	ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
	int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

	return LLVMBuildBitCast(ctx->ac.builder, ptr,
				LLVMPointerType(type, addr_space), "");
}

static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
				   const struct nir_block *b)
{
	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
	return (LLVMBasicBlockRef)entry->data;
}

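/* Fetches an ALU source and applies its NIR swizzle, extracting, splatting
 * or shuffling components so the value ends up with exactly num_components
 * components. */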
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
				nir_alu_src src,
				unsigned num_components)
{
	LLVMValueRef value = get_src(ctx, src.src);
	bool need_swizzle = false;

	unsigned src_components = ac_get_llvm_num_components(value);
	for (unsigned i = 0; i < num_components; ++i) {
		assert(src.swizzle[i] < src_components);
		if (src.swizzle[i] != i)
			need_swizzle = true;
	}

	if (need_swizzle || num_components != src_components) {
		LLVMValueRef masks[] = {
			LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

		if (src_components > 1 && num_components == 1) {
			value = LLVMBuildExtractElement(ctx->ac.builder, value,
							masks[0], "");
		} else if (src_components == 1 && num_components > 1) {
			LLVMValueRef values[] = {value, value, value, value};
			value = ac_build_gather_values(&ctx->ac, values, num_components);
		} else {
			LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
			value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
						       swizzle, "");
		}
	}
	return value;
}

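/* Comparison helpers: this path uses 32-bit booleans, so the i1 produced by
 * the LLVM compare is widened to 0xFFFFFFFF / 0 with a select. */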
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
				 LLVMIntPredicate pred, LLVMValueRef src0,
				 LLVMValueRef src1)
{
	LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}

static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
				   LLVMRealPredicate pred, LLVMValueRef src0,
				   LLVMValueRef src1)
{
	LLVMValueRef result;

	src0 = ac_to_float(ctx, src0);
	src1 = ac_to_float(ctx, src1);
	result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}

static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
		ac_to_float(ctx, src2),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
			       LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	LLVMTypeRef src1_type = LLVMTypeOf(src1);
	LLVMTypeRef src2_type = LLVMTypeOf(src2);

	assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

	if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
	    LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
		src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
	} else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
		   LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
		src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
	}

	LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
				       ctx->i32_0, "");
	return LLVMBuildSelect(ctx->builder, v,
			       ac_to_integer_or_pointer(ctx, src1),
			       ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx,
			      LLVMValueRef src0)
{
	return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}

static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
				    const char *intrin,
				    LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMTypeRef ret_type;
	LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
	LLVMValueRef res;
	LLVMValueRef params[] = { src0, src1 };
	ret_type = LLVMStructTypeInContext(ctx->context, types,
					   2, false);

	res = ac_build_intrinsic(ctx, intrin, ret_type,
				 params, 2, AC_FUNC_ATTR_READNONE);

	res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
	res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
	return res;
}

static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
			     LLVMValueRef src0,
			     unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
					   LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
					   "");
	result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

	switch (bitsize) {
	case 16:
		return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}

static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
			     LLVMValueRef src0)
{
	src0 = ac_to_float(ctx, src0);
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
			     LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
			     ctx->i32, "");
}

static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx,
			     LLVMValueRef src0,
			     unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

	switch (bitsize) {
	case 8:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
	case 16:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}

static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx,
			     LLVMValueRef src0)
{
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
			     LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
			     ctx->i32, "");
}

static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx,
			       LLVMValueRef src0)
{
	LLVMValueRef result;
	LLVMValueRef cond = NULL;

	src0 = ac_to_float(ctx, src0);
	result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

	if (ctx->chip_class >= GFX8) {
		LLVMValueRef args[2];
		/* Check if the result is a denormal - and flush to 0 if so. */
		args[0] = result;
		args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
		cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
	}

	/* need to convert back up to f32 */
	result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

	if (ctx->chip_class >= GFX8)
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	else {
		/* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
		 * so compare the result and flush to 0 if it's smaller.
		 */
		LLVMValueRef temp, cond2;
		temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
		cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
				     LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
				     temp, "");
		cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
				      temp, ctx->f32_0, "");
		cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	}
	return result;
}

static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
				   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}

static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx,
				   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}

static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx,
			     LLVMValueRef bits, LLVMValueRef offset)
{
	/* mask = ((1 << bits) - 1) << offset */
	return LLVMBuildShl(ctx->builder,
			    LLVMBuildSub(ctx->builder,
					 LLVMBuildShl(ctx->builder,
						      ctx->i32_1,
						      bits, ""),
					 ctx->i32_1, ""),
			    offset, "");
}

static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx,
					 LLVMValueRef mask, LLVMValueRef insert,
					 LLVMValueRef base)
{
	/* Calculate:
	 *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
	 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
	 */
	return LLVMBuildXor(ctx->builder, base,
			    LLVMBuildAnd(ctx->builder, mask,
					 LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx,
				   LLVMValueRef src0,
				   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
							LLVMValueRef args[2]))
{
	LLVMValueRef comp[2];

	src0 = ac_to_float(ctx, src0);
	comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
	comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

	return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
					  LLVMValueRef src0)
{
	LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
	LLVMValueRef temps[2], val;
	int i;

	for (i = 0; i < 2; i++) {
		val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
		val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
		val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
		temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
	}
	return ac_build_gather_values(ctx, temps, 2);
}

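/* Screen-space derivatives: the TID mask picks which neighbouring quad lane
 * to difference against, and idx selects the X (1) or Y (2) direction for
 * ac_build_ddxy(). */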
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
			      nir_op op,
			      LLVMValueRef src0)
{
	unsigned mask;
	int idx;
	LLVMValueRef result;

	if (op == nir_op_fddx_fine)
		mask = AC_TID_MASK_LEFT;
	else if (op == nir_op_fddy_fine)
		mask = AC_TID_MASK_TOP;
	else
		mask = AC_TID_MASK_TOP_LEFT;

	/* for DDX we want the next X pixel, for DDY the next Y pixel. */
	if (op == nir_op_fddx_fine ||
	    op == nir_op_fddx_coarse ||
	    op == nir_op_fddx)
		idx = 1;
	else
		idx = 2;

	result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
	return result;
}

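/* Translates a single NIR ALU instruction into LLVM IR and records the
 * result in the SSA def table. */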
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
	LLVMValueRef src[4], result = NULL;
	unsigned num_components = instr->dest.dest.ssa.num_components;
	unsigned src_components;
	LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);

	assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
	switch (instr->op) {
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		src_components = 1;
		break;
	case nir_op_pack_half_2x16:
	case nir_op_pack_snorm_2x16:
	case nir_op_pack_unorm_2x16:
		src_components = 2;
		break;
	case nir_op_unpack_half_2x16:
		src_components = 1;
		break;
	case nir_op_cube_face_coord:
	case nir_op_cube_face_index:
		src_components = 3;
		break;
	default:
		src_components = num_components;
		break;
	}
	for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
		src[i] = get_alu_src(ctx, instr->src[i], src_components);

	switch (instr->op) {
	case nir_op_mov:
		result = src[0];
		break;
	case nir_op_fneg:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
		if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
			/* fneg will be optimized by backend compiler with sign
			 * bit removed via XOR. This is probably a LLVM bug.
			 */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_ineg:
		result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
		break;
	case nir_op_inot:
		result = LLVMBuildNot(ctx->ac.builder, src[0], "");
		break;
	case nir_op_iadd:
		result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fadd:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fsub:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_isub:
		result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imul:
		result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imod:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_umod:
		result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fmod:
		/* lower_fmod only lowers 16-bit and 32-bit fmod */
		assert(instr->dest.dest.ssa.bit_size == 64);
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
					      ac_to_float_type(&ctx->ac, def_type), result);
		result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
		result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
		break;
	case nir_op_irem:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_idiv:
		result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_udiv:
		result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fmul:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_frcp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(src[0]), 1.0), src[0]);
		break;
	case nir_op_iand:
		result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ior:
		result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ixor:
		result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishl:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ushr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ilt32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
		break;
	case nir_op_ine32:
		result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
		break;
	case nir_op_ieq32:
		result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
		break;
	case nir_op_ige32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
		break;
	case nir_op_ult32:
		result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
		break;
	case nir_op_uge32:
		result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
		break;
	case nir_op_feq32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
		break;
	case nir_op_fne32:
		result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
		break;
	case nir_op_flt32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
		break;
	case nir_op_fge32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
		break;
	case nir_op_fabs:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
			/* fabs will be optimized by backend compiler with sign
			 * bit removed via AND.
			 */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_iabs:
		result = emit_iabs(&ctx->ac, src[0]);
		break;
	case nir_op_imax:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imin:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umax:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umin:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_isign:
		result = ac_build_isign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsign:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fsign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_ffloor:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ftrunc:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fceil:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fround_even:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ffract:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fract(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsin:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fcos:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fsqrt:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fexp2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_flog2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_frsq:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(result), 1.0), result);
		break;
	case nir_op_frexp_exp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_exp(&ctx->ac, src[0],
					    ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
			result = LLVMBuildSExt(ctx->ac.builder, result,
					       ctx->ac.i32, "");
		break;
	case nir_op_frexp_sig:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_mant(&ctx->ac, src[0],
					     instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fpow:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		break;
	case nir_op_fmax:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_fmin:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_ffma:
		/* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
		result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
		break;
	case nir_op_ldexp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
		else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
		else
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
		break;
	case nir_op_bfm:
		result = emit_bfm(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_bitfield_select:
		result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_ubfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
		break;
	case nir_op_ibfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
		break;
	case nir_op_bitfield_reverse:
		result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
		break;
	case nir_op_bit_count:
		result = ac_build_bit_count(&ctx->ac, src[0]);
		break;
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
			src[i] = ac_to_integer(&ctx->ac, src[i]);
		result = ac_build_gather_values(&ctx->ac, src, num_components);
		break;
	case nir_op_f2i8:
	case nir_op_f2i16:
	case nir_op_f2i32:
	case nir_op_f2i64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_f2u8:
	case nir_op_f2u16:
	case nir_op_f2u32:
	case nir_op_f2u64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2f16:
	case nir_op_i2f32:
	case nir_op_i2f64:
		result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2f16:
	case nir_op_u2f32:
	case nir_op_u2f64:
		result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_f2f16_rtz:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (LLVMTypeOf(src[0]) == ctx->ac.f64)
			src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
		LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
		result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
		break;
	case nir_op_f2f16_rtne:
	case nir_op_f2f16:
	case nir_op_f2f32:
	case nir_op_f2f64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		else
			result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2u8:
	case nir_op_u2u16:
	case nir_op_u2u32:
	case nir_op_u2u64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2i8:
	case nir_op_i2i16:
	case nir_op_i2i32:
	case nir_op_i2i64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_bcsel:
		result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_find_lsb:
		result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
		break;
	case nir_op_ufind_msb:
		result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_ifind_msb:
		result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_uadd_carry:
		result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_usub_borrow:
		result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_b2f16:
	case nir_op_b2f32:
	case nir_op_b2f64:
		result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_f2b32:
		result = emit_f2b(&ctx->ac, src[0]);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
	case nir_op_b2i64:
		result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_i2b32:
		result = emit_i2b(&ctx->ac, src[0]);
		break;
	case nir_op_fquantize2f16:
		result = emit_f2f16(&ctx->ac, src[0]);
		break;
	case nir_op_umul_high:
		result = emit_umul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imul_high:
		result = emit_imul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_pack_half_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
		break;
	case nir_op_pack_snorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
		break;
	case nir_op_pack_unorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
		break;
	case nir_op_unpack_half_2x16:
		result = emit_unpack_half_2x16(&ctx->ac, src[0]);
		break;
	case nir_op_fddx:
	case nir_op_fddy:
	case nir_op_fddx_fine:
	case nir_op_fddy_fine:
	case nir_op_fddx_coarse:
	case nir_op_fddy_coarse:
		result = emit_ddxy(ctx, instr->op, src[0]);
		break;

	case nir_op_unpack_64_2x32_split_x: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}

	case nir_op_unpack_64_2x32_split_y: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}

	case nir_op_pack_64_2x32_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
		break;
	}

	case nir_op_pack_32_2x16_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}

	case nir_op_unpack_32_2x16_split_x: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}

	case nir_op_unpack_32_2x16_split_y: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}

	case nir_op_cube_face_coord: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef results[2];
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
						     ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
		results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
		LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
		results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
		results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
		result = ac_build_gather_values(&ctx->ac, results, 2);
		break;
	}

	case nir_op_cube_face_index: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
					    ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		break;
	}

	case nir_op_fmin3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umin3:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		result = ac_build_umin(&ctx->ac, result, src[2]);
		break;
	case nir_op_imin3:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		result = ac_build_imin(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmax3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umax3:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		result = ac_build_umax(&ctx->ac, result, src[2]);
		break;
	case nir_op_imax3:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		result = ac_build_imax(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmed3: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		src[2] = ac_to_float(&ctx->ac, src[2]);
		result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
					instr->dest.dest.ssa.bit_size);
		break;
	}
	case nir_op_imed3: {
		LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
		result = ac_build_imax(&ctx->ac, tmp1, tmp2);
		break;
	}
	case nir_op_umed3: {
		LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
		result = ac_build_umax(&ctx->ac, tmp1, tmp2);
		break;
	}

	default:
		fprintf(stderr, "Unknown NIR alu instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}

	if (result) {
		assert(instr->dest.dest.is_ssa);
		result = ac_to_integer_or_pointer(&ctx->ac, result);
		ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
	}
}

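/* Materializes a NIR load_const as an LLVM constant scalar or vector. */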
static void visit_load_const(struct ac_nir_context *ctx,
			     const nir_load_const_instr *instr)
{
	LLVMValueRef values[4], value = NULL;
	LLVMTypeRef element_type =
		LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	for (unsigned i = 0; i < instr->def.num_components; ++i) {
		switch (instr->def.bit_size) {
		case 8:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u8, false);
			break;
		case 16:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u16, false);
			break;
		case 32:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u32, false);
			break;
		case 64:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u64, false);
			break;
		default:
			fprintf(stderr,
				"unsupported nir load_const bit_size: %d\n",
				instr->def.bit_size);
			abort();
		}
	}
	if (instr->def.num_components > 1) {
		value = LLVMConstVector(values, instr->def.num_components);
	} else
		value = values[0];

	ctx->ssa_defs[instr->def.index] = value;
}

static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
	LLVMValueRef size =
		LLVMBuildExtractElement(ctx->ac.builder, descriptor,
					LLVMConstInt(ctx->ac.i32, 2, false), "");

	if (ctx->ac.chip_class == GFX8 && in_elements) {
		/* On GFX8, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(ctx->ac.builder, descriptor,
						ctx->ac.i32_1, "");
		stride = LLVMBuildLShr(ctx->ac.builder, stride,
				       LLVMConstInt(ctx->ac.i32, 16, false), "");
		stride = LLVMBuildAnd(ctx->ac.builder, stride,
				      LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

		size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
	}
	return size;
}

/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
					  nir_variable *var,
					  struct ac_image_args *args,
					  const nir_tex_instr *instr)
{
	const struct glsl_type *type = glsl_without_array(var->type);
	enum glsl_base_type stype = glsl_get_sampler_result_type(type);
	LLVMValueRef wa_8888 = NULL;
	LLVMValueRef half_texel[2];
	LLVMValueRef result;

	assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef formats;
		LLVMValueRef data_format;
		LLVMValueRef wa_formats;

		formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

		data_format = LLVMBuildLShr(ctx->builder, formats,
					    LLVMConstInt(ctx->i32, 20, false), "");
		data_format = LLVMBuildAnd(ctx->builder, data_format,
					   LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
		wa_8888 = LLVMBuildICmp(
			ctx->builder, LLVMIntEQ, data_format,
			LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
			"");

		uint32_t wa_num_format =
			stype == GLSL_TYPE_UINT ?
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
		wa_formats = LLVMBuildAnd(ctx->builder, formats,
					  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
					  "");
		wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
					 LLVMConstInt(ctx->i32, wa_num_format, false), "");

		formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
		args->resource = LLVMBuildInsertElement(
			ctx->builder, args->resource, formats, ctx->i32_1, "");
	}

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct ac_image_args resinfo = {};
		LLVMBasicBlockRef bbs[2];

		LLVMValueRef unnorm = NULL;
		LLVMValueRef default_offset = ctx->f32_0;
		if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
		    !instr->is_array) {
			/* In vulkan, whether the sampler uses unnormalized
			 * coordinates or not is a dynamic property of the
			 * sampler. Hence, to figure out whether or not we
			 * need to divide by the texture size, we need to test
			 * the sampler at runtime. This tests the bit set by
			 * radv_init_sampler().
			 */
			LLVMValueRef sampler0 =
				LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
			sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
						 LLVMConstInt(ctx->i32, 15, false), "");
			sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
			unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
			default_offset = LLVMConstReal(ctx->f32, -0.5);
		}

		bbs[0] = LLVMGetInsertBlock(ctx->builder);
		if (wa_8888 || unnorm) {
			assert(!(wa_8888 && unnorm));
			LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
			/* Skip the texture size query entirely if we don't need it. */
			ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
			bbs[1] = LLVMGetInsertBlock(ctx->builder);
		}

		/* Query the texture size. */
		resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
		resinfo.opcode = ac_image_get_resinfo;
		resinfo.dmask = 0xf;
		resinfo.lod = ctx->i32_0;
		resinfo.resource = args->resource;
		resinfo.attributes = AC_FUNC_ATTR_READNONE;
		LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

		/* Compute -0.5 / size. */
		for (unsigned c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(ctx->builder, size,
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
			half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}

		if (wa_8888 || unnorm) {
			ac_build_endif(ctx, 2000);

			for (unsigned c = 0; c < 2; c++) {
				LLVMValueRef values[2] = { default_offset, half_texel[c] };
				half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
							     values, bbs);
			}
		}
	}

	for (unsigned c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
		args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	result = ac_build_image_opcode(ctx, args);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef tmp, tmp2;

		/* if the cube workaround is in place, f2i the result. */
		for (unsigned c = 0; c < 4; c++) {
			tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
			if (stype == GLSL_TYPE_UINT)
				tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
			else
				tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
			tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
			tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
			result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
		}
	}
	return result;
}

static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
	nir_deref_instr *texture_deref_instr = NULL;

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		default:
			break;
		}
	}
	return texture_deref_instr;
}

static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
					const nir_tex_instr *instr,
					struct ac_image_args *args)
{
	if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

		return ac_build_buffer_load_format(&ctx->ac,
						   args->resource,
						   args->coords[0],
						   ctx->ac.i32_0,
						   util_last_bit(mask),
						   0, true);
	}

	args->opcode = ac_image_sample;

	switch (instr->op) {
	case nir_texop_txf:
	case nir_texop_txf_ms:
	case nir_texop_samples_identical:
		args->opcode = args->level_zero ||
			       instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
					ac_image_load : ac_image_load_mip;
		args->level_zero = false;
		break;
	case nir_texop_txs:
	case nir_texop_query_levels:
		args->opcode = ac_image_get_resinfo;
		if (!args->lod)
			args->lod = ctx->ac.i32_0;
		args->level_zero = false;
		break;
	case nir_texop_tex:
		if (ctx->stage != MESA_SHADER_FRAGMENT) {
			assert(!args->lod);
			args->level_zero = true;
		}
		break;
	case nir_texop_tg4:
		args->opcode = ac_image_gather4;
		args->level_zero = true;
		break;
	case nir_texop_lod:
		args->opcode = ac_image_get_lod;
		break;
	case nir_texop_fragment_fetch:
	case nir_texop_fragment_mask_fetch:
		args->opcode = ac_image_load;
		args->level_zero = false;
		break;
	default:
		break;
	}

	if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
		nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
		nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
		const struct glsl_type *type = glsl_without_array(var->type);
		enum glsl_base_type stype = glsl_get_sampler_result_type(type);
		if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
			return lower_gather4_integer(&ctx->ac, var, args, instr);
		}
	}

	/* Fixup for GFX9 which allocates 1D textures as 2D. */
	if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
		if ((args->dim == ac_image_2darray ||
		     args->dim == ac_image_2d) && !args->coords[1]) {
			args->coords[1] = ctx->ac.i32_0;
		}
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
			 ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
	if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
		/* Prevent texture instructions with implicit derivatives from being
		 * sunk into branches. */
		switch (instr->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			args->attributes |= AC_FUNC_ATTR_CONVERGENT;
			break;
		default:
			break;
		}
	}

	return ac_build_image_opcode(&ctx->ac, args);
}

static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
						  nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr = get_src(ctx, instr->src[0]);
	LLVMValueRef index = get_src(ctx, instr->src[1]);

	LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
	LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
	return result;
}

visit_load_push_constant(struct ac_nir_context
*ctx
,
1437 nir_intrinsic_instr
*instr
)
1439 LLVMValueRef ptr
, addr
;
1440 LLVMValueRef src0
= get_src(ctx
, instr
->src
[0]);
1441 unsigned index
= nir_intrinsic_base(instr
);
1443 addr
= LLVMConstInt(ctx
->ac
.i32
, index
, 0);
1444 addr
= LLVMBuildAdd(ctx
->ac
.builder
, addr
, src0
, "");
1446 /* Load constant values from user SGPRS when possible, otherwise
1447 * fallback to the default path that loads directly from memory.
1449 if (LLVMIsConstant(src0
) &&
1450 instr
->dest
.ssa
.bit_size
== 32) {
1451 unsigned count
= instr
->dest
.ssa
.num_components
;
1452 unsigned offset
= index
;
1454 offset
+= LLVMConstIntGetZExtValue(src0
);
1457 offset
-= ctx
->args
->base_inline_push_consts
;
1459 unsigned num_inline_push_consts
= ctx
->args
->num_inline_push_consts
;
1460 if (offset
+ count
<= num_inline_push_consts
) {
1461 LLVMValueRef push_constants
[num_inline_push_consts
];
1462 for (unsigned i
= 0; i
< num_inline_push_consts
; i
++)
1463 push_constants
[i
] = ac_get_arg(&ctx
->ac
,
1464 ctx
->args
->inline_push_consts
[i
]);
1465 return ac_build_gather_values(&ctx
->ac
,
1466 push_constants
+ offset
,
1471 ptr
= LLVMBuildGEP(ctx
->ac
.builder
,
1472 ac_get_arg(&ctx
->ac
, ctx
->args
->push_constants
), &addr
, 1, "");
1474 if (instr
->dest
.ssa
.bit_size
== 8) {
1475 unsigned load_dwords
= instr
->dest
.ssa
.num_components
> 1 ? 2 : 1;
1476 LLVMTypeRef vec_type
= LLVMVectorType(LLVMInt8TypeInContext(ctx
->ac
.context
), 4 * load_dwords
);
1477 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, vec_type
);
1478 LLVMValueRef res
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
1480 LLVMValueRef params
[3];
1481 if (load_dwords
> 1) {
1482 LLVMValueRef res_vec
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, LLVMVectorType(ctx
->ac
.i32
, 2), "");
1483 params
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, res_vec
, LLVMConstInt(ctx
->ac
.i32
, 1, false), "");
1484 params
[1] = LLVMBuildExtractElement(ctx
->ac
.builder
, res_vec
, LLVMConstInt(ctx
->ac
.i32
, 0, false), "");
1486 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, ctx
->ac
.i32
, "");
1487 params
[0] = ctx
->ac
.i32_0
;
1491 res
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.alignbyte", ctx
->ac
.i32
, params
, 3, 0);
1493 res
= LLVMBuildTrunc(ctx
->ac
.builder
, res
, LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.num_components
* 8), "");
1494 if (instr
->dest
.ssa
.num_components
> 1)
1495 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, LLVMVectorType(LLVMInt8TypeInContext(ctx
->ac
.context
), instr
->dest
.ssa
.num_components
), "");
1497 } else if (instr
->dest
.ssa
.bit_size
== 16) {
1498 unsigned load_dwords
= instr
->dest
.ssa
.num_components
/ 2 + 1;
1499 LLVMTypeRef vec_type
= LLVMVectorType(LLVMInt16TypeInContext(ctx
->ac
.context
), 2 * load_dwords
);
1500 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, vec_type
);
1501 LLVMValueRef res
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
1502 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, vec_type
, "");
1503 LLVMValueRef cond
= LLVMBuildLShr(ctx
->ac
.builder
, addr
, ctx
->ac
.i32_1
, "");
1504 cond
= LLVMBuildTrunc(ctx
->ac
.builder
, cond
, ctx
->ac
.i1
, "");
1505 LLVMValueRef mask
[] = { LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
1506 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
1507 LLVMConstInt(ctx
->ac
.i32
, 4, false)};
1508 LLVMValueRef swizzle_aligned
= LLVMConstVector(&mask
[0], instr
->dest
.ssa
.num_components
);
1509 LLVMValueRef swizzle_unaligned
= LLVMConstVector(&mask
[1], instr
->dest
.ssa
.num_components
);
1510 LLVMValueRef shuffle_aligned
= LLVMBuildShuffleVector(ctx
->ac
.builder
, res
, res
, swizzle_aligned
, "");
1511 LLVMValueRef shuffle_unaligned
= LLVMBuildShuffleVector(ctx
->ac
.builder
, res
, res
, swizzle_unaligned
, "");
1512 res
= LLVMBuildSelect(ctx
->ac
.builder
, cond
, shuffle_unaligned
, shuffle_aligned
, "");
1513 return LLVMBuildBitCast(ctx
->ac
.builder
, res
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
1516 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, get_def_type(ctx
, &instr
->dest
.ssa
));
1518 return LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
					  const nir_intrinsic_instr *instr)
{
	LLVMValueRef index = get_src(ctx, instr->src[0]);

	return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}

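/* Expands a per-component writemask so each component covers `multiplier`
 * consecutive bits, e.g. widen_mask(0b101, 2) == 0b110011. */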
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
	uint32_t new_mask = 0;
	for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
		if (mask & (1u << i))
			new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
	return new_mask;
}

static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
					 unsigned start, unsigned count)
{
	LLVMValueRef mask[] = {
		ctx->i32_0, ctx->i32_1,
		LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

	unsigned src_elements = ac_get_llvm_num_components(src);

	if (count == src_elements) {
		assert(start == 0);
		return src;
	} else if (count == 1) {
		assert(start < src_elements);
		return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
	} else {
		assert(start + count <= src_elements);
		LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
		return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
	}
}

static unsigned get_cache_policy(struct ac_nir_context *ctx,
				 enum gl_access_qualifier access,
				 bool may_store_unaligned,
				 bool writeonly_memory)
{
	unsigned cache_policy = 0;

	/* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
	 * store opcodes not aligned to a dword are affected. The only way to
	 * get unaligned stores is through shader images.
	 */
	if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
	     /* If this is write-only, don't keep data in L1 to prevent
	      * evicting L1 cache lines that may be needed by other
	      * instructions. */
	     writeonly_memory ||
	     access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
		cache_policy |= ac_glc;
	}

	if (access & ACCESS_STREAM_CACHE_POLICY)
		cache_policy |= ac_slc;

	return cache_policy;
}

static void visit_store_ssbo(struct ac_nir_context *ctx,
			     nir_intrinsic_instr *instr)
{
	LLVMValueRef src_data = get_src(ctx, instr->src[0]);
	int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
	unsigned writemask = nir_intrinsic_write_mask(instr);
	enum gl_access_qualifier access = nir_intrinsic_access(instr);
	bool writeonly_memory = access & ACCESS_NON_READABLE;
	unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

	LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
						get_src(ctx, instr->src[1]), true);
	LLVMValueRef base_data = src_data;
	base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
	LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

	while (writemask) {
		int start, count;
		LLVMValueRef data, offset;
		LLVMTypeRef data_type;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation with LLVM < 9, split 3-element
		 * writes into a 2-element and a 1-element write. */
		if (count == 3 &&
		    (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
			writemask |= 1 << (start + 2);
			count = 2;
		}
		int num_bytes = count * elem_size_bytes; /* count in bytes */

		/* we can only store 4 DWords at the same time.
		 * can only happen for 64 Bit vectors. */
		if (num_bytes > 16) {
			writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
			count = 2;
			num_bytes = 16;
		}

		/* check alignment of 16 Bit stores */
		if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
			writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
			count = 1;
			num_bytes = 2;
		}
		data = extract_vector_range(&ctx->ac, base_data, start, count);

		offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
				      LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

		if (num_bytes == 1) {
			ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
						    offset, ctx->ac.i32_0,
						    cache_policy);
		} else if (num_bytes == 2) {
			ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
						     offset, ctx->ac.i32_0,
						     cache_policy);
		} else {
			int num_channels = num_bytes / 4;

			switch (num_bytes) {
			case 16: /* v4f32 */
				data_type = ctx->ac.v4f32;
				break;
			case 12: /* v3f32 */
				data_type = ctx->ac.v3f32;
				break;
			case 8: /* v2f32 */
				data_type = ctx->ac.v2f32;
				break;
			case 4: /* f32 */
				data_type = ctx->ac.f32;
				break;
			default:
				unreachable("Malformed vector store.");
			}
			data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

			ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
						    num_channels, offset,
						    ctx->ac.i32_0, 0,
						    cache_policy, false);
		}
	}
}

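/* Implements 64-bit SSBO compare-and-swap by reconstructing a global pointer
 * from the buffer descriptor's base address and issuing an atomic cmpxchg;
 * with robust buffer access, the operation is bounds-checked and yields 0
 * when out of bounds. */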
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
					   LLVMValueRef descriptor,
					   LLVMValueRef offset,
					   LLVMValueRef compare,
					   LLVMValueRef exchange)
{
	LLVMBasicBlockRef start_block = NULL, then_block = NULL;
	if (ctx->abi->robust_buffer_access) {
		LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

		LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
		start_block = LLVMGetInsertBlock(ctx->ac.builder);

		ac_build_ifcc(&ctx->ac, cond, -1);

		then_block = LLVMGetInsertBlock(ctx->ac.builder);
	}

	LLVMValueRef ptr_parts[2] = {
		ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
		LLVMBuildAnd(ctx->ac.builder,
			     ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
			     LLVMConstInt(ctx->ac.i32, 65535, 0), "")
	};

	ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
	ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

	offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

	LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
	ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
	ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

	LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
	result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

	if (ctx->abi->robust_buffer_access) {
		ac_build_endif(&ctx->ac, -1);

		LLVMBasicBlockRef incoming_blocks[2] = {
			start_block,
			then_block,
		};

		LLVMValueRef incoming_values[2] = {
			LLVMConstInt(ctx->ac.i64, 0, 0),
			result,
		};

		LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
		LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
		return ret;
	} else {
		return result;
	}
}

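/* Lowers the ssbo_atomic_* intrinsics to llvm.amdgcn.(raw.)buffer.atomic.*;
 * 64-bit compare-swap takes the global-pointer path above. */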
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
				      const nir_intrinsic_instr *instr)
{
	LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
	const char *op;
	char name[64], type[8];
	LLVMValueRef params[6], descriptor;
	int arg_count = 0;

	switch (instr->intrinsic) {
	case nir_intrinsic_ssbo_atomic_add:
		op = "add";
		break;
	case nir_intrinsic_ssbo_atomic_imin:
		op = "smin";
		break;
	case nir_intrinsic_ssbo_atomic_umin:
		op = "umin";
		break;
	case nir_intrinsic_ssbo_atomic_imax:
		op = "smax";
		break;
	case nir_intrinsic_ssbo_atomic_umax:
		op = "umax";
		break;
	case nir_intrinsic_ssbo_atomic_and:
		op = "and";
		break;
	case nir_intrinsic_ssbo_atomic_or:
		op = "or";
		break;
	case nir_intrinsic_ssbo_atomic_xor:
		op = "xor";
		break;
	case nir_intrinsic_ssbo_atomic_exchange:
		op = "swap";
		break;
	case nir_intrinsic_ssbo_atomic_comp_swap:
		op = "cmpswap";
		break;
	default:
		abort();
	}

	descriptor = ctx->abi->load_ssbo(ctx->abi,
					 get_src(ctx, instr->src[0]),
					 true);

	if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
	    return_type == ctx->ac.i64) {
		return emit_ssbo_comp_swap_64(ctx, descriptor,
					      get_src(ctx, instr->src[1]),
					      get_src(ctx, instr->src[2]),
					      get_src(ctx, instr->src[3]));
	}
	if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
		params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
	}
	params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
	params[arg_count++] = descriptor;

	if (LLVM_VERSION_MAJOR >= 9) {
		/* XXX: The new raw/struct atomic intrinsics are buggy with
		 * LLVM 8, see r358579.
		 */
		params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
		params[arg_count++] = ctx->ac.i32_0; /* soffset */
		params[arg_count++] = ctx->ac.i32_0; /* slc */

		ac_build_type_name_for_intr(return_type, type, sizeof(type));
		snprintf(name, sizeof(name),
			 "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
	} else {
		params[arg_count++] = ctx->ac.i32_0; /* vindex */
		params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
		params[arg_count++] = ctx->ac.i1false; /* slc */

		assert(return_type == ctx->ac.i32);
		snprintf(name, sizeof(name),
			 "llvm.amdgcn.buffer.atomic.%s", op);
	}

	return ac_build_intrinsic(&ctx->ac, name, return_type, params,
				  arg_count, 0);
}

visit_load_buffer(struct ac_nir_context
*ctx
,
1821 const nir_intrinsic_instr
*instr
)
1823 int elem_size_bytes
= instr
->dest
.ssa
.bit_size
/ 8;
1824 int num_components
= instr
->num_components
;
1825 enum gl_access_qualifier access
= nir_intrinsic_access(instr
);
1826 unsigned cache_policy
= get_cache_policy(ctx
, access
, false, false);
1828 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
1829 LLVMValueRef rsrc
= ctx
->abi
->load_ssbo(ctx
->abi
,
1830 get_src(ctx
, instr
->src
[0]), false);
1831 LLVMValueRef vindex
= ctx
->ac
.i32_0
;
1833 LLVMTypeRef def_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
1834 LLVMTypeRef def_elem_type
= num_components
> 1 ? LLVMGetElementType(def_type
) : def_type
;
1836 LLVMValueRef results
[4];
1837 for (int i
= 0; i
< num_components
;) {
1838 int num_elems
= num_components
- i
;
1839 if (elem_size_bytes
< 4 && nir_intrinsic_align(instr
) % 4 != 0)
1841 if (num_elems
* elem_size_bytes
> 16)
1842 num_elems
= 16 / elem_size_bytes
;
1843 int load_bytes
= num_elems
* elem_size_bytes
;
1845 LLVMValueRef immoffset
= LLVMConstInt(ctx
->ac
.i32
, i
* elem_size_bytes
, false);
1849 if (load_bytes
== 1) {
1850 ret
= ac_build_tbuffer_load_byte(&ctx
->ac
,
1856 } else if (load_bytes
== 2) {
1857 ret
= ac_build_tbuffer_load_short(&ctx
->ac
,
1864 int num_channels
= util_next_power_of_two(load_bytes
) / 4;
1865 bool can_speculate
= access
& ACCESS_CAN_REORDER
;
1867 ret
= ac_build_buffer_load(&ctx
->ac
, rsrc
, num_channels
,
1868 vindex
, offset
, immoffset
, 0,
1869 cache_policy
, can_speculate
, false);
1872 LLVMTypeRef byte_vec
= LLVMVectorType(ctx
->ac
.i8
, ac_get_type_size(LLVMTypeOf(ret
)));
1873 ret
= LLVMBuildBitCast(ctx
->ac
.builder
, ret
, byte_vec
, "");
1874 ret
= ac_trim_vector(&ctx
->ac
, ret
, load_bytes
);
1876 LLVMTypeRef ret_type
= LLVMVectorType(def_elem_type
, num_elems
);
1877 ret
= LLVMBuildBitCast(ctx
->ac
.builder
, ret
, ret_type
, "");
1879 for (unsigned j
= 0; j
< num_elems
; j
++) {
1880 results
[i
+ j
] = LLVMBuildExtractElement(ctx
->ac
.builder
, ret
, LLVMConstInt(ctx
->ac
.i32
, j
, false), "");
1885 return ac_build_gather_values(&ctx
->ac
, results
, num_components
);
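/* UBO loads: 8- and 16-bit destinations are fetched one component at a time
 * through the typed-buffer paths; everything else is a single dword-vector
 * load (64-bit values count as two dword components each).
 */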
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
   LLVMValueRef ret;
   LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   int num_components = instr->num_components;

   if (ctx->abi->load_ubo)
      rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

   if (instr->dest.ssa.bit_size == 64)
      num_components *= 2;

   if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
      unsigned load_bytes = instr->dest.ssa.bit_size / 8;
      LLVMValueRef results[num_components];
      for (unsigned i = 0; i < num_components; ++i) {
         LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                               load_bytes * i, 0);

         if (load_bytes == 1) {
            results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                    rsrc,
                                                    offset,
                                                    ctx->ac.i32_0,
                                                    immoffset,
                                                    0);
         } else {
            assert(load_bytes == 2);
            results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                     rsrc,
                                                     offset,
                                                     ctx->ac.i32_0,
                                                     immoffset,
                                                     0);
         }
      }
      ret = ac_build_gather_values(&ctx->ac, results, num_components);
   } else {
      ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                 NULL, 0, 0, true, true);

      ret = ac_trim_vector(&ctx->ac, ret, num_components);
   }

   return LLVMBuildBitCast(ctx->ac.builder, ret,
                           get_def_type(ctx, &instr->dest.ssa), "");
}
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += size *
                            nir_src_as_uint(path.path[idx_lvl]->arr.index);
         } else {
            LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
                                                  get_src(ctx, path.path[idx_lvl]->arr.index), "");
            if (offset)
               offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, const_offset, 0),
                            "");

   *const_out = const_offset;
   *indir_out = offset;
}
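/* Common handler for TCS/TES varying loads: compute the deref offsets, then
 * hand everything to the ABI's load_tess_varyings hook.
 */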
static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
                                       nir_intrinsic_instr *instr,
                                       bool load_inputs)
{
   LLVMValueRef result;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef indir_index = NULL;
   unsigned const_index = 0;

   nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

   unsigned location = var->data.location;
   unsigned driver_location = var->data.driver_location;
   const bool is_patch = var->data.patch ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
   const bool is_compact = var->data.compact;

   get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                    false, NULL, is_patch ? NULL : &vertex_index,
                    &const_index, &indir_index);

   LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

   LLVMTypeRef src_component_type;
   if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
      src_component_type = LLVMGetElementType(dest_type);
   else
      src_component_type = dest_type;

   result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                         vertex_index, indir_index,
                                         const_index, location, driver_location,
                                         var->data.location_frac,
                                         instr->num_components,
                                         is_patch, is_compact, load_inputs);
   if (instr->dest.ssa.bit_size == 16) {
      result = ac_to_integer(&ctx->ac, result);
      result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
   }
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
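/* Handler for nir_intrinsic_load_deref: dispatches on the variable mode
 * (shader inputs, outputs, function temporaries, global memory).
 */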
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                                   nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef values[8];
   int idx = 0;
   int ve = instr->dest.ssa.num_components;
   unsigned comp = 0;
   LLVMValueRef indir_index;
   LLVMValueRef ret;
   unsigned const_index;
   unsigned stride = 4;
   int mode = deref->mode;

   if (var) {
      bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
                   var->data.mode == nir_var_shader_in;
      idx = var->data.driver_location;
      comp = var->data.location_frac;
      mode = var->data.mode;

      get_deref_offset(ctx, deref, vs_in, NULL, NULL,
                       &const_index, &indir_index);

      if (var->data.compact) {
         stride = 1;
         const_index += comp;
         comp = 0;
      }
   }

   if (instr->dest.ssa.bit_size == 64 &&
       (deref->mode == nir_var_shader_in ||
        deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp))
      ve *= 2;

   switch (mode) {
   case nir_var_shader_in:
      if (ctx->stage == MESA_SHADER_TESS_CTRL ||
          ctx->stage == MESA_SHADER_TESS_EVAL) {
         return load_tess_varyings(ctx, instr, true);
      }

      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
         LLVMValueRef indir_index;
         unsigned const_index, vertex_index;
         get_deref_offset(ctx, deref, false, &vertex_index, NULL,
                          &const_index, &indir_index);
         assert(indir_index == NULL);

         return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                      var->data.driver_location,
                                      var->data.location_frac,
                                      instr->num_components, vertex_index, const_index, type);
      }

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type,
                  ctx->stage == MESA_SHADER_VERTEX);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->inputs + idx + chan, count,
                  stride, false, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else
            values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < ve; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
         }
      }
      break;
   case nir_var_shader_out:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         return load_tess_varyings(ctx, instr, false);
      }

      if (ctx->stage == MESA_SHADER_FRAGMENT &&
          var->data.fb_fetch_output &&
          ctx->abi->emit_fbfetch)
         return ctx->abi->emit_fbfetch(ctx->abi);

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder,
                                         ctx->abi->outputs[idx + chan + const_index * stride],
                                         "");
         }
      }
      break;
   case nir_var_mem_global: {
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;

      LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
      if (stride != natural_stride) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(result_type),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
            values[i] = LLVMBuildLoad(ctx->ac.builder,
                                      ac_build_gep_ptr(&ctx->ac, address, offset), "");
         }
         return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
         return val;
      }
   }
   default:
      unreachable("unhandled variable mode");
   }
   ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef temp_ptr, value;
   int idx = 0;
   unsigned comp = 0;
   LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
   int writemask = instr->const_index[0];
   LLVMValueRef indir_index;
   unsigned const_index;

   if (var) {
      get_deref_offset(ctx, deref, false,
                       NULL, NULL, &const_index, &indir_index);
      idx = var->data.driver_location;
      comp = var->data.location_frac;

      if (var->data.compact) {
         const_index += comp;
         comp = 0;
      }
   }

   if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
       (deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp)) {

      src = LLVMBuildBitCast(ctx->ac.builder, src,
                             LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
                             "");

      writemask = widen_mask(writemask, 2);
   }

   writemask = writemask << comp;

   switch (deref->mode) {
   case nir_var_shader_out:

      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         LLVMValueRef vertex_index = NULL;
         LLVMValueRef indir_index = NULL;
         unsigned const_index = 0;
         const bool is_patch = var->data.patch ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

         get_deref_offset(ctx, deref, false, NULL,
                          is_patch ? NULL : &vertex_index,
                          &const_index, &indir_index);

         ctx->abi->store_tcs_outputs(ctx->abi, var,
                                     vertex_index, indir_index,
                                     const_index, src, writemask);
         return;
      }

      for (unsigned chan = 0; chan < 8; chan++) {
         int stride = 4;
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

         if (var->data.compact)
            stride = 1;
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
                                        count, stride, tmp_vec);

         } else {
            temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < 8; chan++) {
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan);
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  4, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
                                        count, 4, tmp_vec);
         } else {
            temp_ptr = ctx->locals[idx + chan + const_index * 4];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;

   case nir_var_mem_global: {
      int writemask = instr->const_index[0];
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMValueRef val = get_src(ctx, instr->src[1]);

      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;

      LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                             LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
      address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

      if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
          stride == natural_stride) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         val = LLVMBuildBitCast(ctx->ac.builder, val,
                                LLVMGetElementType(LLVMTypeOf(address)), "");
         LLVMBuildStore(ctx->ac.builder, val, address);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(LLVMTypeOf(val)),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         for (unsigned chan = 0; chan < 4; chan++) {
            if (!(writemask & (1 << chan)))
               continue;

            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

            LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
            LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
                                                    chan);
            src = LLVMBuildBitCast(ctx->ac.builder, src,
                                   LLVMGetElementType(LLVMTypeOf(ptr)), "");
            LLVMBuildStore(ctx->ac.builder, src, ptr);
         }
      }
      break;
   }
   default:
      abort();
      break;
   }
}
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_BUF:
      return 1;
   case GLSL_SAMPLER_DIM_1D:
      return array ? 2 : 1;
   case GLSL_SAMPLER_DIM_2D:
      return array ? 3 : 2;
   case GLSL_SAMPLER_DIM_MS:
      return array ? 4 : 3;
   case GLSL_SAMPLER_DIM_3D:
   case GLSL_SAMPLER_DIM_CUBE:
      return 3;
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_SUBPASS:
      return 2;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return 2;
   default:
      break;
   }
   return 0;
}
static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                                    LLVMValueRef coord_x, LLVMValueRef coord_y,
                                                    LLVMValueRef coord_z,
                                                    LLVMValueRef sample_index,
                                                    LLVMValueRef fmask_desc_ptr)
{
   unsigned sample_chan = coord_z ? 3 : 2;
   LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
   addr[sample_chan] = sample_index;

   ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
   return addr[sample_chan];
}
static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
   assert(instr->src[0].is_ssa);
   return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
                                         const nir_intrinsic_instr *instr,
                                         enum ac_descriptor_type desc_type,
                                         bool write)
{
   nir_deref_instr *deref_instr =
      instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
      nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

   return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, true, write);
}
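/* Fill in the coordinate operands of ac_image_args for an image intrinsic,
 * including FMASK-based sample-index remapping for multisampled loads and
 * the GFX9 1D/2D addressing workarounds.
 */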
static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             struct ac_image_args *args,
                             enum glsl_sampler_dim dim,
                             bool is_array)
{
   LLVMValueRef src0 = get_src(ctx, instr->src[1]);
   LLVMValueRef masks[] = {
      LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
      LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
   };
   LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

   int count;
   ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
                                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
   assert(!add_frag_pos && "Input attachments should be lowered by this point.");
   count = image_type_to_components_count(dim, is_array);

   if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
                 instr->intrinsic == nir_intrinsic_bindless_image_load)) {
      LLVMValueRef fmask_load_address[3];

      fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
      if (is_array)
         fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
      else
         fmask_load_address[2] = NULL;

      sample_index = adjust_sample_index_using_fmask(&ctx->ac,
                                                     fmask_load_address[0],
                                                     fmask_load_address[1],
                                                     fmask_load_address[2],
                                                     sample_index,
                                                     get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                                                                      AC_DESC_FMASK, &instr->instr, true, false));
   }
   if (count == 1 && !gfx9_1d) {
      if (instr->src[1].ssa->num_components)
         args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      else
         args->coords[0] = src0;
   } else {
      int chan;
      if (is_ms)
         count--;
      for (chan = 0; chan < count; ++chan) {
         args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
      }

      if (gfx9_1d) {
         if (is_array) {
            args->coords[2] = args->coords[1];
            args->coords[1] = ctx->ac.i32_0;
         } else
            args->coords[1] = ctx->ac.i32_0;
         count++;
      }
      if (ctx->ac.chip_class == GFX9 &&
          dim == GLSL_SAMPLER_DIM_2D &&
          !is_array) {
         /* The hw can't bind a slice of a 3D image as a 2D
          * image, because it ignores BASE_ARRAY if the target
          * is 3D. The workaround is to read BASE_ARRAY and set
          * it as the 3rd address operand for all 2D images.
          */
         LLVMValueRef first_layer, const5, mask;

         const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
         mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
         first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
         first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

         args->coords[count] = first_layer;
         count++;
      }

      if (is_ms) {
         args->coords[count] = sample_index;
         count++;
      }
   }
}
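/* Buffer-image descriptor. On GFX9 with LLVM < 9, atomics need a patched
 * descriptor: the code below replaces the element count (dword 2) with
 * max(element count, stride taken from the high half of dword 1).
 */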
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
                                                const nir_intrinsic_instr *instr,
                                                bool write, bool atomic)
{
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
   if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
      LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

      LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                    LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                    elem_count, stride, "");

      rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                    LLVMConstInt(ctx->ac.i32, 2, 0), "");
   }
   return rsrc;
}
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, false, false);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
      unsigned num_channels = util_last_bit(mask);
      LLVMValueRef rsrc, vindex;

      rsrc = get_image_buffer_descriptor(ctx, instr, false, false);
      vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      bool can_speculate = access & ACCESS_CAN_REORDER;
      res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
                                        ctx->ac.i32_0, num_channels,
                                        args.cache_policy,
                                        can_speculate);
      res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

      res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
      res = ac_to_integer(&ctx->ac, res);
   } else {
      bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;

      args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
      args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
      get_image_coords(ctx, instr, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[3]);
      args.dmask = 15;
      args.attributes = AC_FUNC_ATTR_READONLY;

      res = ac_build_image_opcode(&ctx->ac, &args);
   }
   return res;
}
static void visit_image_store(struct ac_nir_context *ctx,
                              nir_intrinsic_instr *instr,
                              bool bindless)
{
   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   bool writeonly_memory = access & ACCESS_NON_READABLE;
   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, true, false);
      LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      unsigned src_channels = ac_get_llvm_num_components(src);
      LLVMValueRef vindex;

      if (src_channels == 3)
         src = ac_build_expand_to_vec4(&ctx->ac, src, 3);

      vindex = LLVMBuildExtractElement(ctx->ac.builder,
                                       get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex,
                                   ctx->ac.i32_0, src_channels,
                                   args.cache_policy);
   } else {
      bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;

      args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
      args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[4]);
      args.dmask = 15;

      ac_build_image_opcode(&ctx->ac, &args);
   }
}
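/* Image atomics: buffer images go through the llvm.amdgcn.*buffer.atomic.*
 * intrinsics, everything else through a MIMG atomic built via ac_image_args.
 */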
static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
                                       const nir_intrinsic_instr *instr,
                                       bool bindless)
{
   LLVMValueRef params[7];
   int param_count = 0;

   bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
                  instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
   const char *atomic_name;
   char intrinsic_name[64];
   enum ac_atomic_op atomic_subop;
   ASSERTED int length;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      if (instr->intrinsic == nir_intrinsic_bindless_image_atomic_imin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_imax ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umax) {
         ASSERTED const GLenum format = nir_intrinsic_format(instr);
         assert(format == GL_R32UI || format == GL_R32I);
      }
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
      atomic_name = "add";
      atomic_subop = ac_atomic_add;
      break;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
      atomic_name = "smin";
      atomic_subop = ac_atomic_smin;
      break;
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
      atomic_name = "umin";
      atomic_subop = ac_atomic_umin;
      break;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
      atomic_name = "smax";
      atomic_subop = ac_atomic_smax;
      break;
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
      atomic_name = "umax";
      atomic_subop = ac_atomic_umax;
      break;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
      atomic_name = "and";
      atomic_subop = ac_atomic_and;
      break;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
      atomic_name = "or";
      atomic_subop = ac_atomic_or;
      break;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      atomic_name = "xor";
      atomic_subop = ac_atomic_xor;
      break;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
      atomic_name = "swap";
      atomic_subop = ac_atomic_swap;
      break;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      atomic_name = "cmpswap";
      atomic_subop = 0; /* not used */
      break;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_inc_wrap: {
      atomic_name = "inc";
      atomic_subop = ac_atomic_inc_wrap;
      /* ATOMIC_INC instruction does:
       *      value = (value + 1) % (data + 1)
       * but we want:
       *      value = (value + 1) % data
       * So replace 'data' by 'data - 1'.
       */
      ctx->ssa_defs[instr->src[3].ssa->index] =
         LLVMBuildSub(ctx->ac.builder,
                      ctx->ssa_defs[instr->src[3].ssa->index],
                      ctx->ac.i32_1, "");
      break;
   }
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      atomic_name = "dec";
      atomic_subop = ac_atomic_dec_wrap;
      break;
   default:
      abort();
   }

   if (cmpswap)
      params[param_count++] = get_src(ctx, instr->src[4]);
   params[param_count++] = get_src(ctx, instr->src[3]);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      params[param_count++] = get_image_buffer_descriptor(ctx, instr, true, true);
      params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                      ctx->ac.i32_0, ""); /* vindex */
      params[param_count++] = ctx->ac.i32_0; /* voffset */
      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy
          * with LLVM 8, see r358579.
          */
         params[param_count++] = ctx->ac.i32_0; /* soffset */
         params[param_count++] = ctx->ac.i32_0; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
      } else {
         params[param_count++] = ctx->ac.i1false; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.buffer.atomic.%s", atomic_name);
      }

      assert(length < sizeof(intrinsic_name));
      return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
                                params, param_count, 0);
   } else {
      struct ac_image_args args = {};
      args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
      args.atomic = atomic_subop;
      args.data[0] = params[0];
      if (cmpswap)
         args.data[1] = params[1];
      args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

      return ac_build_image_opcode(&ctx->ac, &args);
   }
}
static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx,
                                        const nir_intrinsic_instr *instr)
{
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);

   return ac_build_image_get_sample_count(&ctx->ac, rsrc);
}
static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   if (dim == GLSL_SAMPLER_DIM_BUF)
      return get_buffer_size(ctx, get_image_descriptor(ctx, instr, AC_DESC_BUFFER, false), true);

   struct ac_image_args args = { 0 };

   args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
   args.dmask = 0xf;
   args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
   args.opcode = ac_image_get_resinfo;
   args.lod = ctx->ac.i32_0;
   args.attributes = AC_FUNC_ATTR_READNONE;

   res = ac_build_image_opcode(&ctx->ac, &args);

   LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

   if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
      LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
      LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
      z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
      res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
   }
   if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
      LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
      res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
                                   two, "");
   }
   return res;
}
static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
   unsigned wait_flags = 0;

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
      wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_shared:
      wait_flags = AC_WAIT_LGKM;
      break;
   default:
      break;
   }

   ac_build_waitcnt(ac, wait_flags);
}
void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }
   ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_discard);
      cond = ctx->ac.i1false;
   }

   ctx->abi->emit_kill(ctx->abi, cond);
}
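/* The flat local invocation index is reconstructed as
 * subgroup_id * wave_size + lane_id; the subgroup id lives in bits [6:11]
 * of the tg_size argument (hence the 0xfc0 mask, shifted right by one for
 * wave32).
 */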
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
   LLVMValueRef result;
   LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
   result = LLVMBuildAnd(ctx->ac.builder,
                         ac_get_arg(&ctx->ac, ctx->args->tg_size),
                         LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

   if (ctx->ac.wave_size == 32)
      result = LLVMBuildLShr(ctx->ac.builder, result,
                             LLVMConstInt(ctx->ac.i32, 1, false), "");

   return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      LLVMValueRef result;
      result = LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
      return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 0, false);
   }
}
static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      return LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 1, false);
   }
}
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
   LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
   const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

   /* The second argument is whether cttz(0) should be defined, but we do not care. */
   LLVMValueRef args[] = {active_set, ctx->ac.i1false};
   LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                            ctx->ac.iN_wavemask, args, 2,
                                            AC_FUNC_ATTR_NOUNWIND |
                                            AC_FUNC_ATTR_READNONE);

   return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
   LLVMValueRef values[4], derived_ptr, index, ret;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                     instr->dest.ssa.bit_size);

   for (int chan = 0; chan < instr->num_components; chan++) {
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
      values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
   }

   ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
   LLVMValueRef derived_ptr, data, index;
   LLVMBuilderRef builder = ctx->ac.builder;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                     instr->src[0].ssa->bit_size);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   int writemask = nir_intrinsic_write_mask(instr);
   for (int chan = 0; chan < 4; chan++) {
      if (!(writemask & (1 << chan))) {
         continue;
      }
      data = ac_llvm_extract_elem(&ctx->ac, src, chan);
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
      LLVMBuildStore(builder, data, derived_ptr);
   }
}
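/* Shared- and global-memory atomics map directly onto LLVM atomicrmw /
 * cmpxchg instructions; the sync scope is "workgroup" for LDS and
 * "singlethread" for global memory, the latter to get relaxed ordering.
 */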
static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     LLVMValueRef ptr, int src_idx)
{
   LLVMValueRef result;
   LLVMValueRef src = get_src(ctx, instr->src[src_idx]);

   const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
      if (deref->mode == nir_var_mem_global) {
         /* use "singlethread" sync scope to implement relaxed ordering */
         sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";

         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(src), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
         ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
      }
   }

   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
       instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
      LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
      result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
      result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
   } else {
      LLVMAtomicRMWBinOp op;
      switch (instr->intrinsic) {
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_deref_atomic_add:
         op = LLVMAtomicRMWBinOpAdd;
         break;
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_deref_atomic_umin:
         op = LLVMAtomicRMWBinOpUMin;
         break;
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_deref_atomic_umax:
         op = LLVMAtomicRMWBinOpUMax;
         break;
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_deref_atomic_imin:
         op = LLVMAtomicRMWBinOpMin;
         break;
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_deref_atomic_imax:
         op = LLVMAtomicRMWBinOpMax;
         break;
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_deref_atomic_and:
         op = LLVMAtomicRMWBinOpAnd;
         break;
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_deref_atomic_or:
         op = LLVMAtomicRMWBinOpOr;
         break;
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_deref_atomic_xor:
         op = LLVMAtomicRMWBinOpXor;
         break;
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_deref_atomic_exchange:
         op = LLVMAtomicRMWBinOpXchg;
         break;
      default:
         return NULL;
      }

      result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
   }
   return result;
}
static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
   LLVMValueRef values[2];
   LLVMValueRef pos[2];

   pos[0] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
   pos[1] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

   values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
   values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
   return ac_build_gather_values(&ctx->ac, values, 2);
}
static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx,
                                        enum glsl_interp_mode interp, unsigned location)
{
   switch (interp) {
   case INTERP_MODE_FLAT:
   default:
      return NULL;
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->persp_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->persp_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->linear_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->linear_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
      break;
   }
   return NULL;
}
static LLVMValueRef barycentric_center(struct ac_nir_context *ctx,
                                       unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx,
                                       unsigned mode,
                                       LLVMValueRef offset)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   LLVMValueRef src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
   LLVMValueRef src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

   LLVMValueRef ij_out[2];
   LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

   /*
    * take the I then J parameters, and the DDX/Y for it, and
    * calculate the IJ inputs for the interpolator.
    * temp1 = ddx * offset/sample.x + I;
    * interp_param.I = ddy * offset/sample.y + temp1;
    * temp1 = ddx * offset/sample.x + J;
    * interp_param.J = ddy * offset/sample.y + temp1;
    */
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
      LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
      LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, ix_ll, "");
      LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, iy_ll, "");
      LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                       interp_param, ix_ll, "");
      LLVMValueRef temp1, temp2;

      interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                   ctx->ac.f32, "");

      temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
      temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

      ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                   temp2, ctx->ac.i32, "");
   }
   interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx,
                                         unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_at_sample(struct ac_nir_context *ctx,
                                          unsigned mode,
                                          LLVMValueRef sample_id)
{
   if (ctx->abi->interp_at_sample_force_center)
      return barycentric_center(ctx, mode);

   LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

   /* fetch sample ID */
   LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

   LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
   src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
   LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
   src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
   LLVMValueRef coords[] = { src_c0, src_c1 };
   LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

   return barycentric_offset(ctx, mode, offset);
}
static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx,
                                       unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
{
   return LLVMBuildBitCast(ctx->ac.builder,
                           ac_get_arg(&ctx->ac, ctx->args->pull_model),
                           ctx->ac.v3i32, "");
}
static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx,
                                            LLVMValueRef interp_param,
                                            unsigned index, unsigned comp_start,
                                            unsigned num_components,
                                            unsigned bitsize)
{
   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);

   interp_param = LLVMBuildBitCast(ctx->ac.builder,
                                   interp_param, ctx->ac.v2f32, "");
   LLVMValueRef i = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_0, "");
   LLVMValueRef j = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_1, "");

   LLVMValueRef values[4];
   assert(bitsize == 16 || bitsize == 32);
   for (unsigned comp = 0; comp < num_components; comp++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
      if (bitsize == 16) {
         values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
                                               ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      } else {
         values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
                                           ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      }
   }

   return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
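/* Direct (non-interpolated) fragment shader input load, used for flat-shaded
 * and per-vertex inputs: reads the attribute from one selected vertex via
 * fs_interp_mov (P0 unless load_input_vertex requests another vertex).
 */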
static LLVMValueRef load_input(struct ac_nir_context *ctx,
                               nir_intrinsic_instr *instr)
{
   unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

   /* We only lower inputs for fragment shaders ATM */
   ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
   assert(offset);
   assert(offset[0].i32 == 0);

   unsigned component = nir_intrinsic_component(instr);
   unsigned index = nir_intrinsic_base(instr);
   unsigned vertex_id = 2; /* P0 */

   if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
      nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

      switch (src0[0].i32) {
      case 0:
         vertex_id = 2;
         break;
      case 1:
         vertex_id = 0;
         break;
      case 2:
         vertex_id = 1;
         break;
      default:
         unreachable("Invalid vertex index");
      }
   }

   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
   LLVMValueRef values[8];

   /* Each component of a 64-bit value takes up two GL-level channels. */
   unsigned num_components = instr->dest.ssa.num_components;
   unsigned bit_size = instr->dest.ssa.bit_size;
   unsigned channels =
      bit_size == 64 ? num_components * 2 : num_components;

   for (unsigned chan = 0; chan < channels; chan++) {
      if (component + chan > 4)
         attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
      values[chan] = ac_build_fs_interp_mov(&ctx->ac,
                                            LLVMConstInt(ctx->ac.i32, vertex_id, false),
                                            llvm_chan,
                                            attr_number,
                                            ac_get_arg(&ctx->ac, ctx->args->prim_mask));
      values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
      values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
                                             bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
   }

   LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
   if (bit_size == 64) {
      LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
                         LLVMVectorType(ctx->ac.i64, num_components);
      result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
   }
   return result;
}
3333 static void visit_intrinsic(struct ac_nir_context
*ctx
,
3334 nir_intrinsic_instr
*instr
)
3336 LLVMValueRef result
= NULL
;
3338 switch (instr
->intrinsic
) {
3339 case nir_intrinsic_ballot
:
3340 result
= ac_build_ballot(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3341 if (ctx
->ac
.ballot_mask_bits
> ctx
->ac
.wave_size
)
3342 result
= LLVMBuildZExt(ctx
->ac
.builder
, result
, ctx
->ac
.iN_ballotmask
, "");
3344 case nir_intrinsic_read_invocation
:
3345 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
3346 get_src(ctx
, instr
->src
[1]));
3348 case nir_intrinsic_read_first_invocation
:
3349 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), NULL
);
3351 case nir_intrinsic_load_subgroup_invocation
:
3352 result
= ac_get_thread_id(&ctx
->ac
);
3354 case nir_intrinsic_load_work_group_id
: {
3355 LLVMValueRef values
[3];
3357 for (int i
= 0; i
< 3; i
++) {
3358 values
[i
] = ctx
->args
->workgroup_ids
[i
].used
?
3359 ac_get_arg(&ctx
->ac
, ctx
->args
->workgroup_ids
[i
]) : ctx
->ac
.i32_0
;
3362 result
= ac_build_gather_values(&ctx
->ac
, values
, 3);
3365 case nir_intrinsic_load_base_vertex
:
3366 case nir_intrinsic_load_first_vertex
:
3367 result
= ctx
->abi
->load_base_vertex(ctx
->abi
);
3369 case nir_intrinsic_load_local_group_size
:
3370 result
= ctx
->abi
->load_local_group_size(ctx
->abi
);
3372 case nir_intrinsic_load_vertex_id
:
3373 result
= LLVMBuildAdd(ctx
->ac
.builder
,
3374 ac_get_arg(&ctx
->ac
, ctx
->args
->vertex_id
),
3375 ac_get_arg(&ctx
->ac
, ctx
->args
->base_vertex
), "");
3377 case nir_intrinsic_load_vertex_id_zero_base
: {
3378 result
= ctx
->abi
->vertex_id
;
3381 case nir_intrinsic_load_local_invocation_id
: {
3382 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->local_invocation_ids
);
3385 case nir_intrinsic_load_base_instance
:
3386 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->start_instance
);
3388 case nir_intrinsic_load_draw_id
:
3389 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->draw_id
);
3391 case nir_intrinsic_load_view_index
:
3392 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->view_index
);
3394 case nir_intrinsic_load_invocation_id
:
3395 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3396 result
= ac_unpack_param(&ctx
->ac
,
3397 ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_rel_ids
),
3400 if (ctx
->ac
.chip_class
>= GFX10
) {
3401 result
= LLVMBuildAnd(ctx
->ac
.builder
,
3402 ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
),
3403 LLVMConstInt(ctx
->ac
.i32
, 127, 0), "");
3405 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
);
3409 case nir_intrinsic_load_primitive_id
:
3410 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
3411 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_prim_id
);
3412 } else if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3413 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_patch_id
);
3414 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
3415 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tes_patch_id
);
3417 fprintf(stderr
, "Unknown primitive id intrinsic: %d", ctx
->stage
);
3419 case nir_intrinsic_load_sample_id
:
3420 result
= ac_unpack_param(&ctx
->ac
,
3421 ac_get_arg(&ctx
->ac
, ctx
->args
->ancillary
),
3424 case nir_intrinsic_load_sample_pos
:
3425 result
= load_sample_pos(ctx
);
3427 case nir_intrinsic_load_sample_mask_in
:
3428 result
= ctx
->abi
->load_sample_mask_in(ctx
->abi
);
3430 case nir_intrinsic_load_frag_coord
: {
3431 LLVMValueRef values
[4] = {
3432 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[0]),
3433 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[1]),
3434 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[2]),
3435 ac_build_fdiv(&ctx
->ac
, ctx
->ac
.f32_1
,
3436 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[3]))
3438 result
= ac_to_integer(&ctx
->ac
,
3439 ac_build_gather_values(&ctx
->ac
, values
, 4));
3442 case nir_intrinsic_load_layer_id
:
3443 result
= ctx
->abi
->inputs
[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)];
3445 case nir_intrinsic_load_front_face
:
3446 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->front_face
);
3448 case nir_intrinsic_load_helper_invocation
:
3449 result
= ac_build_load_helper_invocation(&ctx
->ac
);
3451 case nir_intrinsic_load_color0
:
3452 result
= ctx
->abi
->color0
;
3454 case nir_intrinsic_load_color1
:
3455 result
= ctx
->abi
->color1
;
3457 case nir_intrinsic_load_user_data_amd
:
3458 assert(LLVMTypeOf(ctx
->abi
->user_data
) == ctx
->ac
.v4i32
);
3459 result
= ctx
->abi
->user_data
;
3461 case nir_intrinsic_load_instance_id
:
3462 result
= ctx
->abi
->instance_id
;
3464 case nir_intrinsic_load_num_work_groups
:
3465 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->num_work_groups
);
3467 case nir_intrinsic_load_local_invocation_index
:
3468 result
= visit_load_local_invocation_index(ctx
);
3470 case nir_intrinsic_load_subgroup_id
:
3471 result
= visit_load_subgroup_id(ctx
);
3473 case nir_intrinsic_load_num_subgroups
:
3474 result
= visit_load_num_subgroups(ctx
);
3476 case nir_intrinsic_first_invocation
:
3477 result
= visit_first_invocation(ctx
);
3479 case nir_intrinsic_load_push_constant
:
3480 result
= visit_load_push_constant(ctx
, instr
);
3482 case nir_intrinsic_vulkan_resource_index
: {
3483 LLVMValueRef index
= get_src(ctx
, instr
->src
[0]);
3484 unsigned desc_set
= nir_intrinsic_desc_set(instr
);
3485 unsigned binding
= nir_intrinsic_binding(instr
);
3487 result
= ctx
->abi
->load_resource(ctx
->abi
, index
, desc_set
,
3491 case nir_intrinsic_vulkan_resource_reindex
:
3492 result
= visit_vulkan_resource_reindex(ctx
, instr
);
	case nir_intrinsic_store_ssbo:
		visit_store_ssbo(ctx, instr);
		break;
	case nir_intrinsic_load_ssbo:
		result = visit_load_buffer(ctx, instr);
		break;
	case nir_intrinsic_ssbo_atomic_add:
	case nir_intrinsic_ssbo_atomic_imin:
	case nir_intrinsic_ssbo_atomic_umin:
	case nir_intrinsic_ssbo_atomic_imax:
	case nir_intrinsic_ssbo_atomic_umax:
	case nir_intrinsic_ssbo_atomic_and:
	case nir_intrinsic_ssbo_atomic_or:
	case nir_intrinsic_ssbo_atomic_xor:
	case nir_intrinsic_ssbo_atomic_exchange:
	case nir_intrinsic_ssbo_atomic_comp_swap:
		result = visit_atomic_ssbo(ctx, instr);
		break;
	case nir_intrinsic_load_ubo:
		result = visit_load_ubo_buffer(ctx, instr);
		break;
	case nir_intrinsic_get_buffer_size:
		result = visit_get_buffer_size(ctx, instr);
		break;
	case nir_intrinsic_load_deref:
		result = visit_load_var(ctx, instr);
		break;
	case nir_intrinsic_store_deref:
		visit_store_var(ctx, instr);
		break;
	case nir_intrinsic_load_shared:
		result = visit_load_shared(ctx, instr);
		break;
	case nir_intrinsic_store_shared:
		visit_store_shared(ctx, instr);
		break;
	case nir_intrinsic_bindless_image_samples:
	case nir_intrinsic_image_deref_samples:
		result = visit_image_samples(ctx, instr);
		break;
	case nir_intrinsic_bindless_image_load:
		result = visit_image_load(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_load:
		result = visit_image_load(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_store:
		visit_image_store(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_store:
		visit_image_store(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_atomic_add:
	case nir_intrinsic_bindless_image_atomic_imin:
	case nir_intrinsic_bindless_image_atomic_umin:
	case nir_intrinsic_bindless_image_atomic_imax:
	case nir_intrinsic_bindless_image_atomic_umax:
	case nir_intrinsic_bindless_image_atomic_and:
	case nir_intrinsic_bindless_image_atomic_or:
	case nir_intrinsic_bindless_image_atomic_xor:
	case nir_intrinsic_bindless_image_atomic_exchange:
	case nir_intrinsic_bindless_image_atomic_comp_swap:
	case nir_intrinsic_bindless_image_atomic_inc_wrap:
	case nir_intrinsic_bindless_image_atomic_dec_wrap:
		result = visit_image_atomic(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_imin:
	case nir_intrinsic_image_deref_atomic_umin:
	case nir_intrinsic_image_deref_atomic_imax:
	case nir_intrinsic_image_deref_atomic_umax:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
	case nir_intrinsic_image_deref_atomic_inc_wrap:
	case nir_intrinsic_image_deref_atomic_dec_wrap:
		result = visit_image_atomic(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_size:
		result = visit_image_size(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_size:
		result = visit_image_size(ctx, instr, false);
		break;
	case nir_intrinsic_shader_clock:
		result = ac_build_shader_clock(&ctx->ac);
		break;
	case nir_intrinsic_discard:
	case nir_intrinsic_discard_if:
		emit_discard(ctx, instr);
		break;
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_membar(&ctx->ac, instr);
		break;
	case nir_intrinsic_memory_barrier_tcs_patch:
		break;
	case nir_intrinsic_control_barrier:
		ac_emit_barrier(&ctx->ac, ctx->stage);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap: {
		LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
						  instr->src[1].ssa->bit_size);
		result = visit_var_atomic(ctx, instr, ptr, 1);
		break;
	}
	case nir_intrinsic_deref_atomic_add:
	case nir_intrinsic_deref_atomic_imin:
	case nir_intrinsic_deref_atomic_umin:
	case nir_intrinsic_deref_atomic_imax:
	case nir_intrinsic_deref_atomic_umax:
	case nir_intrinsic_deref_atomic_and:
	case nir_intrinsic_deref_atomic_or:
	case nir_intrinsic_deref_atomic_xor:
	case nir_intrinsic_deref_atomic_exchange:
	case nir_intrinsic_deref_atomic_comp_swap: {
		LLVMValueRef ptr = get_src(ctx, instr->src[0]);
		result = visit_var_atomic(ctx, instr, ptr, 1);
		break;
	}
	case nir_intrinsic_load_barycentric_pixel:
		result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_centroid:
		result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_sample:
		result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_model:
		result = barycentric_model(ctx);
		break;
	case nir_intrinsic_load_barycentric_at_offset: {
		LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
		result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
		break;
	}
	case nir_intrinsic_load_barycentric_at_sample: {
		LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
		result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
		break;
	}
	case nir_intrinsic_load_interpolated_input: {
		/* We assume any indirect loads have been lowered away */
		ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
		assert(offset);
		assert(offset[0].i32 == 0);

		LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
		unsigned index = nir_intrinsic_base(instr);
		unsigned component = nir_intrinsic_component(instr);
		result = load_interpolated_input(ctx, interp_param, index,
						 component,
						 instr->dest.ssa.num_components,
						 instr->dest.ssa.bit_size);
		break;
	}
	case nir_intrinsic_load_input:
	case nir_intrinsic_load_input_vertex:
		result = load_input(ctx, instr);
		break;
	case nir_intrinsic_emit_vertex:
		ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
		break;
	case nir_intrinsic_end_primitive:
		ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
		break;
	case nir_intrinsic_load_tess_coord:
		result = ctx->abi->load_tess_coord(ctx->abi);
		break;
	case nir_intrinsic_load_tess_level_outer:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, false);
		break;
	case nir_intrinsic_load_tess_level_inner:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, false);
		break;
	case nir_intrinsic_load_tess_level_outer_default:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, true);
		break;
	case nir_intrinsic_load_tess_level_inner_default:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, true);
		break;
	case nir_intrinsic_load_patch_vertices_in:
		result = ctx->abi->load_patch_vertices_in(ctx->abi);
		break;
	case nir_intrinsic_vote_all: {
		LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
		result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}
	case nir_intrinsic_vote_any: {
		LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
		result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}
	case nir_intrinsic_shuffle:
		result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
					  get_src(ctx, instr->src[1]));
		break;
	case nir_intrinsic_reduce:
		result = ac_build_reduce(&ctx->ac,
					 get_src(ctx, instr->src[0]),
					 instr->const_index[0],
					 instr->const_index[1]);
		break;
	case nir_intrinsic_inclusive_scan:
		result = ac_build_inclusive_scan(&ctx->ac,
						 get_src(ctx, instr->src[0]),
						 instr->const_index[0]);
		break;
	case nir_intrinsic_exclusive_scan:
		result = ac_build_exclusive_scan(&ctx->ac,
						 get_src(ctx, instr->src[0]),
						 instr->const_index[0]);
		break;
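	/* The quad_* intrinsics operate within groups of four adjacent lanes.
	 * The four lane arguments of ac_build_quad_swizzle give, for each
	 * slot of a quad, the quad lane to read from, so (1, 0, 3, 2) is a
	 * horizontal swap and (3, 2, 1, 0) a diagonal swap.
	 */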
	case nir_intrinsic_quad_broadcast: {
		unsigned lane = nir_src_as_uint(instr->src[1]);
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
					       lane, lane, lane, lane);
		break;
	}
	case nir_intrinsic_quad_swap_horizontal:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
		break;
	case nir_intrinsic_quad_swap_vertical:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
		break;
	case nir_intrinsic_quad_swap_diagonal:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
		break;
	case nir_intrinsic_quad_swizzle_amd: {
		uint32_t mask = nir_intrinsic_swizzle_mask(instr);
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
					       mask & 0x3, (mask >> 2) & 0x3,
					       (mask >> 4) & 0x3, (mask >> 6) & 0x3);
		break;
	}
	case nir_intrinsic_masked_swizzle_amd: {
		uint32_t mask = nir_intrinsic_swizzle_mask(instr);
		result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
		break;
	}
	case nir_intrinsic_write_invocation_amd:
		result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
					    get_src(ctx, instr->src[1]),
					    get_src(ctx, instr->src[2]));
		break;
	case nir_intrinsic_mbcnt_amd:
		result = ac_build_mbcnt(&ctx->ac, get_src(ctx, instr->src[0]));
		break;
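	/* Scratch is modeled as a plain byte array (see setup_scratch), so
	 * loads and stores just bitcast an element pointer to the desired
	 * scalar or vector type. Stores additionally split the NIR write
	 * mask into runs of consecutive components, one vector store each.
	 */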
	case nir_intrinsic_load_scratch: {
		LLVMValueRef offset = get_src(ctx, instr->src[0]);
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
						 offset);
		LLVMTypeRef comp_type =
			LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
		LLVMTypeRef vec_type =
			instr->dest.ssa.num_components == 1 ? comp_type :
			LLVMVectorType(comp_type, instr->dest.ssa.num_components);
		unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
				       LLVMPointerType(vec_type, addr_space), "");
		result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		break;
	}
	case nir_intrinsic_store_scratch: {
		LLVMValueRef offset = get_src(ctx, instr->src[1]);
		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
						 offset);
		LLVMTypeRef comp_type =
			LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
		unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
				       LLVMPointerType(comp_type, addr_space), "");
		LLVMValueRef src = get_src(ctx, instr->src[0]);
		unsigned wrmask = nir_intrinsic_write_mask(instr);
		while (wrmask) {
			int start, count;
			u_bit_scan_consecutive_range(&wrmask, &start, &count);

			LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
			LLVMValueRef offset_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &offset, 1, "");
			LLVMTypeRef vec_type =
				count == 1 ? comp_type : LLVMVectorType(comp_type, count);
			offset_ptr = LLVMBuildBitCast(ctx->ac.builder,
						      offset_ptr,
						      LLVMPointerType(vec_type, addr_space),
						      "");
			LLVMValueRef offset_src =
				ac_extract_components(&ctx->ac, src, start, count);
			LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
		}
		break;
	}
	case nir_intrinsic_load_constant: {
		unsigned base = nir_intrinsic_base(instr);
		unsigned range = nir_intrinsic_range(instr);

		LLVMValueRef offset = get_src(ctx, instr->src[0]);
		offset = LLVMBuildAdd(ctx->ac.builder, offset,
				      LLVMConstInt(ctx->ac.i32, base, false), "");

		/* Clamp the offset to avoid out-of-bound access because global
		 * instructions can't handle them.
		 */
		LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
		LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
						  offset, size, "");
		offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");

		LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data,
						 offset);
		LLVMTypeRef comp_type =
			LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
		LLVMTypeRef vec_type =
			instr->dest.ssa.num_components == 1 ? comp_type :
			LLVMVectorType(comp_type, instr->dest.ssa.num_components);
		unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
		ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
				       LLVMPointerType(vec_type, addr_space), "");
		result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		break;
	}
	default:
		fprintf(stderr, "Unknown intrinsic: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		break;
	}
	if (result) {
		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}
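/* Bindless handles are read from a driver-managed UBO: each uniform slot
 * contributes 4 bytes to the base offset while a handle itself occupies
 * 8 bytes, giving the byte offset
 * base_index * 4 + (dynamic_index + constant_index) * 8.
 */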
static LLVMValueRef
get_bindless_index_from_uniform(struct ac_nir_context *ctx,
				unsigned base_index,
				unsigned constant_index,
				LLVMValueRef dynamic_index)
{
	LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
	LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
					  LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

	/* Bindless uniforms are 64bit so multiply the index by 8 */
	index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

	LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
						NULL, 0, 0, true, true);

	return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     bool image, bool write)
{
	LLVMValueRef index = NULL;
	unsigned constant_index = 0;
	unsigned descriptor_set;
	unsigned base_index;
	bool bindless = false;

	if (!deref_instr) {
		descriptor_set = 0;
		if (image) {
			nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
			base_index = 0;
			bindless = true;
			index = get_src(ctx, img_instr->src[0]);
		} else {
			nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
			int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
								 nir_tex_src_sampler_handle);
			if (sampSrcIdx != -1) {
				base_index = 0;
				bindless = true;
				index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
			} else {
				assert(tex_instr && !image);
				base_index = tex_instr->sampler_index;
			}
		}
	} else {
		while (deref_instr->deref_type != nir_deref_type_var) {
			if (deref_instr->deref_type == nir_deref_type_array) {
				unsigned array_size = glsl_get_aoa_size(deref_instr->type);
				if (!array_size)
					array_size = 1;

				if (nir_src_is_const(deref_instr->arr.index)) {
					constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
				} else {
					LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

					indirect = LLVMBuildMul(ctx->ac.builder, indirect,
								LLVMConstInt(ctx->ac.i32, array_size, false), "");

					if (!index)
						index = indirect;
					else
						index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
				}

				deref_instr = nir_src_as_deref(deref_instr->parent);
			} else if (deref_instr->deref_type == nir_deref_type_struct) {
				unsigned sidx = deref_instr->strct.index;
				deref_instr = nir_src_as_deref(deref_instr->parent);
				constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
			} else {
				unreachable("Unsupported deref type");
			}
		}
		descriptor_set = deref_instr->var->data.descriptor_set;

		if (deref_instr->var->data.bindless) {
			/* For now just assert on unhandled variable types */
			assert(deref_instr->var->data.mode == nir_var_uniform);

			base_index = deref_instr->var->data.driver_location;

			bindless = true;
			index = index ? index : ctx->ac.i32_0;
			index = get_bindless_index_from_uniform(ctx, base_index,
								constant_index, index);
		} else {
			base_index = deref_instr->var->data.binding;
		}
	}

	return ctx->abi->load_sampler_desc(ctx->abi,
					   descriptor_set,
					   base_index,
					   constant_index, index,
					   desc_type, image, write, bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef img7, samp0;

	if (ctx->ac.chip_class >= GFX8)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->ac.i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					LLVMConstInt(ctx->ac.i32, 0, 0), "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
static void tex_fetch_ptrs(struct ac_nir_context *ctx,
			   nir_tex_instr *instr,
			   LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
			   LLVMValueRef *fmask_ptr)
{
	nir_deref_instr *texture_deref_instr = NULL;
	nir_deref_instr *sampler_deref_instr = NULL;
	int plane = -1;

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		case nir_tex_src_sampler_deref:
			sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		case nir_tex_src_plane:
			plane = nir_src_as_int(instr->src[i].src);
			break;
		default:
			break;
		}
	}

	if (!sampler_deref_instr)
		sampler_deref_instr = texture_deref_instr;

	enum ac_descriptor_type main_descriptor =
		instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

	if (plane >= 0) {
		assert(instr->op != nir_texop_txf_ms &&
		       instr->op != nir_texop_samples_identical);
		assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

		main_descriptor = AC_DESC_PLANE_0 + plane;
	}

	if (instr->op == nir_texop_fragment_mask_fetch) {
		/* The fragment mask is fetched from the compressed
		 * multisampled surface.
		 */
		main_descriptor = AC_DESC_FMASK;
	}

	*res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr, false, false);

	if (samp_ptr) {
		*samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr, false, false);
		if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
	}
	if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
			  instr->op == nir_texop_samples_identical))
		*fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, &instr->instr, false, false);
}
static LLVMValueRef
apply_round_slice(struct ac_llvm_context *ctx,
		  LLVMValueRef coord)
{
	coord = ac_to_float(ctx, coord);
	coord = ac_build_round(ctx, coord);
	coord = ac_to_integer(ctx, coord);
	return coord;
}
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
	LLVMValueRef result = NULL;
	struct ac_image_args args = { 0 };
	LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
	LLVMValueRef ddx = NULL, ddy = NULL;
	unsigned offset_src = 0;

	tex_fetch_ptrs(ctx, instr, &args.resource, &args.sampler, &fmask_ptr);

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_coord: {
			LLVMValueRef coord = get_src(ctx, instr->src[i].src);
			for (unsigned chan = 0; chan < instr->coord_components; ++chan)
				args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
			break;
		}
		case nir_tex_src_projector:
			break;
		case nir_tex_src_comparator:
			if (instr->is_shadow) {
				args.compare = get_src(ctx, instr->src[i].src);
				args.compare = ac_to_float(&ctx->ac, args.compare);
			}
			break;
		case nir_tex_src_offset:
			args.offset = get_src(ctx, instr->src[i].src);
			offset_src = i;
			break;
		case nir_tex_src_bias:
			if (instr->op == nir_texop_txb)
				args.bias = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_lod: {
			if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
				args.level_zero = true;
			else
				args.lod = get_src(ctx, instr->src[i].src);
			break;
		}
		case nir_tex_src_ms_index:
			sample_index = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_ms_mcs:
			break;
		case nir_tex_src_ddx:
			ddx = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_ddy:
			ddy = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_texture_offset:
		case nir_tex_src_sampler_offset:
		case nir_tex_src_plane:
		default:
			break;
		}
	}

	if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
		result = get_buffer_size(ctx, args.resource, true);
		goto write_result;
	}
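	/* nir_texop_texture_samples is answered from the resource descriptor
	 * itself: in dword 3, the field at bits 16..19 doubles as
	 * log2(samples) for MSAA resources, and bits 28..31 hold the
	 * resource TYPE. Masking the type with 0xe matches both MSAA types
	 * (0xe and 0xf); everything else reports a single sample.
	 */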
	if (instr->op == nir_texop_texture_samples) {
		LLVMValueRef res, samples, is_msaa;
		res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
		samples = LLVMBuildExtractElement(ctx->ac.builder, res,
						  LLVMConstInt(ctx->ac.i32, 3, false), "");
		is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
					LLVMConstInt(ctx->ac.i32, 28, false), "");
		is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa,
				       LLVMConstInt(ctx->ac.i32, 0xe, false), "");
		is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
					LLVMConstInt(ctx->ac.i32, 0xe, false), "");

		samples = LLVMBuildLShr(ctx->ac.builder, samples,
					LLVMConstInt(ctx->ac.i32, 16, false), "");
		samples = LLVMBuildAnd(ctx->ac.builder, samples,
				       LLVMConstInt(ctx->ac.i32, 0xf, false), "");
		samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1,
				       samples, "");
		samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples,
					  ctx->ac.i32_1, "");
		result = samples;
		goto write_result;
	}
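	/* For sampling opcodes the immediate texel offsets get packed into a
	 * single dword, six bits per channel. txf/txf_ms are excluded here;
	 * their offsets are added directly to the integer coordinates
	 * further below.
	 */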
	if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
		LLVMValueRef offset[3], pack;
		for (unsigned chan = 0; chan < 3; ++chan)
			offset[chan] = ctx->ac.i32_0;

		unsigned num_components = ac_get_llvm_num_components(args.offset);
		for (unsigned chan = 0; chan < num_components; chan++) {
			offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
			offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
						    LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
			if (chan)
				offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
							    LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
		}
		pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
		args.offset = pack;
	}
	/* Section 8.23.1 (Depth Texture Comparison Mode) of the
	 * OpenGL 4.5 spec says:
	 *
	 *    "If the texture’s internal format indicates a fixed-point
	 *     depth texture, then D_t and D_ref are clamped to the
	 *     range [0, 1]; otherwise no clamping is performed."
	 *
	 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
	 * so the depth comparison value isn't clamped for Z16 and
	 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
	 * an explicitly clamped 32-bit float format.
	 */
	if (args.compare &&
	    ctx->ac.chip_class >= GFX8 &&
	    ctx->ac.chip_class <= GFX9 &&
	    ctx->abi->clamp_shadow_reference) {
		LLVMValueRef upgraded, clamped;

		upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
						   LLVMConstInt(ctx->ac.i32, 3, false), "");
		upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
					 LLVMConstInt(ctx->ac.i32, 29, false), "");
		upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
		clamped = ac_build_clamp(&ctx->ac, args.compare);
		args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped,
					       args.compare, "");
	}
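	/* GFX9 addresses 1D textures as 2D, so a one-channel gradient has to
	 * be widened to two channels; the extra channel is zero-filled by
	 * the second loop below.
	 */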
	/* pack derivatives */
	if (ddx || ddy) {
		int num_src_deriv_channels, num_dest_deriv_channels;
		switch (instr->sampler_dim) {
		case GLSL_SAMPLER_DIM_3D:
		case GLSL_SAMPLER_DIM_CUBE:
			num_src_deriv_channels = 3;
			num_dest_deriv_channels = 3;
			break;
		case GLSL_SAMPLER_DIM_2D:
		default:
			num_src_deriv_channels = 2;
			num_dest_deriv_channels = 2;
			break;
		case GLSL_SAMPLER_DIM_1D:
			num_src_deriv_channels = 1;
			if (ctx->ac.chip_class == GFX9) {
				num_dest_deriv_channels = 2;
			} else {
				num_dest_deriv_channels = 1;
			}
			break;
		}

		for (unsigned i = 0; i < num_src_deriv_channels; i++) {
			args.derivs[i] = ac_to_float(&ctx->ac,
				ac_llvm_extract_elem(&ctx->ac, ddx, i));
			args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
				ac_llvm_extract_elem(&ctx->ac, ddy, i));
		}
		for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
			args.derivs[i] = ctx->ac.f32_0;
			args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
		}
	}

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
		for (unsigned chan = 0; chan < instr->coord_components; chan++)
			args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
		if (instr->coord_components == 3)
			args.coords[3] = LLVMGetUndef(ctx->ac.f32);
		ac_prepare_cube_coords(&ctx->ac,
			instr->op == nir_texop_txd, instr->is_array,
			instr->op == nir_texop_lod, args.coords, args.derivs);
	}
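	/* Array layer coordinates arrive as floats from NIR while the
	 * hardware expects an integer layer, so non-fetch array ops round
	 * the slice to the nearest integer first (apply_round_slice).
	 */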
	/* Texture coordinates fixups */
	if (instr->coord_components > 1 &&
	    instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
	    instr->is_array &&
	    instr->op != nir_texop_txf) {
		args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
	}

	if (instr->coord_components > 2 &&
	    (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
	    instr->is_array &&
	    instr->op != nir_texop_txf &&
	    instr->op != nir_texop_txf_ms &&
	    instr->op != nir_texop_fragment_fetch &&
	    instr->op != nir_texop_fragment_mask_fetch) {
		args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
	}

	if (ctx->ac.chip_class == GFX9 &&
	    instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
	    instr->op != nir_texop_lod) {
		LLVMValueRef filler;
		if (instr->op == nir_texop_txf)
			filler = ctx->ac.i32_0;
		else
			filler = LLVMConstReal(ctx->ac.f32, 0.5);

		if (instr->is_array)
			args.coords[2] = args.coords[1];
		args.coords[1] = filler;
	}

	/* Pack sample index */
	if (sample_index && (instr->op == nir_texop_txf_ms ||
			     instr->op == nir_texop_fragment_fetch))
		args.coords[instr->coord_components] = sample_index;
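	/* samples_identical is answered by fetching the FMASK word for the
	 * pixel: a value of zero means every sample maps to fragment 0,
	 * i.e. all samples are identical.
	 */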
	if (instr->op == nir_texop_samples_identical) {
		struct ac_image_args txf_args = { 0 };
		memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));

		txf_args.dmask = 0xf;
		txf_args.resource = fmask_ptr;
		txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
		result = build_tex_intrinsic(ctx, instr, &txf_args);

		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
		result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
		goto write_result;
	}

	if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
	    instr->op != nir_texop_txs &&
	    instr->op != nir_texop_fragment_fetch &&
	    instr->op != nir_texop_fragment_mask_fetch) {
		unsigned sample_chan = instr->is_array ? 3 : 2;
		args.coords[sample_chan] = adjust_sample_index_using_fmask(
			&ctx->ac, args.coords[0], args.coords[1],
			instr->is_array ? args.coords[2] : NULL,
			args.coords[sample_chan], fmask_ptr);
	}

	if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
		int num_offsets = instr->src[offset_src].src.ssa->num_components;
		num_offsets = MIN2(num_offsets, instr->coord_components);
		for (unsigned i = 0; i < num_offsets; ++i) {
			args.coords[i] = LLVMBuildAdd(
				ctx->ac.builder, args.coords[i],
				LLVMConstInt(ctx->ac.i32, nir_src_comp_as_uint(instr->src[offset_src].src, i), false), "");
		}
		args.offset = NULL;
	}
	/* DMASK was repurposed for GATHER4. 4 components are always
	 * returned and DMASK works like a swizzle - it selects
	 * the component to fetch. The only valid DMASK values are
	 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
	 * (red,red,red,red) etc.) The ISA document doesn't mention
	 * this.
	 */
	args.dmask = 0xf;
	if (instr->op == nir_texop_tg4) {
		if (instr->is_shadow)
			args.dmask = 1;
		else
			args.dmask = 1 << instr->component;
	}

	if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
		args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
		args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
	}

	/* Adjust the number of coordinates because we only need (x,y) for 2D
	 * multisampled images and (x,y,layer) for 2D multisampled layered
	 * images or for multisampled input attachments.
	 */
	if (instr->op == nir_texop_fragment_mask_fetch) {
		if (args.dim == ac_image_2dmsaa) {
			args.dim = ac_image_2d;
		} else {
			assert(args.dim == ac_image_2darraymsaa);
			args.dim = ac_image_2darray;
		}
	}

	result = build_tex_intrinsic(ctx, instr, &args);

	if (instr->op == nir_texop_query_levels)
		result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
	else if (instr->is_shadow && instr->is_new_style_shadow &&
		 instr->op != nir_texop_txs && instr->op != nir_texop_lod &&
		 instr->op != nir_texop_tg4)
		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
	else if (instr->op == nir_texop_txs &&
		 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
		 instr->is_array) {
		LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
		LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
		LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
		z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
		result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
	} else if (ctx->ac.chip_class == GFX9 &&
		   instr->op == nir_texop_txs &&
		   instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
		   instr->is_array) {
		LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
		LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
		result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
						LLVMConstInt(ctx->ac.i32, 1, false), "");
	} else if (instr->dest.ssa.num_components != 4)
		result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);

write_result:
	if (result) {
		assert(instr->dest.is_ssa);
		result = ac_to_integer(&ctx->ac, result);
		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}
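/* Phis are created empty and their incoming values are filled in by
 * phi_post_pass once all blocks have been translated, since a NIR phi may
 * reference SSA defs from blocks that have not been visited yet.
 */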
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
	LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
	LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

	ctx->ssa_defs[instr->dest.ssa.index] = result;
	_mesa_hash_table_insert(ctx->phis, instr, result);
}

static void visit_post_phi(struct ac_nir_context *ctx,
			   nir_phi_instr *instr,
			   LLVMValueRef llvm_phi)
{
	nir_foreach_phi_src(src, instr) {
		LLVMBasicBlockRef block = get_block(ctx, src->pred);
		LLVMValueRef llvm_src = get_src(ctx, src->src);

		LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
	}
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
	hash_table_foreach(ctx->phis, entry) {
		visit_post_phi(ctx, (nir_phi_instr *)entry->key,
			       (LLVMValueRef)entry->data);
	}
}
static void visit_ssa_undef(struct ac_nir_context *ctx,
			    const nir_ssa_undef_instr *instr)
{
	unsigned num_components = instr->def.num_components;
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	LLVMValueRef undef;

	if (num_components == 1)
		undef = LLVMGetUndef(type);
	else {
		undef = LLVMGetUndef(LLVMVectorType(type, num_components));
	}
	ctx->ssa_defs[instr->def.index] = undef;
}
static void visit_jump(struct ac_llvm_context *ctx,
		       const nir_jump_instr *instr)
{
	switch (instr->type) {
	case nir_jump_break:
		ac_build_break(ctx);
		break;
	case nir_jump_continue:
		ac_build_continue(ctx);
		break;
	default:
		fprintf(stderr, "Unknown NIR jump instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}
}
static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
		       enum glsl_base_type type)
{
	switch (type) {
	case GLSL_TYPE_INT:
	case GLSL_TYPE_UINT:
	case GLSL_TYPE_BOOL:
	case GLSL_TYPE_SUBROUTINE:
		return ac->i32;
	case GLSL_TYPE_INT8:
	case GLSL_TYPE_UINT8:
		return ac->i8;
	case GLSL_TYPE_INT16:
	case GLSL_TYPE_UINT16:
		return ac->i16;
	case GLSL_TYPE_FLOAT:
		return ac->f32;
	case GLSL_TYPE_FLOAT16:
		return ac->f16;
	case GLSL_TYPE_INT64:
	case GLSL_TYPE_UINT64:
		return ac->i64;
	case GLSL_TYPE_DOUBLE:
		return ac->f64;
	default:
		unreachable("unknown GLSL type");
	}
}
static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
		  const struct glsl_type *type)
{
	if (glsl_type_is_scalar(type)) {
		return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
	}

	if (glsl_type_is_vector(type)) {
		return LLVMVectorType(
			glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
			glsl_get_vector_elements(type));
	}

	if (glsl_type_is_matrix(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_column_type(type)),
			glsl_get_matrix_columns(type));
	}

	if (glsl_type_is_array(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_array_element(type)),
			glsl_get_length(type));
	}

	assert(glsl_type_is_struct_or_ifc(type));

	LLVMTypeRef member_types[glsl_get_length(type)];

	for (unsigned i = 0; i < glsl_get_length(type); i++) {
		member_types[i] =
			glsl_to_llvm_type(ac,
					  glsl_get_struct_field(type, i));
	}

	return LLVMStructTypeInContext(ac->context, member_types,
				       glsl_get_length(type), false);
}
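/* Only shared (LDS) and global derefs are materialized as pointers here;
 * all other variable modes are handled directly by the load/store
 * visitors.
 */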
static void visit_deref(struct ac_nir_context *ctx,
			nir_deref_instr *instr)
{
	if (instr->mode != nir_var_mem_shared &&
	    instr->mode != nir_var_mem_global)
		return;

	LLVMValueRef result = NULL;
	switch(instr->deref_type) {
	case nir_deref_type_var: {
		struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
		result = entry->data;
		break;
	}
	case nir_deref_type_struct:
		if (instr->mode == nir_var_mem_global) {
			nir_deref_instr *parent = nir_deref_instr_parent(instr);
			uint64_t offset = glsl_get_struct_field_offset(parent->type,
								       instr->strct.index);
			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
						  LLVMConstInt(ctx->ac.i32, offset, 0));
		} else {
			result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
					       LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
		}
		break;
	case nir_deref_type_array:
		if (instr->mode == nir_var_mem_global) {
			nir_deref_instr *parent = nir_deref_instr_parent(instr);
			unsigned stride = glsl_get_explicit_stride(parent->type);

			if ((glsl_type_is_matrix(parent->type) &&
			     glsl_matrix_type_is_row_major(parent->type)) ||
			    (glsl_type_is_vector(parent->type) && stride == 0))
				stride = type_scalar_size_bytes(parent->type);

			LLVMValueRef index = get_src(ctx, instr->arr.index);
			if (LLVMTypeOf(index) != ctx->ac.i64)
				index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

			LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
		} else {
			result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
					       get_src(ctx, instr->arr.index));
		}
		break;
	case nir_deref_type_ptr_as_array:
		if (instr->mode == nir_var_mem_global) {
			unsigned stride = nir_deref_instr_ptr_as_array_stride(instr);

			LLVMValueRef index = get_src(ctx, instr->arr.index);
			if (LLVMTypeOf(index) != ctx->ac.i64)
				index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

			LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
		} else {
			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
						  get_src(ctx, instr->arr.index));
		}
		break;
	case nir_deref_type_cast: {
		result = get_src(ctx, instr->parent);

		/* We can't use the structs from LLVM because the shader
		 * specifies its own offsets. */
		LLVMTypeRef pointee_type = ctx->ac.i8;
		if (instr->mode == nir_var_mem_shared)
			pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);

		unsigned address_space;

		switch(instr->mode) {
		case nir_var_mem_shared:
			address_space = AC_ADDR_SPACE_LDS;
			break;
		case nir_var_mem_global:
			address_space = AC_ADDR_SPACE_GLOBAL;
			break;
		default:
			unreachable("Unhandled address space");
		}

		LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);

		if (LLVMTypeOf(result) != type) {
			if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
				result = LLVMBuildBitCast(ctx->ac.builder, result,
							  type, "");
			} else {
				result = LLVMBuildIntToPtr(ctx->ac.builder, result,
							   type, "");
			}
		}
		break;
	}
	default:
		unreachable("Unhandled deref_instr deref type");
	}

	ctx->ssa_defs[instr->dest.ssa.index] = result;
}
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list);

static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
	nir_foreach_instr(instr, block)
	{
		switch (instr->type) {
		case nir_instr_type_alu:
			visit_alu(ctx, nir_instr_as_alu(instr));
			break;
		case nir_instr_type_load_const:
			visit_load_const(ctx, nir_instr_as_load_const(instr));
			break;
		case nir_instr_type_intrinsic:
			visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
			break;
		case nir_instr_type_tex:
			visit_tex(ctx, nir_instr_as_tex(instr));
			break;
		case nir_instr_type_phi:
			visit_phi(ctx, nir_instr_as_phi(instr));
			break;
		case nir_instr_type_ssa_undef:
			visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
			break;
		case nir_instr_type_jump:
			visit_jump(&ctx->ac, nir_instr_as_jump(instr));
			break;
		case nir_instr_type_deref:
			visit_deref(ctx, nir_instr_as_deref(instr));
			break;
		default:
			fprintf(stderr, "Unknown NIR instr type: ");
			nir_print_instr(instr, stderr);
			fprintf(stderr, "\n");
			abort();
		}
	}

	_mesa_hash_table_insert(ctx->defs, block,
				LLVMGetInsertBlock(ctx->ac.builder));
}
static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
	LLVMValueRef value = get_src(ctx, if_stmt->condition);

	nir_block *then_block =
		(nir_block *) exec_list_get_head(&if_stmt->then_list);

	ac_build_uif(&ctx->ac, value, then_block->index);

	visit_cf_list(ctx, &if_stmt->then_list);

	if (!exec_list_is_empty(&if_stmt->else_list)) {
		nir_block *else_block =
			(nir_block *) exec_list_get_head(&if_stmt->else_list);

		ac_build_else(&ctx->ac, else_block->index);
		visit_cf_list(ctx, &if_stmt->else_list);
	}

	ac_build_endif(&ctx->ac, then_block->index);
}

static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
	nir_block *first_loop_block =
		(nir_block *) exec_list_get_head(&loop->body);

	ac_build_bgnloop(&ctx->ac, first_loop_block->index);

	visit_cf_list(ctx, &loop->body);

	ac_build_endloop(&ctx->ac, first_loop_block->index);
}
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list)
	{
		switch (node->type) {
		case nir_cf_node_block:
			visit_block(ctx, nir_cf_node_as_block(node));
			break;

		case nir_cf_node_if:
			visit_if(ctx, nir_cf_node_as_if(node));
			break;

		case nir_cf_node_loop:
			visit_loop(ctx, nir_cf_node_as_loop(node));
			break;

		default:
			assert(0);
		}
	}
}
void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
			     struct ac_shader_abi *abi,
			     struct nir_shader *nir,
			     struct nir_variable *variable,
			     gl_shader_stage stage)
{
	unsigned output_loc = variable->data.driver_location / 4;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		int idx = variable->data.location + variable->data.index;
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = nir->info.clip_distance_array_size +
				     nir->info.cull_distance_array_size;

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
		}
	}

	bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
	LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
	for (unsigned i = 0; i < attrib_count; ++i) {
		for (unsigned chan = 0; chan < 4; chan++) {
			abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
				ac_build_alloca_undef(ctx, type, "");
		}
	}
}
static void
setup_locals(struct ac_nir_context *ctx,
	     struct nir_function *func)
{
	int i, j;
	ctx->num_locals = 0;
	nir_foreach_variable(variable, &func->impl->locals) {
		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
		variable->data.driver_location = ctx->num_locals * 4;
		variable->data.location_frac = 0;
		ctx->num_locals += attrib_count;
	}
	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
	if (!ctx->locals)
		return;

	for (i = 0; i < ctx->num_locals; i++) {
		for (j = 0; j < 4; j++) {
			ctx->locals[i * 4 + j] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
		}
	}
}
static void
setup_scratch(struct ac_nir_context *ctx,
	      struct nir_shader *shader)
{
	if (shader->scratch_size == 0)
		return;

	ctx->scratch = ac_build_alloca_undef(&ctx->ac,
					     LLVMArrayType(ctx->ac.i8, shader->scratch_size),
					     "scratch");
}
static void
setup_constant_data(struct ac_nir_context *ctx,
		    struct nir_shader *shader)
{
	if (!shader->constant_data)
		return;

	LLVMValueRef data =
		LLVMConstStringInContext(ctx->ac.context,
					 shader->constant_data,
					 shader->constant_data_size,
					 true);
	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

	/* We want to put the constant data in the CONST address space so that
	 * we can use scalar loads. However, LLVM versions before 10 put these
	 * variables in the same section as the code, which is unacceptable
	 * for RadeonSI as it needs to relocate all the data sections after
	 * the code sections. See https://reviews.llvm.org/D65813.
	 */
	unsigned address_space =
		LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

	LLVMValueRef global =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "const_data",
					    address_space);

	LLVMSetInitializer(global, data);
	LLVMSetGlobalConstant(global, true);
	LLVMSetVisibility(global, LLVMHiddenVisibility);
	ctx->constant_data = global;
}
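/* All shared memory is declared as a single byte array whose base pointer
 * is cast as needed. The 64 KiB alignment matches the maximum LDS size;
 * presumably this keeps LLVM from assuming a smaller alignment for
 * pointers derived from it.
 */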
static void
setup_shared(struct ac_nir_context *ctx,
	     struct nir_shader *nir)
{
	if (ctx->ac.lds)
		return;

	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
					 nir->info.cs.shared_size);

	LLVMValueRef lds =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "compute_lds",
					    AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(lds, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
				       LLVMPointerType(ctx->ac.i8,
						       AC_ADDR_SPACE_LDS), "");
}
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
		      const struct ac_shader_args *args, struct nir_shader *nir)
{
	struct ac_nir_context ctx = {};
	struct nir_function *func;

	ctx.ac = *ac;
	ctx.abi = abi;
	ctx.args = args;

	ctx.stage = nir->info.stage;
	ctx.info = &nir->info;

	ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

	nir_foreach_variable(variable, &nir->outputs)
		ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
					     ctx.stage);

	ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);

	func = (struct nir_function *)exec_list_get_head(&nir->functions);

	nir_index_ssa_defs(func->impl);
	ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

	setup_locals(&ctx, func);
	setup_scratch(&ctx, nir);
	setup_constant_data(&ctx, nir);

	if (gl_shader_stage_is_compute(nir->info.stage))
		setup_shared(&ctx, nir);

	visit_cf_list(&ctx, &func->impl->body);
	phi_post_pass(&ctx);

	if (!gl_shader_stage_is_compute(nir->info.stage))
		ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
				      ctx.abi->outputs);

	free(ctx.locals);
	free(ctx.ssa_defs);
	ralloc_free(ctx.defs);
	ralloc_free(ctx.phis);
	ralloc_free(ctx.vars);
}
bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
	bool progress = false;

	/* Lower large variables to scratch first so that we won't bloat the
	 * shader by generating large if ladders for them. We later lower
	 * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
	 */
	NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
		 nir_var_function_temp,
		 256,
		 glsl_get_natural_size_align_bytes);

	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
	 */
	bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

	/* TODO: Indirect indexing of GS inputs is unimplemented.
	 *
	 * TCS and TES load inputs directly from LDS or offchip memory, so
	 * indirect indexing is trivial.
	 */
	nir_variable_mode indirect_mask = 0;
	if (nir->info.stage == MESA_SHADER_GEOMETRY ||
	    (nir->info.stage != MESA_SHADER_TESS_CTRL &&
	     nir->info.stage != MESA_SHADER_TESS_EVAL &&
	     !llvm_has_working_vgpr_indexing)) {
		indirect_mask |= nir_var_shader_in;
	}
	if (!llvm_has_working_vgpr_indexing &&
	    nir->info.stage != MESA_SHADER_TESS_CTRL)
		indirect_mask |= nir_var_shader_out;

	/* TODO: We shouldn't need to do this, however LLVM isn't currently
	 * smart enough to handle indirects without causing excess spilling
	 * causing the gpu to hang.
	 *
	 * See the following thread for more details of the problem:
	 * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
	 */
	indirect_mask |= nir_var_function_temp;

	progress |= nir_lower_indirect_derefs(nir, indirect_mask);
	return progress;
}
static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
	if (intrin->intrinsic != nir_intrinsic_store_deref)
		return 0;

	nir_variable *var =
		nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

	if (var->data.mode != nir_var_shader_out)
		return 0;

	unsigned writemask = 0;
	const int location = var->data.location;
	unsigned first_component = var->data.location_frac;
	unsigned num_comps = intrin->dest.ssa.num_components;

	if (location == VARYING_SLOT_TESS_LEVEL_INNER)
		writemask = ((1 << (num_comps + 1)) - 1) << first_component;
	else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
		writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

	return writemask;
}
*cf_node
, unsigned *upper_block_tf_writemask
,
4970 unsigned *cond_block_tf_writemask
,
4971 bool *tessfactors_are_def_in_all_invocs
, bool is_nested_cf
)
4973 switch (cf_node
->type
) {
4974 case nir_cf_node_block
: {
4975 nir_block
*block
= nir_cf_node_as_block(cf_node
);
4976 nir_foreach_instr(instr
, block
) {
4977 if (instr
->type
!= nir_instr_type_intrinsic
)
4980 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
4981 if (intrin
->intrinsic
== nir_intrinsic_control_barrier
) {
4983 /* If we find a barrier in nested control flow put this in the
4984 * too hard basket. In GLSL this is not possible but it is in
4988 *tessfactors_are_def_in_all_invocs
= false;
4992 /* The following case must be prevented:
4993 * gl_TessLevelInner = ...;
4995 * if (gl_InvocationID == 1)
4996 * gl_TessLevelInner = ...;
4998 * If you consider disjoint code segments separated by barriers, each
4999 * such segment that writes tess factor channels should write the same
5000 * channels in all codepaths within that segment.
5002 if (upper_block_tf_writemask
|| cond_block_tf_writemask
) {
5003 /* Accumulate the result: */
5004 *tessfactors_are_def_in_all_invocs
&=
5005 !(*cond_block_tf_writemask
& ~(*upper_block_tf_writemask
));
5007 /* Analyze the next code segment from scratch. */
5008 *upper_block_tf_writemask
= 0;
5009 *cond_block_tf_writemask
= 0;
5012 *upper_block_tf_writemask
|= get_inst_tessfactor_writemask(intrin
);
5017 case nir_cf_node_if
: {
5018 unsigned then_tessfactor_writemask
= 0;
5019 unsigned else_tessfactor_writemask
= 0;
5021 nir_if
*if_stmt
= nir_cf_node_as_if(cf_node
);
5022 foreach_list_typed(nir_cf_node
, nested_node
, node
, &if_stmt
->then_list
) {
5023 scan_tess_ctrl(nested_node
, &then_tessfactor_writemask
,
5024 cond_block_tf_writemask
,
5025 tessfactors_are_def_in_all_invocs
, true);
5028 foreach_list_typed(nir_cf_node
, nested_node
, node
, &if_stmt
->else_list
) {
5029 scan_tess_ctrl(nested_node
, &else_tessfactor_writemask
,
5030 cond_block_tf_writemask
,
5031 tessfactors_are_def_in_all_invocs
, true);
5034 if (then_tessfactor_writemask
|| else_tessfactor_writemask
) {
5035 /* If both statements write the same tess factor channels,
5036 * we can say that the upper block writes them too.
5038 *upper_block_tf_writemask
|= then_tessfactor_writemask
&
5039 else_tessfactor_writemask
;
5040 *cond_block_tf_writemask
|= then_tessfactor_writemask
|
5041 else_tessfactor_writemask
;
5046 case nir_cf_node_loop
: {
5047 nir_loop
*loop
= nir_cf_node_as_loop(cf_node
);
5048 foreach_list_typed(nir_cf_node
, nested_node
, node
, &loop
->body
) {
5049 scan_tess_ctrl(nested_node
, cond_block_tf_writemask
,
5050 cond_block_tf_writemask
,
5051 tessfactors_are_def_in_all_invocs
, true);
5057 unreachable("unknown cf node type");
bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
	assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

	/* The pass works as follows:
	 * If all codepaths write tess factors, we can say that all
	 * invocations define tess factors.
	 *
	 * Each tess factor channel is tracked separately.
	 */
	unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
	unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

	/* Initial value = true. Here the pass will accumulate results from
	 * multiple segments surrounded by barriers. If tess factors aren't
	 * written at all, it's a shader bug and we don't care if this will be
	 * true.
	 */
	bool tessfactors_are_def_in_all_invocs = true;

	nir_foreach_function(function, nir) {
		if (function->impl) {
			foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
				scan_tess_ctrl(node, &main_block_tf_writemask,
					       &cond_block_tf_writemask,
					       &tessfactors_are_def_in_all_invocs,
					       false);
			}
		}
	}

	/* Accumulate the result for the last code segment separated by a
	 * barrier.
	 */
	if (main_block_tf_writemask || cond_block_tf_writemask) {
		tessfactors_are_def_in_all_invocs &=
			!(cond_block_tf_writemask & ~main_block_tf_writemask);
	}

	return tessfactors_are_def_in_all_invocs;
}