/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
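/* Summary comment (added for readability): ac_nir_context is the per-shader
 * translation state for this NIR -> LLVM pass. ssa_defs maps NIR SSA indices
 * to LLVM values, and the hash tables map NIR blocks, phis and variables to
 * their LLVM counterparts; the field list below is as declared in this file.
 */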
struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;

   LLVMValueRef *ssa_defs;

   LLVMValueRef constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *vars;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image);

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     LLVMValueRef index,
                                     bool image, bool write);
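/* Descriptive comment (added): build_store_values_extended scatters the
 * components of "vec" to the pointers in "values", stepping through the
 * pointer array with "value_stride"; behaviour follows the loop body below.
 */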
static void build_store_values_extended(struct ac_llvm_context *ac,
                                        LLVMValueRef *values,
                                        unsigned value_count,
                                        unsigned value_stride,
                                        LLVMValueRef vec)
{
   LLVMBuilderRef builder = ac->builder;
   unsigned i;

   for (i = 0; i < value_count; i++) {
      LLVMValueRef ptr = values[i * value_stride];
      LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
      LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
      LLVMBuildStore(builder, value, ptr);
   }
}
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
                                const nir_ssa_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
   return nir->ssa_defs[src.ssa->index];
}
static LLVMValueRef get_memory_ptr(struct ac_nir_context *ctx, nir_src src,
                                   unsigned bit_size)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
   int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

   return LLVMBuildBitCast(ctx->ac.builder, ptr,
                           LLVMPointerType(type, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
                                   const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}
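/* Descriptive comment (added): get_alu_src fetches an ALU source and applies
 * its swizzle so that exactly num_components components come back. Scalars
 * are broadcast, vectors are narrowed with an extract, and everything else
 * goes through a shuffle, as the code below shows.
 */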
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {
         LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value,
                                         masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
                                        swizzle, "");
      }
   }
   return value;
}
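/* Note (added): 32-bit NIR booleans are represented here as 0 / 0xFFFFFFFF,
 * which is why the comparison helpers below select between those two
 * constants instead of returning the i1 comparison result directly.
 */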
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
                                 LLVMIntPredicate pred, LLVMValueRef src0,
                                 LLVMValueRef src1)
{
   LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}
static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
                                   LLVMRealPredicate pred, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef result;
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}
static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0)
{
   char name[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
                                        ac_get_elem_bits(ctx, result_type));
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1)
{
   char name[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
                                        ac_get_elem_bits(ctx, result_type));
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
                                        ac_get_elem_bits(ctx, result_type));
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
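/* Note (added): emit_bcsel implements NIR's boolean select. The pointer/int
 * fixups at the top exist because one select arm may already have been turned
 * into a pointer while the other is still an integer.
 */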
static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
                               LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMFixedVectorTypeKind);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
                                  ctx->i32_0, "");
   return LLVMBuildSelect(ctx->builder, v,
                          ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}
static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx,
                              LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}
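/* Note (added): uadd_carry / usub_borrow are lowered through the
 * llvm.uadd/usub.with.overflow intrinsics, which return an {i32, i1} pair;
 * emit_uint_carry extracts the carry bit (member 1) and widens it to i32.
 */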
static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
                                    const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
   LLVMValueRef res;
   LLVMValueRef params[] = { src0, src1 };
   ret_type = LLVMStructTypeInContext(ctx->context, types,
                                      2, false);

   res = ac_build_intrinsic(ctx, intrin, ret_type,
                            params, 2, AC_FUNC_ATTR_READNONE);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}
static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
                                      LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
                                      "");
   result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

   switch (bitsize) {
   case 16:
      return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
   src0 = ac_to_float(ctx, src0);
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
                        ctx->i32, "");
}
static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
   case 16:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}
static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
                        ctx->i32, "");
}
static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx,
                               LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->chip_class >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal - and flush to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->chip_class >= GFX8) {
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   } else {
      /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
       * so compare the result and flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
                           LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
                           temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
                            temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}
static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}
static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}
static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx,
                             LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
   return LLVMBuildShl(ctx->builder,
                       LLVMBuildSub(ctx->builder,
                                    LLVMBuildShl(ctx->builder,
                                                 ctx->i32_1,
                                                 bits, ""),
                                    ctx->i32_1, ""),
                       offset, "");
}
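/* Worked example of the bfm formula above (comment added): with bits = 4 and
 * offset = 8, ((1 << 4) - 1) << 8 = 0xF << 8 = 0x00000F00, i.e. a 4-bit mask
 * starting at bit 8.
 */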
static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx,
                                         LLVMValueRef mask, LLVMValueRef insert,
                                         LLVMValueRef base)
{
   /* Calculate:
    *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(ctx->builder, base,
                       LLVMBuildAnd(ctx->builder, mask,
                                    LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}
static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0,
                                   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                                        LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}
static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                                          LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}
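/* Note (added): emit_ddxy computes screen-space derivatives. It picks the
 * thread-ID mask selecting the fine/coarse neighbourhood and a neighbour
 * index (1 = next pixel in X for DDX, 2 = next pixel in Y for DDY), then lets
 * ac_build_ddxy emit the actual swizzle/subtract sequence.
 */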
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
                              nir_op op,
                              LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* for DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine ||
       op == nir_op_fddx_coarse ||
       op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}
struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};
/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop, leaving the caller
 * to implement the body.
 *
 * params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we built the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    it through unchanged.
 */
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx,
                                    struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
                                       scalar_value, "uniform_active");

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return scalar_value;
}
static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx,
                                   struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      LLVMConstInt(ctx->ac.i32, 0, false),
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}
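/* Typical usage pattern (illustrative sketch added for readability; the real
 * callers are the SSBO/image paths later in this file, e.g. via
 * enter_waterfall_ssbo):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef index = enter_waterfall(ctx, &wctx, divergent_index, true);
 *    ... emit the body using the now per-iteration-uniform "index" ...
 *    result = exit_waterfall(ctx, &wctx, result);
 */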
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[4], result = NULL;
   unsigned num_components = instr->dest.dest.ssa.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);
   bool saved_inexact = false;

   if (instr->exact)
      saved_inexact = ac_disable_inexact_math(ctx->ac.builder);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));

   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);

   switch (instr->op) {
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by backend compiler with sign
          * bit removed via XOR. This is probably a LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ineg:
      result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imod:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_umod:
      result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmod:
      /* lower_fmod only lower 16-bit and 32-bit fmod */
      assert(instr->dest.dest.ssa.bit_size == 64);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), result);
      result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
      result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
      break;
   case nir_op_irem:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_idiv:
      result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_udiv:
      result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_frcp:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ushr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ilt32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine32:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult32:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge32:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by backend compiler with sign
          * bit removed via AND.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_ffloor:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fract(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsin:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fcos:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fexp2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rsq",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0],
                                  ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result,
                                ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0],
                                   instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fpow:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
      result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
      else
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (LLVMTypeOf(src[0]) == ctx->ac.f64)
         src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
      LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
      result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      break;
   case nir_op_f2f16_rtne:
   case nir_op_f2f16:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_b32csel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_f2b32:
      result = emit_f2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_i2b32:
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
      break;
   case nir_op_pack_snorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
      break;
   case nir_op_pack_unorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
      break;
   case nir_op_unpack_half_2x16:
      result = emit_unpack_half_2x16(&ctx->ac, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_cube_face_coord: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[2];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
                                           ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
      results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
      LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
      results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
      results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
      result = ac_build_gather_values(&ctx->ac, results, 2);
      break;
   }
   case nir_op_cube_face_index: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
                                  ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_fmin3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umin3:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      result = ac_build_umin(&ctx->ac, result, src[2]);
      break;
   case nir_op_imin3:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      result = ac_build_imin(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmax3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umax3:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      result = ac_build_umax(&ctx->ac, result, src[2]);
      break;
   case nir_op_imax3:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      result = ac_build_imax(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmed3: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
                              instr->dest.dest.ssa.bit_size);
      break;
   }
   case nir_op_imed3: {
      LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
      result = ac_build_imax(&ctx->ac, tmp1, tmp2);
      break;
   }
   case nir_op_umed3: {
      LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
      result = ac_build_umax(&ctx->ac, tmp1, tmp2);
      break;
   }
   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }

   if (result) {
      assert(instr->dest.dest.is_ssa);
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
   }

   if (instr->exact)
      ac_restore_inexact_math(ctx->ac.builder, saved_inexact);
}
static void visit_load_const(struct ac_nir_context *ctx,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef values[4], value = NULL;
   LLVMTypeRef element_type =
      LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 8:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr,
                 "unsupported nir load_const bit_size: %d\n",
                 instr->def.bit_size);
         abort();
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
}
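/* Note (added): get_buffer_size reads element 2 (the size field) of the buffer
 * descriptor. On GFX8 that field holds the size in bytes, so for TXQ-style
 * queries it is divided by the stride packed into element 1, as the original
 * comment inside the function explains.
 */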
static LLVMValueRef get_buffer_size(struct ac_nir_context *ctx,
                                    LLVMValueRef descriptor, bool in_elements)
{
   LLVMValueRef size =
      LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                              LLVMConstInt(ctx->ac.i32, 2, false), "");

   if (ctx->ac.chip_class == GFX8 && in_elements) {
      /* On GFX8, the descriptor contains the size in bytes,
       * but TXQ must return the size in elements.
       * The stride is always non-zero for resources using TXQ.
       */
      LLVMValueRef stride =
         LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                                 ctx->ac.i32_1, "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride,
                             LLVMConstInt(ctx->ac.i32, 16, false), "");
      stride = LLVMBuildAnd(ctx->ac.builder, stride,
                            LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

      size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
   }
   return size;
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
                                          nir_variable *var,
                                          struct ac_image_args *args,
                                          const nir_tex_instr *instr)
{
   const struct glsl_type *type = glsl_without_array(var->type);
   enum glsl_base_type stype = glsl_get_sampler_result_type(type);
   LLVMValueRef wa_8888 = NULL;
   LLVMValueRef half_texel[2];
   LLVMValueRef result;

   assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef formats;
      LLVMValueRef data_format;
      LLVMValueRef wa_formats;

      formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

      data_format = LLVMBuildLShr(ctx->builder, formats,
                                  LLVMConstInt(ctx->i32, 20, false), "");
      data_format = LLVMBuildAnd(ctx->builder, data_format,
                                 LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
      wa_8888 = LLVMBuildICmp(
         ctx->builder, LLVMIntEQ, data_format,
         LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
         "");

      uint32_t wa_num_format =
         stype == GLSL_TYPE_UINT ?
         S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
         S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
      wa_formats = LLVMBuildAnd(ctx->builder, formats,
                                LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
                                "");
      wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
                               LLVMConstInt(ctx->i32, wa_num_format, false), "");

      formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
      args->resource = LLVMBuildInsertElement(
         ctx->builder, args->resource, formats, ctx->i32_1, "");
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
      half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
   } else {
      struct ac_image_args resinfo = {};
      LLVMBasicBlockRef bbs[2];

      LLVMValueRef unnorm = NULL;
      LLVMValueRef default_offset = ctx->f32_0;
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
          !instr->is_array) {
         /* In vulkan, whether the sampler uses unnormalized
          * coordinates or not is a dynamic property of the
          * sampler. Hence, to figure out whether or not we
          * need to divide by the texture size, we need to test
          * the sampler at runtime. This tests the bit set by
          * radv_init_sampler().
          */
         LLVMValueRef sampler0 =
            LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
         sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
                                  LLVMConstInt(ctx->i32, 15, false), "");
         sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
         unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
         default_offset = LLVMConstReal(ctx->f32, -0.5);
      }

      bbs[0] = LLVMGetInsertBlock(ctx->builder);
      if (wa_8888 || unnorm) {
         assert(!(wa_8888 && unnorm));
         LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
         /* Skip the texture size query entirely if we don't need it. */
         ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
         bbs[1] = LLVMGetInsertBlock(ctx->builder);
      }

      /* Query the texture size. */
      resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
      resinfo.opcode = ac_image_get_resinfo;
      resinfo.dmask = 0xf;
      resinfo.lod = ctx->i32_0;
      resinfo.resource = args->resource;
      resinfo.attributes = AC_FUNC_ATTR_READNONE;
      LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

      /* Compute -0.5 / size. */
      for (unsigned c = 0; c < 2; c++) {
         half_texel[c] =
            LLVMBuildExtractElement(ctx->builder, size,
                                    LLVMConstInt(ctx->i32, c, 0), "");
         half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
         half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
         half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
                                       LLVMConstReal(ctx->f32, -0.5), "");
      }

      if (wa_8888 || unnorm) {
         ac_build_endif(ctx, 2000);

         for (unsigned c = 0; c < 2; c++) {
            LLVMValueRef values[2] = { default_offset, half_texel[c] };
            half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
                                         values, bbs);
         }
      }
   }

   for (unsigned c = 0; c < 2; c++) {
      LLVMValueRef tmp;
      tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
      args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   result = ac_build_image_opcode(ctx, args);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef tmp, tmp2;

      /* if the cube workaround is in place, f2i the result. */
      for (unsigned c = 0; c < 4; c++) {
         tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
         if (stype == GLSL_TYPE_UINT)
            tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
         else
            tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
         tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
         tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
         result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
      }
   }
   return result;
}
static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
   nir_deref_instr *texture_deref_instr = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      default:
         break;
      }
   }
   return texture_deref_instr;
}
static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
                                        const nir_tex_instr *instr,
                                        struct ac_image_args *args)
{
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

      return ac_build_buffer_load_format(&ctx->ac,
                                         args->resource,
                                         args->coords[0],
                                         ctx->ac.i32_0,
                                         util_last_bit(mask),
                                         0, true);
   }

   args->opcode = ac_image_sample;

   switch (instr->op) {
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_samples_identical:
      args->opcode = args->level_zero ||
                     instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
                        ac_image_load : ac_image_load_mip;
      args->level_zero = false;
      break;
   case nir_texop_txs:
   case nir_texop_query_levels:
      args->opcode = ac_image_get_resinfo;
      if (!args->lod)
         args->lod = ctx->ac.i32_0;
      args->level_zero = false;
      break;
   case nir_texop_tex:
      if (ctx->stage != MESA_SHADER_FRAGMENT) {
         args->level_zero = true;
      }
      break;
   case nir_texop_tg4:
      args->opcode = ac_image_gather4;
      args->level_zero = true;
      break;
   case nir_texop_lod:
      args->opcode = ac_image_get_lod;
      break;
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      args->opcode = ac_image_load;
      args->level_zero = false;
      break;
   default:
      break;
   }

   if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
      nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
      nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
      const struct glsl_type *type = glsl_without_array(var->type);
      enum glsl_base_type stype = glsl_get_sampler_result_type(type);
      if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
         return lower_gather4_integer(&ctx->ac, var, args, instr);
      }
   }

   /* Fixup for GFX9 which allocates 1D textures as 2D. */
   if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
      if ((args->dim == ac_image_2darray ||
           args->dim == ac_image_2d) && !args->coords[1]) {
         args->coords[1] = ctx->ac.i32_0;
      }
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
                    ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
   if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
      /* Prevent texture instructions with implicit derivatives from being
       * sinked into branches. */
      switch (instr->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         args->attributes |= AC_FUNC_ATTR_CONVERGENT;
         break;
      default:
         break;
      }
   }

   return ac_build_image_opcode(&ctx->ac, args);
}
static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
                                                  nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr = get_src(ctx, instr->src[0]);
   LLVMValueRef index = get_src(ctx, instr->src[1]);

   LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
   LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
   return result;
}
static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
                                             nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr, addr;
   LLVMValueRef src0 = get_src(ctx, instr->src[0]);
   unsigned index = nir_intrinsic_base(instr);

   addr = LLVMConstInt(ctx->ac.i32, index, 0);
   addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

   /* Load constant values from user SGPRS when possible, otherwise
    * fallback to the default path that loads directly from memory.
    */
   if (LLVMIsConstant(src0) &&
       instr->dest.ssa.bit_size == 32) {
      unsigned count = instr->dest.ssa.num_components;
      unsigned offset = index;

      offset += LLVMConstIntGetZExtValue(src0);
      offset /= 4;

      offset -= ctx->args->base_inline_push_consts;

      unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
      if (offset + count <= num_inline_push_consts) {
         LLVMValueRef push_constants[num_inline_push_consts];
         for (unsigned i = 0; i < num_inline_push_consts; i++)
            push_constants[i] = ac_get_arg(&ctx->ac,
                                           ctx->args->inline_push_consts[i]);
         return ac_build_gather_values(&ctx->ac,
                                       push_constants + offset,
                                       count);
      }
   }

   ptr = LLVMBuildGEP(ctx->ac.builder,
                      ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");

   if (instr->dest.ssa.bit_size == 8) {
      unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
      LLVMTypeRef vec_type = LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), 4 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

      LLVMValueRef params[3];
      if (load_dwords > 1) {
         LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i32, 2), "");
         params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
         params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
      } else {
         res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
         params[0] = ctx->ac.i32_0;
         params[1] = res;
      }
      params[2] = addr;
      res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

      res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
      if (instr->dest.ssa.num_components > 1)
         res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), instr->dest.ssa.num_components), "");
      return res;
   } else if (instr->dest.ssa.bit_size == 16) {
      unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
      LLVMTypeRef vec_type = LLVMVectorType(LLVMInt16TypeInContext(ctx->ac.context), 2 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
      LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
      cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
      LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
                              LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
                              LLVMConstInt(ctx->ac.i32, 4, false)};
      LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
      LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
      LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
      LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
      res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
      return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
   }

   ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

   return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
   LLVMValueRef index = get_src(ctx, instr->src[0]);

   return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}
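/* Note (added): widen_mask below scales a per-component writemask when each
 * component is split into "multiplier" sub-components. Worked example:
 * widen_mask(0b101, 2) sets bit pairs {0,1} and {4,5}, i.e. returns 0b110011.
 */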
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
   uint32_t new_mask = 0;
   for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
      if (mask & (1u << i))
         new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
   return new_mask;
}
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                                         unsigned start, unsigned count)
{
   LLVMValueRef mask[] = {
      ctx->i32_0, ctx->i32_1,
      LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

   unsigned src_elements = ac_get_llvm_num_components(src);

   if (count == src_elements) {
      assert(start == 0);
      return src;
   } else if (count == 1) {
      assert(start < src_elements);
      return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
   } else {
      assert(start + count <= src_elements);

      LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
      return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
   }
}
static unsigned get_cache_policy(struct ac_nir_context *ctx,
                                 enum gl_access_qualifier access,
                                 bool may_store_unaligned,
                                 bool writeonly_memory)
{
   unsigned cache_policy = 0;

   /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
    * store opcodes not aligned to a dword are affected. The only way to
    * get unaligned stores is through shader images.
    */
   if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
        /* If this is write-only, don't keep data in L1 to prevent
         * evicting L1 cache lines that may be needed by other
         * instructions. */
        writeonly_memory ||
        access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
      cache_policy |= ac_glc;
   }

   if (access & ACCESS_STREAM_CACHE_POLICY)
      cache_policy |= ac_slc;

   return cache_policy;
}
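/* Note (added): ac_glc / ac_slc map to the GLC and SLC bits on the emitted
 * buffer memory instructions; the chosen combination only affects caching
 * behaviour, not the values that are stored or loaded.
 */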
static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx,
                                         struct waterfall_context *wctx,
                                         const nir_intrinsic_instr *instr,
                                         nir_src src)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, src),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static void visit_store_ssbo(struct ac_nir_context *ctx,
                             nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7000);
   }

   LLVMValueRef src_data = get_src(ctx, instr->src[0]);
   int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
   unsigned writemask = nir_intrinsic_write_mask(instr);
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   bool writeonly_memory = access & ACCESS_NON_READABLE;
   unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);

   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
   LLVMValueRef base_data = src_data;
   base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
   LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

   while (writemask) {
      int start, count;
      LLVMValueRef data, offset;
      LLVMTypeRef data_type;

      u_bit_scan_consecutive_range(&writemask, &start, &count);

      /* Due to an LLVM limitation with LLVM < 9, split 3-element
       * writes into a 2-element and a 1-element write. */
      if (count == 3 &&
          (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
         writemask |= 1 << (start + 2);
         count = 2;
      }
      int num_bytes = count * elem_size_bytes; /* count in bytes */

      /* we can only store 4 DWords at the same time.
       * can only happen for 64 Bit vectors. */
      if (num_bytes > 16) {
         writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
         count = 2;
         num_bytes = 16;
      }

      /* check alignment of 16 Bit stores */
      if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = 2;
      }

      /* Due to alignment issues, split stores of 8-bit/16-bit
       * vectors. */
      if (ctx->ac.chip_class == GFX6 && count > 1 && elem_size_bytes < 4) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = elem_size_bytes;
      }

      data = extract_vector_range(&ctx->ac, base_data, start, count);

      offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
                            LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

      if (num_bytes == 1) {
         ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
                                     offset, ctx->ac.i32_0,
                                     cache_policy);
      } else if (num_bytes == 2) {
         ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
                                      offset, ctx->ac.i32_0,
                                      cache_policy);
      } else {
         int num_channels = num_bytes / 4;

         switch (num_bytes) {
         case 16: /* v4f32 */
            data_type = ctx->ac.v4f32;
            break;
         case 12: /* v3f32 */
            data_type = ctx->ac.v3f32;
            break;
         case 8: /* v2f32 */
            data_type = ctx->ac.v2f32;
            break;
         case 4: /* f32 */
            data_type = ctx->ac.f32;
            break;
         default:
            unreachable("Malformed vector store.");
         }
         data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

         ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
                                     num_channels, offset,
                                     ctx->ac.i32_0, 0,
                                     cache_policy);
      }
   }

   exit_waterfall(ctx, &wctx, NULL);

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7000);
}
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                                           LLVMValueRef descriptor,
                                           LLVMValueRef offset,
                                           LLVMValueRef compare,
                                           LLVMValueRef exchange)
{
   LLVMBasicBlockRef start_block = NULL, then_block = NULL;
   if (ctx->abi->robust_buffer_access) {
      LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      start_block = LLVMGetInsertBlock(ctx->ac.builder);

      ac_build_ifcc(&ctx->ac, cond, -1);

      then_block = LLVMGetInsertBlock(ctx->ac.builder);
   }

   LLVMValueRef ptr_parts[2] = {
      ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
      LLVMBuildAnd(ctx->ac.builder,
                   ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                   LLVMConstInt(ctx->ac.i32, 65535, 0), "")
   };

   ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
   ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

   offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

   LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
   ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
   ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

   LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
   result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

   if (ctx->abi->robust_buffer_access) {
      ac_build_endif(&ctx->ac, -1);

      LLVMBasicBlockRef incoming_blocks[2] = {
         start_block,
         then_block,
      };

      LLVMValueRef incoming_values[2] = {
         LLVMConstInt(ctx->ac.i64, 0, 0),
         result,
      };

      LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
      LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
      return ret;
   } else {
      return result;
   }
}
1866 static LLVMValueRef
visit_atomic_ssbo(struct ac_nir_context
*ctx
,
1867 nir_intrinsic_instr
*instr
)
1869 if (ctx
->ac
.postponed_kill
) {
1870 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
1871 ctx
->ac
.postponed_kill
, "");
1872 ac_build_ifcc(&ctx
->ac
, cond
, 7001);
1875 LLVMTypeRef return_type
= LLVMTypeOf(get_src(ctx
, instr
->src
[2]));
1877 char name
[64], type
[8];
1878 LLVMValueRef params
[6], descriptor
;
1879 LLVMValueRef result
;
1882 struct waterfall_context wctx
;
1883 LLVMValueRef rsrc_base
= enter_waterfall_ssbo(ctx
, &wctx
, instr
, instr
->src
[0]);
1885 switch (instr
->intrinsic
) {
1886 case nir_intrinsic_ssbo_atomic_add
:
1889 case nir_intrinsic_ssbo_atomic_imin
:
1892 case nir_intrinsic_ssbo_atomic_umin
:
1895 case nir_intrinsic_ssbo_atomic_imax
:
1898 case nir_intrinsic_ssbo_atomic_umax
:
1901 case nir_intrinsic_ssbo_atomic_and
:
1904 case nir_intrinsic_ssbo_atomic_or
:
1907 case nir_intrinsic_ssbo_atomic_xor
:
1910 case nir_intrinsic_ssbo_atomic_exchange
:
1913 case nir_intrinsic_ssbo_atomic_comp_swap
:
1920 descriptor
= ctx
->abi
->load_ssbo(ctx
->abi
,
1924 if (instr
->intrinsic
== nir_intrinsic_ssbo_atomic_comp_swap
&&
1925 return_type
== ctx
->ac
.i64
) {
1926 result
= emit_ssbo_comp_swap_64(ctx
, descriptor
,
1927 get_src(ctx
, instr
->src
[1]),
1928 get_src(ctx
, instr
->src
[2]),
1929 get_src(ctx
, instr
->src
[3]));
1931 if (instr
->intrinsic
== nir_intrinsic_ssbo_atomic_comp_swap
) {
1932 params
[arg_count
++] = ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[3]), 0);
1934 params
[arg_count
++] = ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[2]), 0);
1935 params
[arg_count
++] = descriptor
;
1937 if (LLVM_VERSION_MAJOR
>= 9) {
1938 /* XXX: The new raw/struct atomic intrinsics are buggy with
1939 * LLVM 8, see r358579.
1941 params
[arg_count
++] = get_src(ctx
, instr
->src
[1]); /* voffset */
1942 params
[arg_count
++] = ctx
->ac
.i32_0
; /* soffset */
1943 params
[arg_count
++] = ctx
->ac
.i32_0
; /* slc */
1945 ac_build_type_name_for_intr(return_type
, type
, sizeof(type
));
1946 snprintf(name
, sizeof(name
),
1947 "llvm.amdgcn.raw.buffer.atomic.%s.%s", op
, type
);
1949 params
[arg_count
++] = ctx
->ac
.i32_0
; /* vindex */
1950 params
[arg_count
++] = get_src(ctx
, instr
->src
[1]); /* voffset */
1951 params
[arg_count
++] = ctx
->ac
.i1false
; /* slc */
1953 assert(return_type
== ctx
->ac
.i32
);
1954 snprintf(name
, sizeof(name
),
1955 "llvm.amdgcn.buffer.atomic.%s", op
);
1958 result
= ac_build_intrinsic(&ctx
->ac
, name
, return_type
, params
,
1962 result
= exit_waterfall(ctx
, &wctx
, result
);
1963 if (ctx
->ac
.postponed_kill
)
1964 ac_build_endif(&ctx
->ac
, 7001);
1968 static LLVMValueRef
visit_load_buffer(struct ac_nir_context
*ctx
,
1969 nir_intrinsic_instr
*instr
)
1971 struct waterfall_context wctx
;
1972 LLVMValueRef rsrc_base
= enter_waterfall_ssbo(ctx
, &wctx
, instr
, instr
->src
[0]);
1974 int elem_size_bytes
= instr
->dest
.ssa
.bit_size
/ 8;
1975 int num_components
= instr
->num_components
;
1976 enum gl_access_qualifier access
= nir_intrinsic_access(instr
);
1977 unsigned cache_policy
= get_cache_policy(ctx
, access
, false, false);
1979 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
1980 LLVMValueRef rsrc
= ctx
->abi
->load_ssbo(ctx
->abi
, rsrc_base
, false);
1981 LLVMValueRef vindex
= ctx
->ac
.i32_0
;
1983 LLVMTypeRef def_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
1984 LLVMTypeRef def_elem_type
= num_components
> 1 ? LLVMGetElementType(def_type
) : def_type
;
1986 LLVMValueRef results
[4];
1987 for (int i
= 0; i
< num_components
;) {
1988 int num_elems
= num_components
- i
;
1989 if (elem_size_bytes
< 4 && nir_intrinsic_align(instr
) % 4 != 0)
1991 if (num_elems
* elem_size_bytes
> 16)
1992 num_elems
= 16 / elem_size_bytes
;
1993 int load_bytes
= num_elems
* elem_size_bytes
;
1995 LLVMValueRef immoffset
= LLVMConstInt(ctx
->ac
.i32
, i
* elem_size_bytes
, false);
1999 if (load_bytes
== 1) {
2000 ret
= ac_build_tbuffer_load_byte(&ctx
->ac
,
2006 } else if (load_bytes
== 2) {
2007 ret
= ac_build_tbuffer_load_short(&ctx
->ac
,
2014 int num_channels
= util_next_power_of_two(load_bytes
) / 4;
2015 bool can_speculate
= access
& ACCESS_CAN_REORDER
;
2017 ret
= ac_build_buffer_load(&ctx
->ac
, rsrc
, num_channels
,
2018 vindex
, offset
, immoffset
, 0,
2019 cache_policy
, can_speculate
, false);
2022 LLVMTypeRef byte_vec
= LLVMVectorType(ctx
->ac
.i8
, ac_get_type_size(LLVMTypeOf(ret
)));
2023 ret
= LLVMBuildBitCast(ctx
->ac
.builder
, ret
, byte_vec
, "");
2024 ret
= ac_trim_vector(&ctx
->ac
, ret
, load_bytes
);
2026 LLVMTypeRef ret_type
= LLVMVectorType(def_elem_type
, num_elems
);
2027 ret
= LLVMBuildBitCast(ctx
->ac
.builder
, ret
, ret_type
, "");
2029 for (unsigned j
= 0; j
< num_elems
; j
++) {
2030 results
[i
+ j
] = LLVMBuildExtractElement(ctx
->ac
.builder
, ret
, LLVMConstInt(ctx
->ac
.i32
, j
, false), "");
2035 LLVMValueRef ret
= ac_build_gather_values(&ctx
->ac
, results
, num_components
);
2036 return exit_waterfall(ctx
, &wctx
, ret
);
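/* UBO descriptor indices marked ACCESS_NON_UNIFORM may diverge between
 * lanes, so the access is wrapped in a waterfall loop that iterates over the
 * distinct index values and performs each load with a uniform index.
 */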
static LLVMValueRef
enter_waterfall_ubo(struct ac_nir_context *ctx,
                    struct waterfall_context *wctx,
                    const nir_intrinsic_instr *instr)
{
    return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
2047 static LLVMValueRef
visit_load_ubo_buffer(struct ac_nir_context
*ctx
,
2048 nir_intrinsic_instr
*instr
)
2050 struct waterfall_context wctx
;
2051 LLVMValueRef rsrc_base
= enter_waterfall_ubo(ctx
, &wctx
, instr
);
2054 LLVMValueRef rsrc
= rsrc_base
;
2055 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
2056 int num_components
= instr
->num_components
;
2058 if (ctx
->abi
->load_ubo
)
2059 rsrc
= ctx
->abi
->load_ubo(ctx
->abi
, rsrc
);
2061 if (instr
->dest
.ssa
.bit_size
== 64)
2062 num_components
*= 2;
2064 if (instr
->dest
.ssa
.bit_size
== 16 || instr
->dest
.ssa
.bit_size
== 8) {
2065 unsigned load_bytes
= instr
->dest
.ssa
.bit_size
/ 8;
2066 LLVMValueRef results
[num_components
];
2067 for (unsigned i
= 0; i
< num_components
; ++i
) {
2068 LLVMValueRef immoffset
= LLVMConstInt(ctx
->ac
.i32
,
2071 if (load_bytes
== 1) {
2072 results
[i
] = ac_build_tbuffer_load_byte(&ctx
->ac
,
2079 assert(load_bytes
== 2);
2080 results
[i
] = ac_build_tbuffer_load_short(&ctx
->ac
,
2088 ret
= ac_build_gather_values(&ctx
->ac
, results
, num_components
);
2090 ret
= ac_build_buffer_load(&ctx
->ac
, rsrc
, num_components
, NULL
, offset
,
2091 NULL
, 0, 0, true, true);
2093 ret
= ac_trim_vector(&ctx
->ac
, ret
, num_components
);
2096 ret
= LLVMBuildBitCast(ctx
->ac
.builder
, ret
,
2097 get_def_type(ctx
, &instr
->dest
.ssa
), "");
2099 return exit_waterfall(ctx
, &wctx
, ret
);
2103 get_deref_offset(struct ac_nir_context
*ctx
, nir_deref_instr
*instr
,
2104 bool vs_in
, unsigned *vertex_index_out
,
2105 LLVMValueRef
*vertex_index_ref
,
2106 unsigned *const_out
, LLVMValueRef
*indir_out
)
2108 nir_variable
*var
= nir_deref_instr_get_variable(instr
);
2109 nir_deref_path path
;
2110 unsigned idx_lvl
= 1;
2112 nir_deref_path_init(&path
, instr
, NULL
);
2114 if (vertex_index_out
!= NULL
|| vertex_index_ref
!= NULL
) {
2115 if (vertex_index_ref
) {
2116 *vertex_index_ref
= get_src(ctx
, path
.path
[idx_lvl
]->arr
.index
);
2117 if (vertex_index_out
)
2118 *vertex_index_out
= 0;
2120 *vertex_index_out
= nir_src_as_uint(path
.path
[idx_lvl
]->arr
.index
);
2125 uint32_t const_offset
= 0;
2126 LLVMValueRef offset
= NULL
;
2128 if (var
->data
.compact
) {
2129 assert(instr
->deref_type
== nir_deref_type_array
);
2130 const_offset
= nir_src_as_uint(instr
->arr
.index
);
2134 for (; path
.path
[idx_lvl
]; ++idx_lvl
) {
2135 const struct glsl_type
*parent_type
= path
.path
[idx_lvl
- 1]->type
;
2136 if (path
.path
[idx_lvl
]->deref_type
== nir_deref_type_struct
) {
2137 unsigned index
= path
.path
[idx_lvl
]->strct
.index
;
2139 for (unsigned i
= 0; i
< index
; i
++) {
2140 const struct glsl_type
*ft
= glsl_get_struct_field(parent_type
, i
);
2141 const_offset
+= glsl_count_attribute_slots(ft
, vs_in
);
2143 } else if(path
.path
[idx_lvl
]->deref_type
== nir_deref_type_array
) {
2144 unsigned size
= glsl_count_attribute_slots(path
.path
[idx_lvl
]->type
, vs_in
);
2145 if (nir_src_is_const(path
.path
[idx_lvl
]->arr
.index
)) {
2146 const_offset
+= size
*
2147 nir_src_as_uint(path
.path
[idx_lvl
]->arr
.index
);
2149 LLVMValueRef array_off
= LLVMBuildMul(ctx
->ac
.builder
, LLVMConstInt(ctx
->ac
.i32
, size
, 0),
2150 get_src(ctx
, path
.path
[idx_lvl
]->arr
.index
), "");
2152 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
, array_off
, "");
2157 unreachable("Uhandled deref type in get_deref_instr_offset");
2161 nir_deref_path_finish(&path
);
2163 if (const_offset
&& offset
)
2164 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
,
2165 LLVMConstInt(ctx
->ac
.i32
, const_offset
, 0),
2168 *const_out
= const_offset
;
2169 *indir_out
= offset
;
2172 static LLVMValueRef
load_tess_varyings(struct ac_nir_context
*ctx
,
2173 nir_intrinsic_instr
*instr
,
2176 LLVMValueRef result
;
2177 LLVMValueRef vertex_index
= NULL
;
2178 LLVMValueRef indir_index
= NULL
;
2179 unsigned const_index
= 0;
2181 nir_variable
*var
= nir_deref_instr_get_variable(nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
));
2183 unsigned location
= var
->data
.location
;
2184 unsigned driver_location
= var
->data
.driver_location
;
2185 const bool is_patch
= var
->data
.patch
||
2186 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_INNER
||
2187 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_OUTER
;
2188 const bool is_compact
= var
->data
.compact
;
2190 get_deref_offset(ctx
, nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
),
2191 false, NULL
, is_patch
? NULL
: &vertex_index
,
2192 &const_index
, &indir_index
);
2194 LLVMTypeRef dest_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
2196 LLVMTypeRef src_component_type
;
2197 if (LLVMGetTypeKind(dest_type
) == LLVMFixedVectorTypeKind
)
2198 src_component_type
= LLVMGetElementType(dest_type
);
2200 src_component_type
= dest_type
;
2202 result
= ctx
->abi
->load_tess_varyings(ctx
->abi
, src_component_type
,
2203 vertex_index
, indir_index
,
2204 const_index
, location
, driver_location
,
2205 var
->data
.location_frac
,
2206 instr
->num_components
,
2207 is_patch
, is_compact
, load_inputs
);
2208 if (instr
->dest
.ssa
.bit_size
== 16) {
2209 result
= ac_to_integer(&ctx
->ac
, result
);
2210 result
= LLVMBuildTrunc(ctx
->ac
.builder
, result
, dest_type
, "");
2212 return LLVMBuildBitCast(ctx
->ac
.builder
, result
, dest_type
, "");
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
    assert(glsl_type_is_vector_or_scalar(type) ||
           glsl_type_is_matrix(type));
    return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
2223 static LLVMValueRef
visit_load_var(struct ac_nir_context
*ctx
,
2224 nir_intrinsic_instr
*instr
)
2226 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
2227 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
2229 LLVMValueRef values
[8];
2231 int ve
= instr
->dest
.ssa
.num_components
;
2233 LLVMValueRef indir_index
;
2235 unsigned const_index
;
2236 unsigned stride
= 4;
2237 int mode
= deref
->mode
;
2240 bool vs_in
= ctx
->stage
== MESA_SHADER_VERTEX
&&
2241 var
->data
.mode
== nir_var_shader_in
;
2242 idx
= var
->data
.driver_location
;
2243 comp
= var
->data
.location_frac
;
2244 mode
= var
->data
.mode
;
2246 get_deref_offset(ctx
, deref
, vs_in
, NULL
, NULL
,
2247 &const_index
, &indir_index
);
2249 if (var
->data
.compact
) {
2251 const_index
+= comp
;
2256 if (instr
->dest
.ssa
.bit_size
== 64 &&
2257 (deref
->mode
== nir_var_shader_in
||
2258 deref
->mode
== nir_var_shader_out
||
2259 deref
->mode
== nir_var_function_temp
))
2263 case nir_var_shader_in
:
2264 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
||
2265 ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
2266 return load_tess_varyings(ctx
, instr
, true);
2269 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
2270 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
2271 LLVMValueRef indir_index
;
2272 unsigned const_index
, vertex_index
;
2273 get_deref_offset(ctx
, deref
, false, &vertex_index
, NULL
,
2274 &const_index
, &indir_index
);
2275 assert(indir_index
== NULL
);
2277 return ctx
->abi
->load_inputs(ctx
->abi
, var
->data
.location
,
2278 var
->data
.driver_location
,
2279 var
->data
.location_frac
,
2280 instr
->num_components
, vertex_index
, const_index
, type
);
2283 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
2285 unsigned count
= glsl_count_attribute_slots(
2287 ctx
->stage
== MESA_SHADER_VERTEX
);
2289 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2290 &ctx
->ac
, ctx
->abi
->inputs
+ idx
+ chan
, count
,
2291 stride
, false, true);
2293 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2297 values
[chan
] = ctx
->abi
->inputs
[idx
+ chan
+ const_index
* stride
];
2300 case nir_var_function_temp
:
2301 for (unsigned chan
= 0; chan
< ve
; chan
++) {
2303 unsigned count
= glsl_count_attribute_slots(
2306 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2307 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
2308 stride
, true, true);
2310 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2314 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
, ctx
->locals
[idx
+ chan
+ const_index
* stride
], "");
2318 case nir_var_shader_out
:
2319 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
2320 return load_tess_varyings(ctx
, instr
, false);
2323 if (ctx
->stage
== MESA_SHADER_FRAGMENT
&&
2324 var
->data
.fb_fetch_output
&&
2325 ctx
->abi
->emit_fbfetch
)
2326 return ctx
->abi
->emit_fbfetch(ctx
->abi
);
2328 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
2330 unsigned count
= glsl_count_attribute_slots(
2333 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2334 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
2335 stride
, true, true);
2337 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2341 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
,
2342 ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
],
2347 case nir_var_mem_global
: {
2348 LLVMValueRef address
= get_src(ctx
, instr
->src
[0]);
2349 LLVMTypeRef result_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
2350 unsigned explicit_stride
= glsl_get_explicit_stride(deref
->type
);
2351 unsigned natural_stride
= type_scalar_size_bytes(deref
->type
);
2352 unsigned stride
= explicit_stride
? explicit_stride
: natural_stride
;
2353 int elem_size_bytes
= ac_get_elem_bits(&ctx
->ac
, result_type
) / 8;
2354 bool split_loads
= ctx
->ac
.chip_class
== GFX6
&& elem_size_bytes
< 4;
2356 if (stride
!= natural_stride
|| split_loads
) {
2357 if (LLVMGetTypeKind(result_type
) == LLVMFixedVectorTypeKind
)
2358 result_type
= LLVMGetElementType(result_type
);
2360 LLVMTypeRef ptr_type
= LLVMPointerType(result_type
,
2361 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2362 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2364 for (unsigned i
= 0; i
< instr
->dest
.ssa
.num_components
; ++i
) {
2365 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, i
* stride
/ natural_stride
, 0);
2366 values
[i
] = LLVMBuildLoad(ctx
->ac
.builder
,
2367 ac_build_gep_ptr(&ctx
->ac
, address
, offset
), "");
2369 return ac_build_gather_values(&ctx
->ac
, values
, instr
->dest
.ssa
.num_components
);
2371 LLVMTypeRef ptr_type
= LLVMPointerType(result_type
,
2372 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2373 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2374 LLVMValueRef val
= LLVMBuildLoad(ctx
->ac
.builder
, address
, "");
2379 unreachable("unhandle variable mode");
2381 ret
= ac_build_varying_gather_values(&ctx
->ac
, values
, ve
, comp
);
2382 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
2386 visit_store_var(struct ac_nir_context
*ctx
,
2387 nir_intrinsic_instr
*instr
)
2389 if (ctx
->ac
.postponed_kill
) {
2390 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2391 ctx
->ac
.postponed_kill
, "");
2392 ac_build_ifcc(&ctx
->ac
, cond
, 7002);
2395 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
2396 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
2398 LLVMValueRef temp_ptr
, value
;
2401 LLVMValueRef src
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[1]));
2402 int writemask
= instr
->const_index
[0];
2403 LLVMValueRef indir_index
;
2404 unsigned const_index
;
2407 get_deref_offset(ctx
, deref
, false,
2408 NULL
, NULL
, &const_index
, &indir_index
);
2409 idx
= var
->data
.driver_location
;
2410 comp
= var
->data
.location_frac
;
2412 if (var
->data
.compact
) {
2413 const_index
+= comp
;
2418 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
)) == 64 &&
2419 (deref
->mode
== nir_var_shader_out
||
2420 deref
->mode
== nir_var_function_temp
)) {
2422 src
= LLVMBuildBitCast(ctx
->ac
.builder
, src
,
2423 LLVMVectorType(ctx
->ac
.f32
, ac_get_llvm_num_components(src
) * 2),
2426 writemask
= widen_mask(writemask
, 2);
2429 writemask
= writemask
<< comp
;
2431 switch (deref
->mode
) {
2432 case nir_var_shader_out
:
2434 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
2435 LLVMValueRef vertex_index
= NULL
;
2436 LLVMValueRef indir_index
= NULL
;
2437 unsigned const_index
= 0;
2438 const bool is_patch
= var
->data
.patch
||
2439 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_INNER
||
2440 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_OUTER
;
2442 get_deref_offset(ctx
, deref
, false, NULL
,
2443 is_patch
? NULL
: &vertex_index
,
2444 &const_index
, &indir_index
);
2446 ctx
->abi
->store_tcs_outputs(ctx
->abi
, var
,
2447 vertex_index
, indir_index
,
2448 const_index
, src
, writemask
);
2452 for (unsigned chan
= 0; chan
< 8; chan
++) {
2454 if (!(writemask
& (1 << chan
)))
2457 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
- comp
);
2459 if (var
->data
.compact
)
2462 unsigned count
= glsl_count_attribute_slots(
2465 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2466 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
2467 stride
, true, true);
2469 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
2470 value
, indir_index
, "");
2471 build_store_values_extended(&ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
,
2472 count
, stride
, tmp_vec
);
2475 temp_ptr
= ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
];
2477 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
2481 case nir_var_function_temp
:
2482 for (unsigned chan
= 0; chan
< 8; chan
++) {
2483 if (!(writemask
& (1 << chan
)))
2486 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
);
2488 unsigned count
= glsl_count_attribute_slots(
2491 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2492 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
2495 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
2496 value
, indir_index
, "");
2497 build_store_values_extended(&ctx
->ac
, ctx
->locals
+ idx
+ chan
,
2500 temp_ptr
= ctx
->locals
[idx
+ chan
+ const_index
* 4];
2502 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
2507 case nir_var_mem_global
: {
2508 int writemask
= instr
->const_index
[0];
2509 LLVMValueRef address
= get_src(ctx
, instr
->src
[0]);
2510 LLVMValueRef val
= get_src(ctx
, instr
->src
[1]);
2512 unsigned explicit_stride
= glsl_get_explicit_stride(deref
->type
);
2513 unsigned natural_stride
= type_scalar_size_bytes(deref
->type
);
2514 unsigned stride
= explicit_stride
? explicit_stride
: natural_stride
;
2515 int elem_size_bytes
= ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(val
)) / 8;
2516 bool split_stores
= ctx
->ac
.chip_class
== GFX6
&& elem_size_bytes
< 4;
2518 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(val
),
2519 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2520 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2522 if (writemask
== (1u << ac_get_llvm_num_components(val
)) - 1 &&
2523 stride
== natural_stride
&& !split_stores
) {
2524 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(val
),
2525 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2526 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2528 val
= LLVMBuildBitCast(ctx
->ac
.builder
, val
,
2529 LLVMGetElementType(LLVMTypeOf(address
)), "");
2530 LLVMBuildStore(ctx
->ac
.builder
, val
, address
);
2532 LLVMTypeRef val_type
= LLVMTypeOf(val
);
2533 if (LLVMGetTypeKind(LLVMTypeOf(val
)) == LLVMFixedVectorTypeKind
)
2534 val_type
= LLVMGetElementType(val_type
);
2536 LLVMTypeRef ptr_type
= LLVMPointerType(val_type
,
2537 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2538 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2539 for (unsigned chan
= 0; chan
< 4; chan
++) {
2540 if (!(writemask
& (1 << chan
)))
2543 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, chan
* stride
/ natural_stride
, 0);
2545 LLVMValueRef ptr
= ac_build_gep_ptr(&ctx
->ac
, address
, offset
);
2546 LLVMValueRef src
= ac_llvm_extract_elem(&ctx
->ac
, val
,
2548 src
= LLVMBuildBitCast(ctx
->ac
.builder
, src
,
2549 LLVMGetElementType(LLVMTypeOf(ptr
)), "");
2550 LLVMBuildStore(ctx
->ac
.builder
, src
, ptr
);
2560 if (ctx
->ac
.postponed_kill
)
2561 ac_build_endif(&ctx
->ac
, 7002);
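/* Number of address components an image intrinsic uses for each sampler
 * dimensionality, counting the array layer and, for multisampled images,
 * the sample index.
 */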
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
    switch (dim) {
    case GLSL_SAMPLER_DIM_BUF:
        return 1;
    case GLSL_SAMPLER_DIM_1D:
        return array ? 2 : 1;
    case GLSL_SAMPLER_DIM_2D:
        return array ? 3 : 2;
    case GLSL_SAMPLER_DIM_MS:
        return array ? 4 : 3;
    case GLSL_SAMPLER_DIM_3D:
    case GLSL_SAMPLER_DIM_CUBE:
        return 3;
    case GLSL_SAMPLER_DIM_RECT:
    case GLSL_SAMPLER_DIM_SUBPASS:
        return 2;
    case GLSL_SAMPLER_DIM_SUBPASS_MS:
        return 3;
    default:
        break;
    }
    return 0;
}
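/* For compressed MSAA surfaces the FMASK stores, per pixel, the mapping from
 * logical sample index to the physical fragment slot, so the sample index has
 * to be remapped through the FMASK before addressing the image.
 */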
static LLVMValueRef
adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                LLVMValueRef coord_x, LLVMValueRef coord_y,
                                LLVMValueRef coord_z,
                                LLVMValueRef sample_index,
                                LLVMValueRef fmask_desc_ptr)
{
    unsigned sample_chan = coord_z ? 3 : 2;
    LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
    addr[sample_chan] = sample_index;

    ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
    return addr[sample_chan];
}
static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
    assert(instr->src[0].is_ssa);
    return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
static LLVMValueRef
get_image_descriptor(struct ac_nir_context *ctx,
                     const nir_intrinsic_instr *instr,
                     LLVMValueRef dynamic_index,
                     enum ac_descriptor_type desc_type,
                     bool write)
{
    nir_deref_instr *deref_instr =
        instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
        nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

    return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr,
                            dynamic_index, true, write);
}
2622 static void get_image_coords(struct ac_nir_context
*ctx
,
2623 const nir_intrinsic_instr
*instr
,
2624 LLVMValueRef dynamic_desc_index
,
2625 struct ac_image_args
*args
,
2626 enum glsl_sampler_dim dim
,
2629 LLVMValueRef src0
= get_src(ctx
, instr
->src
[1]);
2630 LLVMValueRef masks
[] = {
2631 LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
2632 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
2634 LLVMValueRef sample_index
= ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[2]), 0);
2637 ASSERTED
bool add_frag_pos
= (dim
== GLSL_SAMPLER_DIM_SUBPASS
||
2638 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
2639 bool is_ms
= (dim
== GLSL_SAMPLER_DIM_MS
||
2640 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
2641 bool gfx9_1d
= ctx
->ac
.chip_class
== GFX9
&& dim
== GLSL_SAMPLER_DIM_1D
;
2642 assert(!add_frag_pos
&& "Input attachments should be lowered by this point.");
2643 count
= image_type_to_components_count(dim
, is_array
);
2645 if (is_ms
&& (instr
->intrinsic
== nir_intrinsic_image_deref_load
||
2646 instr
->intrinsic
== nir_intrinsic_bindless_image_load
)) {
2647 LLVMValueRef fmask_load_address
[3];
2649 fmask_load_address
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
2650 fmask_load_address
[1] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[1], "");
2652 fmask_load_address
[2] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[2], "");
2654 fmask_load_address
[2] = NULL
;
2656 sample_index
= adjust_sample_index_using_fmask(&ctx
->ac
,
2657 fmask_load_address
[0],
2658 fmask_load_address
[1],
2659 fmask_load_address
[2],
2661 get_sampler_desc(ctx
, nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
),
2662 AC_DESC_FMASK
, &instr
->instr
, dynamic_desc_index
, true, false));
2664 if (count
== 1 && !gfx9_1d
) {
2665 if (instr
->src
[1].ssa
->num_components
)
2666 args
->coords
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
2668 args
->coords
[0] = src0
;
2673 for (chan
= 0; chan
< count
; ++chan
) {
2674 args
->coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src0
, chan
);
2679 args
->coords
[2] = args
->coords
[1];
2680 args
->coords
[1] = ctx
->ac
.i32_0
;
2682 args
->coords
[1] = ctx
->ac
.i32_0
;
2685 if (ctx
->ac
.chip_class
== GFX9
&&
2686 dim
== GLSL_SAMPLER_DIM_2D
&&
2688 /* The hw can't bind a slice of a 3D image as a 2D
2689 * image, because it ignores BASE_ARRAY if the target
2690 * is 3D. The workaround is to read BASE_ARRAY and set
2691 * it as the 3rd address operand for all 2D images.
2693 LLVMValueRef first_layer
, const5
, mask
;
2695 const5
= LLVMConstInt(ctx
->ac
.i32
, 5, 0);
2696 mask
= LLVMConstInt(ctx
->ac
.i32
, S_008F24_BASE_ARRAY(~0), 0);
2697 first_layer
= LLVMBuildExtractElement(ctx
->ac
.builder
, args
->resource
, const5
, "");
2698 first_layer
= LLVMBuildAnd(ctx
->ac
.builder
, first_layer
, mask
, "");
2700 args
->coords
[count
] = first_layer
;
2706 args
->coords
[count
] = sample_index
;
static LLVMValueRef
get_image_buffer_descriptor(struct ac_nir_context *ctx,
                            const nir_intrinsic_instr *instr,
                            LLVMValueRef dynamic_index,
                            bool write, bool atomic)
{
    LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
    if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
        LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc,
                                                          LLVMConstInt(ctx->ac.i32, 2, 0), "");
        LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc,
                                                      LLVMConstInt(ctx->ac.i32, 1, 0), "");
        stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

        LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                      LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                      elem_count, stride, "");

        rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                      LLVMConstInt(ctx->ac.i32, 2, 0), "");
    }
    return rsrc;
}
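/* Image descriptors with a divergent (non-uniform) index are handled by a
 * waterfall loop: the index is made uniform per iteration so the image
 * instruction always sees a scalar resource descriptor.
 */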
static LLVMValueRef
enter_waterfall_image(struct ac_nir_context *ctx,
                      struct waterfall_context *wctx,
                      const nir_intrinsic_instr *instr)
{
    nir_deref_instr *deref_instr = NULL;

    if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
        deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);

    LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
    return enter_waterfall(ctx, wctx, value,
                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     bool bindless)
{
    LLVMValueRef res;

    enum glsl_sampler_dim dim;
    enum gl_access_qualifier access;
    bool is_array;
    if (bindless) {
        dim = nir_intrinsic_image_dim(instr);
        access = nir_intrinsic_access(instr);
        is_array = nir_intrinsic_image_array(instr);
    } else {
        const nir_deref_instr *image_deref = get_image_deref(instr);
        const struct glsl_type *type = image_deref->type;
        const nir_variable *var = nir_deref_instr_get_variable(image_deref);
        dim = glsl_get_sampler_dim(type);
        access = var->data.access;
        is_array = glsl_sampler_type_is_array(type);
    }

    struct waterfall_context wctx;
    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

    struct ac_image_args args = {};

    args.cache_policy = get_cache_policy(ctx, access, false, false);

    if (dim == GLSL_SAMPLER_DIM_BUF) {
        unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
        unsigned num_channels = util_last_bit(mask);
        LLVMValueRef rsrc, vindex;

        rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
        vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                         ctx->ac.i32_0, "");

        bool can_speculate = access & ACCESS_CAN_REORDER;
        res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
                                          ctx->ac.i32_0, num_channels,
                                          args.cache_policy, can_speculate);
        res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

        res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
        res = ac_to_integer(&ctx->ac, res);
    } else {
        bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;

        args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
        args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
        get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
        args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
        if (!level_zero)
            args.lod = get_src(ctx, instr->src[3]);
        args.dmask = 15;
        args.attributes = AC_FUNC_ATTR_READONLY;

        res = ac_build_image_opcode(&ctx->ac, &args);
    }
    return exit_waterfall(ctx, &wctx, res);
}
2810 static void visit_image_store(struct ac_nir_context
*ctx
,
2811 const nir_intrinsic_instr
*instr
,
2814 if (ctx
->ac
.postponed_kill
) {
2815 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2816 ctx
->ac
.postponed_kill
, "");
2817 ac_build_ifcc(&ctx
->ac
, cond
, 7003);
2820 enum glsl_sampler_dim dim
;
2821 enum gl_access_qualifier access
;
2825 dim
= nir_intrinsic_image_dim(instr
);
2826 access
= nir_intrinsic_access(instr
);
2827 is_array
= nir_intrinsic_image_array(instr
);
2829 const nir_deref_instr
*image_deref
= get_image_deref(instr
);
2830 const struct glsl_type
*type
= image_deref
->type
;
2831 const nir_variable
*var
= nir_deref_instr_get_variable(image_deref
);
2832 dim
= glsl_get_sampler_dim(type
);
2833 access
= var
->data
.access
;
2834 is_array
= glsl_sampler_type_is_array(type
);
2837 struct waterfall_context wctx
;
2838 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
2840 bool writeonly_memory
= access
& ACCESS_NON_READABLE
;
2841 struct ac_image_args args
= {};
2843 args
.cache_policy
= get_cache_policy(ctx
, access
, true, writeonly_memory
);
2845 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
2846 LLVMValueRef rsrc
= get_image_buffer_descriptor(ctx
, instr
, dynamic_index
, true, false);
2847 LLVMValueRef src
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[3]));
2848 unsigned src_channels
= ac_get_llvm_num_components(src
);
2849 LLVMValueRef vindex
;
2851 if (src_channels
== 3)
2852 src
= ac_build_expand_to_vec4(&ctx
->ac
, src
, 3);
2854 vindex
= LLVMBuildExtractElement(ctx
->ac
.builder
,
2855 get_src(ctx
, instr
->src
[1]),
2858 ac_build_buffer_store_format(&ctx
->ac
, rsrc
, src
, vindex
,
2859 ctx
->ac
.i32_0
, src_channels
,
2862 bool level_zero
= nir_src_is_const(instr
->src
[4]) && nir_src_as_uint(instr
->src
[4]) == 0;
2864 args
.opcode
= level_zero
? ac_image_store
: ac_image_store_mip
;
2865 args
.data
[0] = ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[3]));
2866 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, true);
2867 get_image_coords(ctx
, instr
, dynamic_index
, &args
, dim
, is_array
);
2868 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
2870 args
.lod
= get_src(ctx
, instr
->src
[4]);
2873 ac_build_image_opcode(&ctx
->ac
, &args
);
2876 exit_waterfall(ctx
, &wctx
, NULL
);
2877 if (ctx
->ac
.postponed_kill
)
2878 ac_build_endif(&ctx
->ac
, 7003);
2881 static LLVMValueRef
visit_image_atomic(struct ac_nir_context
*ctx
,
2882 const nir_intrinsic_instr
*instr
,
2885 if (ctx
->ac
.postponed_kill
) {
2886 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2887 ctx
->ac
.postponed_kill
, "");
2888 ac_build_ifcc(&ctx
->ac
, cond
, 7004);
2891 LLVMValueRef params
[7];
2892 int param_count
= 0;
2894 bool cmpswap
= instr
->intrinsic
== nir_intrinsic_image_deref_atomic_comp_swap
||
2895 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_comp_swap
;
2896 const char *atomic_name
;
2897 char intrinsic_name
[64];
2898 enum ac_atomic_op atomic_subop
;
2899 ASSERTED
int length
;
2901 enum glsl_sampler_dim dim
;
2904 if (instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_imin
||
2905 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_umin
||
2906 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_imax
||
2907 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_umax
) {
2908 ASSERTED
const GLenum format
= nir_intrinsic_format(instr
);
2909 assert(format
== GL_R32UI
|| format
== GL_R32I
);
2911 dim
= nir_intrinsic_image_dim(instr
);
2912 is_array
= nir_intrinsic_image_array(instr
);
2914 const struct glsl_type
*type
= get_image_deref(instr
)->type
;
2915 dim
= glsl_get_sampler_dim(type
);
2916 is_array
= glsl_sampler_type_is_array(type
);
2919 struct waterfall_context wctx
;
2920 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
2922 switch (instr
->intrinsic
) {
2923 case nir_intrinsic_bindless_image_atomic_add
:
2924 case nir_intrinsic_image_deref_atomic_add
:
2925 atomic_name
= "add";
2926 atomic_subop
= ac_atomic_add
;
2928 case nir_intrinsic_bindless_image_atomic_imin
:
2929 case nir_intrinsic_image_deref_atomic_imin
:
2930 atomic_name
= "smin";
2931 atomic_subop
= ac_atomic_smin
;
2933 case nir_intrinsic_bindless_image_atomic_umin
:
2934 case nir_intrinsic_image_deref_atomic_umin
:
2935 atomic_name
= "umin";
2936 atomic_subop
= ac_atomic_umin
;
2938 case nir_intrinsic_bindless_image_atomic_imax
:
2939 case nir_intrinsic_image_deref_atomic_imax
:
2940 atomic_name
= "smax";
2941 atomic_subop
= ac_atomic_smax
;
2943 case nir_intrinsic_bindless_image_atomic_umax
:
2944 case nir_intrinsic_image_deref_atomic_umax
:
2945 atomic_name
= "umax";
2946 atomic_subop
= ac_atomic_umax
;
2948 case nir_intrinsic_bindless_image_atomic_and
:
2949 case nir_intrinsic_image_deref_atomic_and
:
2950 atomic_name
= "and";
2951 atomic_subop
= ac_atomic_and
;
2953 case nir_intrinsic_bindless_image_atomic_or
:
2954 case nir_intrinsic_image_deref_atomic_or
:
2956 atomic_subop
= ac_atomic_or
;
2958 case nir_intrinsic_bindless_image_atomic_xor
:
2959 case nir_intrinsic_image_deref_atomic_xor
:
2960 atomic_name
= "xor";
2961 atomic_subop
= ac_atomic_xor
;
2963 case nir_intrinsic_bindless_image_atomic_exchange
:
2964 case nir_intrinsic_image_deref_atomic_exchange
:
2965 atomic_name
= "swap";
2966 atomic_subop
= ac_atomic_swap
;
2968 case nir_intrinsic_bindless_image_atomic_comp_swap
:
2969 case nir_intrinsic_image_deref_atomic_comp_swap
:
2970 atomic_name
= "cmpswap";
2971 atomic_subop
= 0; /* not used */
2973 case nir_intrinsic_bindless_image_atomic_inc_wrap
:
2974 case nir_intrinsic_image_deref_atomic_inc_wrap
: {
2975 atomic_name
= "inc";
2976 atomic_subop
= ac_atomic_inc_wrap
;
2977 /* ATOMIC_INC instruction does:
2978 * value = (value + 1) % (data + 1)
2980 * value = (value + 1) % data
2981 * So replace 'data' by 'data - 1'.
2983 ctx
->ssa_defs
[instr
->src
[3].ssa
->index
] =
2984 LLVMBuildSub(ctx
->ac
.builder
,
2985 ctx
->ssa_defs
[instr
->src
[3].ssa
->index
],
2989 case nir_intrinsic_bindless_image_atomic_dec_wrap
:
2990 case nir_intrinsic_image_deref_atomic_dec_wrap
:
2991 atomic_name
= "dec";
2992 atomic_subop
= ac_atomic_dec_wrap
;
2999 params
[param_count
++] = get_src(ctx
, instr
->src
[4]);
3000 params
[param_count
++] = get_src(ctx
, instr
->src
[3]);
3002 LLVMValueRef result
;
3003 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
3004 params
[param_count
++] = get_image_buffer_descriptor(ctx
, instr
, dynamic_index
, true, true);
3005 params
[param_count
++] = LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[1]),
3006 ctx
->ac
.i32_0
, ""); /* vindex */
3007 params
[param_count
++] = ctx
->ac
.i32_0
; /* voffset */
3008 if (LLVM_VERSION_MAJOR
>= 9) {
3009 /* XXX: The new raw/struct atomic intrinsics are buggy
3010 * with LLVM 8, see r358579.
3012 params
[param_count
++] = ctx
->ac
.i32_0
; /* soffset */
3013 params
[param_count
++] = ctx
->ac
.i32_0
; /* slc */
3015 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3016 "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name
);
3018 params
[param_count
++] = ctx
->ac
.i1false
; /* slc */
3020 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3021 "llvm.amdgcn.buffer.atomic.%s", atomic_name
);
3024 assert(length
< sizeof(intrinsic_name
));
3025 result
= ac_build_intrinsic(&ctx
->ac
, intrinsic_name
, ctx
->ac
.i32
,
3026 params
, param_count
, 0);
3028 struct ac_image_args args
= {};
3029 args
.opcode
= cmpswap
? ac_image_atomic_cmpswap
: ac_image_atomic
;
3030 args
.atomic
= atomic_subop
;
3031 args
.data
[0] = params
[0];
3033 args
.data
[1] = params
[1];
3034 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, true);
3035 get_image_coords(ctx
, instr
, dynamic_index
, &args
, dim
, is_array
);
3036 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
3038 result
= ac_build_image_opcode(&ctx
->ac
, &args
);
3041 result
= exit_waterfall(ctx
, &wctx
, result
);
3042 if (ctx
->ac
.postponed_kill
)
3043 ac_build_endif(&ctx
->ac
, 7004);
static LLVMValueRef
visit_image_samples(struct ac_nir_context *ctx,
                    nir_intrinsic_instr *instr)
{
    struct waterfall_context wctx;
    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
    LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);

    LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);

    return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef
visit_image_size(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 bool bindless)
{
    LLVMValueRef res;

    enum glsl_sampler_dim dim;
    bool is_array;
    if (bindless) {
        dim = nir_intrinsic_image_dim(instr);
        is_array = nir_intrinsic_image_array(instr);
    } else {
        const struct glsl_type *type = get_image_deref(instr)->type;
        dim = glsl_get_sampler_dim(type);
        is_array = glsl_sampler_type_is_array(type);
    }

    struct waterfall_context wctx;
    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

    if (dim == GLSL_SAMPLER_DIM_BUF) {
        res = get_buffer_size(ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
    } else {
        struct ac_image_args args = { 0 };

        args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
        args.dmask = 0xf;
        args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
        args.opcode = ac_image_get_resinfo;
        args.lod = ctx->ac.i32_0;
        args.attributes = AC_FUNC_ATTR_READNONE;

        res = ac_build_image_opcode(&ctx->ac, &args);

        LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

        if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
            LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
            LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
            z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
            res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
        }

        if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
            LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
            res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
                                         ctx->ac.i32_1, "");
        }
    }
    return exit_waterfall(ctx, &wctx, res);
}
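/* Map NIR memory barriers to the hardware wait counters: LGKM covers LDS and
 * scalar/constant memory, VLOAD/VSTORE cover vector memory loads and stores.
 */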
static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
    unsigned wait_flags = 0;

    switch (instr->intrinsic) {
    case nir_intrinsic_memory_barrier:
    case nir_intrinsic_group_memory_barrier:
        wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
        break;
    case nir_intrinsic_memory_barrier_buffer:
    case nir_intrinsic_memory_barrier_image:
        wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
        break;
    case nir_intrinsic_memory_barrier_shared:
        wait_flags = AC_WAIT_LGKM;
        break;
    default:
        break;
    }

    ac_build_waitcnt(ac, wait_flags);
}
void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
    /* GFX6 only (thanks to a hw bug workaround):
     * The real barrier instruction isn't needed, because an entire patch
     * always fits into a single wave.
     */
    if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
        ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
        return;
    }
    ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
    LLVMValueRef cond;

    if (instr->intrinsic == nir_intrinsic_discard_if) {
        cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                             get_src(ctx, instr->src[0]),
                             ctx->ac.i32_0, "");
    } else {
        assert(instr->intrinsic == nir_intrinsic_discard);
        cond = ctx->ac.i1false;
    }

    ac_build_kill_if_false(&ctx->ac, cond);
}
static void emit_demote(struct ac_nir_context *ctx,
                        const nir_intrinsic_instr *instr)
{
    LLVMValueRef cond;

    if (instr->intrinsic == nir_intrinsic_demote_if) {
        cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                             get_src(ctx, instr->src[0]),
                             ctx->ac.i32_0, "");
    } else {
        assert(instr->intrinsic == nir_intrinsic_demote);
        cond = ctx->ac.i1false;
    }

    /* Kill immediately while maintaining WQM. */
    ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));

    LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
    mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
    LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
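/* tg_size bits [11:6] hold the wave index within the workgroup already scaled
 * by 64, so the masked value plus the lane id gives the flat local invocation
 * index; for wave32 the scaled value is halved first.
 */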
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
    LLVMValueRef result;
    LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
    result = LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

    if (ctx->ac.wave_size == 32)
        result = LLVMBuildLShr(ctx->ac.builder, result,
                               LLVMConstInt(ctx->ac.i32, 1, false), "");

    return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
    if (ctx->stage == MESA_SHADER_COMPUTE) {
        LLVMValueRef result;
        result = LLVMBuildAnd(ctx->ac.builder,
                              ac_get_arg(&ctx->ac, ctx->args->tg_size),
                              LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
        return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
    } else {
        return LLVMConstInt(ctx->ac.i32, 0, false);
    }
}
static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
    if (ctx->stage == MESA_SHADER_COMPUTE) {
        return LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
    } else {
        return LLVMConstInt(ctx->ac.i32, 1, false);
    }
}
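/* The first active invocation is the lowest set bit of the active-lane
 * ballot, i.e. cttz of the exec mask.
 */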
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
    LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
    const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

    /* The second argument is whether cttz(0) should be defined, but we do not care. */
    LLVMValueRef args[] = {active_set, ctx->ac.i1false};
    LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                             ctx->ac.iN_wavemask, args, 2,
                                             AC_FUNC_ATTR_NOUNWIND |
                                             AC_FUNC_ATTR_READNONE);

    return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
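/* LDS (shared memory) loads are done one component at a time through the LDS
 * pointer and then gathered back into a vector of the destination type.
 */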
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
    LLVMValueRef values[4], derived_ptr, index, ret;

    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                      instr->dest.ssa.bit_size);

    for (int chan = 0; chan < instr->num_components; chan++) {
        index = LLVMConstInt(ctx->ac.i32, chan, 0);
        derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
        values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
    }

    ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
    LLVMValueRef derived_ptr, data, index;
    LLVMBuilderRef builder = ctx->ac.builder;

    LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                      instr->src[0].ssa->bit_size);
    LLVMValueRef src = get_src(ctx, instr->src[0]);

    int writemask = nir_intrinsic_write_mask(instr);
    for (int chan = 0; chan < 4; chan++) {
        if (!(writemask & (1 << chan))) {
            continue;
        }
        data = ac_llvm_extract_elem(&ctx->ac, src, chan);
        index = LLVMConstInt(ctx->ac.i32, chan, 0);
        derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
        LLVMBuildStore(builder, data, derived_ptr);
    }
}
3289 static LLVMValueRef
visit_var_atomic(struct ac_nir_context
*ctx
,
3290 const nir_intrinsic_instr
*instr
,
3291 LLVMValueRef ptr
, int src_idx
)
3293 if (ctx
->ac
.postponed_kill
) {
3294 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
3295 ctx
->ac
.postponed_kill
, "");
3296 ac_build_ifcc(&ctx
->ac
, cond
, 7005);
3299 LLVMValueRef result
;
3300 LLVMValueRef src
= get_src(ctx
, instr
->src
[src_idx
]);
3302 const char *sync_scope
= LLVM_VERSION_MAJOR
>= 9 ? "workgroup-one-as" : "workgroup";
3304 if (instr
->src
[0].ssa
->parent_instr
->type
== nir_instr_type_deref
) {
3305 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
3306 if (deref
->mode
== nir_var_mem_global
) {
3307 /* use "singlethread" sync scope to implement relaxed ordering */
3308 sync_scope
= LLVM_VERSION_MAJOR
>= 9 ? "singlethread-one-as" : "singlethread";
3310 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(src
), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
)));
3311 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
, ptr_type
, "");
3315 if (instr
->intrinsic
== nir_intrinsic_shared_atomic_comp_swap
||
3316 instr
->intrinsic
== nir_intrinsic_deref_atomic_comp_swap
) {
3317 LLVMValueRef src1
= get_src(ctx
, instr
->src
[src_idx
+ 1]);
3318 result
= ac_build_atomic_cmp_xchg(&ctx
->ac
, ptr
, src
, src1
, sync_scope
);
3319 result
= LLVMBuildExtractValue(ctx
->ac
.builder
, result
, 0, "");
3321 LLVMAtomicRMWBinOp op
;
3322 switch (instr
->intrinsic
) {
3323 case nir_intrinsic_shared_atomic_add
:
3324 case nir_intrinsic_deref_atomic_add
:
3325 op
= LLVMAtomicRMWBinOpAdd
;
3327 case nir_intrinsic_shared_atomic_umin
:
3328 case nir_intrinsic_deref_atomic_umin
:
3329 op
= LLVMAtomicRMWBinOpUMin
;
3331 case nir_intrinsic_shared_atomic_umax
:
3332 case nir_intrinsic_deref_atomic_umax
:
3333 op
= LLVMAtomicRMWBinOpUMax
;
3335 case nir_intrinsic_shared_atomic_imin
:
3336 case nir_intrinsic_deref_atomic_imin
:
3337 op
= LLVMAtomicRMWBinOpMin
;
3339 case nir_intrinsic_shared_atomic_imax
:
3340 case nir_intrinsic_deref_atomic_imax
:
3341 op
= LLVMAtomicRMWBinOpMax
;
3343 case nir_intrinsic_shared_atomic_and
:
3344 case nir_intrinsic_deref_atomic_and
:
3345 op
= LLVMAtomicRMWBinOpAnd
;
3347 case nir_intrinsic_shared_atomic_or
:
3348 case nir_intrinsic_deref_atomic_or
:
3349 op
= LLVMAtomicRMWBinOpOr
;
3351 case nir_intrinsic_shared_atomic_xor
:
3352 case nir_intrinsic_deref_atomic_xor
:
3353 op
= LLVMAtomicRMWBinOpXor
;
3355 case nir_intrinsic_shared_atomic_exchange
:
3356 case nir_intrinsic_deref_atomic_exchange
:
3357 op
= LLVMAtomicRMWBinOpXchg
;
3363 result
= ac_build_atomic_rmw(&ctx
->ac
, op
, ptr
, ac_to_integer(&ctx
->ac
, src
), sync_scope
);
3366 if (ctx
->ac
.postponed_kill
)
3367 ac_build_endif(&ctx
->ac
, 7005);
static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
    LLVMValueRef values[2];
    LLVMValueRef pos[2];

    pos[0] = ac_to_float(&ctx->ac,
                         ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
    pos[1] = ac_to_float(&ctx->ac,
                         ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

    values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
    values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
    return ac_build_gather_values(&ctx->ac, values, 2);
}
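/* Pick the barycentric coordinate pair (perspective vs. linear, at
 * center/centroid/sample) that the hardware provides for the given GLSL
 * interpolation mode; flat inputs need no barycentrics and return NULL.
 */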
static LLVMValueRef
lookup_interp_param(struct ac_nir_context *ctx,
                    enum glsl_interp_mode interp, unsigned location)
{
    switch (interp) {
    case INTERP_MODE_FLAT:
    default:
        return NULL;
    case INTERP_MODE_SMOOTH:
    case INTERP_MODE_NONE:
        if (location == INTERP_CENTER)
            return ac_get_arg(&ctx->ac, ctx->args->persp_center);
        else if (location == INTERP_CENTROID)
            return ctx->abi->persp_centroid;
        else if (location == INTERP_SAMPLE)
            return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
        break;
    case INTERP_MODE_NOPERSPECTIVE:
        if (location == INTERP_CENTER)
            return ac_get_arg(&ctx->ac, ctx->args->linear_center);
        else if (location == INTERP_CENTROID)
            return ctx->abi->linear_centroid;
        else if (location == INTERP_SAMPLE)
            return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
        break;
    }
    return NULL;
}
static LLVMValueRef
barycentric_center(struct ac_nir_context *ctx,
                   unsigned mode)
{
    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_offset(struct ac_nir_context *ctx,
                   unsigned mode,
                   LLVMValueRef offset)
{
    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
    LLVMValueRef src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
    LLVMValueRef src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

    LLVMValueRef ij_out[2];
    LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

    /*
     * take the I then J parameters, and the DDX/Y for it, and
     * calculate the IJ inputs for the interpolator.
     * temp1 = ddx * offset/sample.x + I;
     * interp_param.I = ddy * offset/sample.y + temp1;
     * temp1 = ddx * offset/sample.x + J;
     * interp_param.J = ddy * offset/sample.y + temp1;
     */
    for (unsigned i = 0; i < 2; i++) {
        LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
        LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
        LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                      ddxy_out, ix_ll, "");
        LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                      ddxy_out, iy_ll, "");
        LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                         interp_param, ix_ll, "");
        LLVMValueRef temp1, temp2;

        interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                     ctx->ac.f32, "");

        temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
        temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

        ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                     temp2, ctx->ac.i32, "");
    }
    interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_centroid(struct ac_nir_context *ctx,
                     unsigned mode)
{
    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
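/* interpolateAtSample: fetch the sample position (in [0,1] within the pixel),
 * recenter it around the pixel center and reuse the at-offset path. If the
 * driver forces center sampling, fall back to the center barycentrics.
 */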
static LLVMValueRef
barycentric_at_sample(struct ac_nir_context *ctx,
                      unsigned mode,
                      LLVMValueRef sample_id)
{
    if (ctx->abi->interp_at_sample_force_center)
        return barycentric_center(ctx, mode);

    LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

    /* fetch sample ID */
    LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

    LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
    src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
    LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
    src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
    LLVMValueRef coords[] = { src_c0, src_c1 };
    LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

    return barycentric_offset(ctx, mode, offset);
}
static LLVMValueRef
barycentric_sample(struct ac_nir_context *ctx,
                   unsigned mode)
{
    LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
    return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_model(struct ac_nir_context *ctx)
{
    return LLVMBuildBitCast(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->pull_model),
                            ctx->ac.v3i32, "");
}
3508 static LLVMValueRef
load_interpolated_input(struct ac_nir_context
*ctx
,
3509 LLVMValueRef interp_param
,
3510 unsigned index
, unsigned comp_start
,
3511 unsigned num_components
,
3514 LLVMValueRef attr_number
= LLVMConstInt(ctx
->ac
.i32
, index
, false);
3515 LLVMValueRef interp_param_f
;
3517 interp_param_f
= LLVMBuildBitCast(ctx
->ac
.builder
,
3518 interp_param
, ctx
->ac
.v2f32
, "");
3519 LLVMValueRef i
= LLVMBuildExtractElement(
3520 ctx
->ac
.builder
, interp_param_f
, ctx
->ac
.i32_0
, "");
3521 LLVMValueRef j
= LLVMBuildExtractElement(
3522 ctx
->ac
.builder
, interp_param_f
, ctx
->ac
.i32_1
, "");
3524 /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
3525 if (ctx
->verified_interp
&&
3526 !_mesa_hash_table_search(ctx
->verified_interp
, interp_param
)) {
3527 LLVMValueRef args
[2];
3529 args
[1] = LLVMConstInt(ctx
->ac
.i32
, S_NAN
| Q_NAN
| N_INFINITY
| P_INFINITY
, false);
3530 LLVMValueRef cond
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.class.f32", ctx
->ac
.i1
,
3531 args
, 2, AC_FUNC_ATTR_READNONE
);
3532 ac_build_kill_if_false(&ctx
->ac
, LLVMBuildNot(ctx
->ac
.builder
, cond
, ""));
3533 _mesa_hash_table_insert(ctx
->verified_interp
, interp_param
, interp_param
);
3536 LLVMValueRef values
[4];
3537 assert(bitsize
== 16 || bitsize
== 32);
3538 for (unsigned comp
= 0; comp
< num_components
; comp
++) {
3539 LLVMValueRef llvm_chan
= LLVMConstInt(ctx
->ac
.i32
, comp_start
+ comp
, false);
3540 if (bitsize
== 16) {
3541 values
[comp
] = ac_build_fs_interp_f16(&ctx
->ac
, llvm_chan
, attr_number
,
3542 ac_get_arg(&ctx
->ac
, ctx
->args
->prim_mask
), i
, j
);
3544 values
[comp
] = ac_build_fs_interp(&ctx
->ac
, llvm_chan
, attr_number
,
3545 ac_get_arg(&ctx
->ac
, ctx
->args
->prim_mask
), i
, j
);
3549 return ac_to_integer(&ctx
->ac
, ac_build_gather_values(&ctx
->ac
, values
, num_components
));

static LLVMValueRef load_input(struct ac_nir_context *ctx,
			       nir_intrinsic_instr *instr)
{
	unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

	/* We only lower inputs for fragment shaders ATM */
	ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);

	assert(offset[0].i32 == 0);

	unsigned component = nir_intrinsic_component(instr);
	unsigned index = nir_intrinsic_base(instr);
	unsigned vertex_id = 2; /* P0 */

	if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
		nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

		switch (src0[0].i32) {
		case 0:
			vertex_id = 2;
			break;
		case 1:
			vertex_id = 0;
			break;
		case 2:
			vertex_id = 1;
			break;
		default:
			unreachable("Invalid vertex index");
		}
	}

	LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
	LLVMValueRef values[8];

	/* Each component of a 64-bit value takes up two GL-level channels. */
	unsigned num_components = instr->dest.ssa.num_components;
	unsigned bit_size = instr->dest.ssa.bit_size;
	unsigned channels =
		bit_size == 64 ? num_components * 2 : num_components;

	for (unsigned chan = 0; chan < channels; chan++) {
		if (component + chan > 4)
			attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
		values[chan] = ac_build_fs_interp_mov(&ctx->ac,
						      LLVMConstInt(ctx->ac.i32, vertex_id, false),
						      llvm_chan,
						      attr_number,
						      ac_get_arg(&ctx->ac, ctx->args->prim_mask));
		values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
						       bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
	}

	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
	if (bit_size == 64) {
		LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
			LLVMVectorType(ctx->ac.i64, num_components);
		result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
	}
	return result;
}
3616 static void visit_intrinsic(struct ac_nir_context
*ctx
,
3617 nir_intrinsic_instr
*instr
)
3619 LLVMValueRef result
= NULL
;
3621 switch (instr
->intrinsic
) {
3622 case nir_intrinsic_ballot
:
3623 result
= ac_build_ballot(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3624 if (ctx
->ac
.ballot_mask_bits
> ctx
->ac
.wave_size
)
3625 result
= LLVMBuildZExt(ctx
->ac
.builder
, result
, ctx
->ac
.iN_ballotmask
, "");
3627 case nir_intrinsic_read_invocation
:
3628 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
3629 get_src(ctx
, instr
->src
[1]));
3631 case nir_intrinsic_read_first_invocation
:
3632 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), NULL
);
3634 case nir_intrinsic_load_subgroup_invocation
:
3635 result
= ac_get_thread_id(&ctx
->ac
);
3637 case nir_intrinsic_load_work_group_id
: {
3638 LLVMValueRef values
[3];
3640 for (int i
= 0; i
< 3; i
++) {
3641 values
[i
] = ctx
->args
->workgroup_ids
[i
].used
?
3642 ac_get_arg(&ctx
->ac
, ctx
->args
->workgroup_ids
[i
]) : ctx
->ac
.i32_0
;
3645 result
= ac_build_gather_values(&ctx
->ac
, values
, 3);
3648 case nir_intrinsic_load_base_vertex
:
3649 case nir_intrinsic_load_first_vertex
:
3650 result
= ctx
->abi
->load_base_vertex(ctx
->abi
);
3652 case nir_intrinsic_load_local_group_size
:
3653 result
= ctx
->abi
->load_local_group_size(ctx
->abi
);
3655 case nir_intrinsic_load_vertex_id
:
3656 result
= LLVMBuildAdd(ctx
->ac
.builder
,
3657 ac_get_arg(&ctx
->ac
, ctx
->args
->vertex_id
),
3658 ac_get_arg(&ctx
->ac
, ctx
->args
->base_vertex
), "");
3660 case nir_intrinsic_load_vertex_id_zero_base
: {
3661 result
= ctx
->abi
->vertex_id
;
3664 case nir_intrinsic_load_local_invocation_id
: {
3665 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->local_invocation_ids
);
3668 case nir_intrinsic_load_base_instance
:
3669 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->start_instance
);
3671 case nir_intrinsic_load_draw_id
:
3672 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->draw_id
);
3674 case nir_intrinsic_load_view_index
:
3675 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->view_index
);
3677 case nir_intrinsic_load_invocation_id
:
3678 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3679 result
= ac_unpack_param(&ctx
->ac
,
3680 ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_rel_ids
),
3683 if (ctx
->ac
.chip_class
>= GFX10
) {
3684 result
= LLVMBuildAnd(ctx
->ac
.builder
,
3685 ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
),
3686 LLVMConstInt(ctx
->ac
.i32
, 127, 0), "");
3688 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
);
3692 case nir_intrinsic_load_primitive_id
:
3693 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
3694 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_prim_id
);
3695 } else if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3696 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_patch_id
);
3697 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
3698 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tes_patch_id
);
3700 fprintf(stderr
, "Unknown primitive id intrinsic: %d", ctx
->stage
);
3702 case nir_intrinsic_load_sample_id
:
3703 result
= ac_unpack_param(&ctx
->ac
,
3704 ac_get_arg(&ctx
->ac
, ctx
->args
->ancillary
),
3707 case nir_intrinsic_load_sample_pos
:
3708 result
= load_sample_pos(ctx
);
3710 case nir_intrinsic_load_sample_mask_in
:
3711 result
= ctx
->abi
->load_sample_mask_in(ctx
->abi
);
3713 case nir_intrinsic_load_frag_coord
: {
3714 LLVMValueRef values
[4] = {
3715 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[0]),
3716 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[1]),
3717 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[2]),
3718 ac_build_fdiv(&ctx
->ac
, ctx
->ac
.f32_1
,
3719 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[3]))
3721 result
= ac_to_integer(&ctx
->ac
,
3722 ac_build_gather_values(&ctx
->ac
, values
, 4));
3725 case nir_intrinsic_load_layer_id
:
3726 result
= ctx
->abi
->inputs
[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)];
3728 case nir_intrinsic_load_front_face
:
3729 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->front_face
);
3731 case nir_intrinsic_load_helper_invocation
:
3732 result
= ac_build_load_helper_invocation(&ctx
->ac
);
3734 case nir_intrinsic_is_helper_invocation
:
3735 result
= ac_build_is_helper_invocation(&ctx
->ac
);
3737 case nir_intrinsic_load_color0
:
3738 result
= ctx
->abi
->color0
;
3740 case nir_intrinsic_load_color1
:
3741 result
= ctx
->abi
->color1
;
3743 case nir_intrinsic_load_user_data_amd
:
3744 assert(LLVMTypeOf(ctx
->abi
->user_data
) == ctx
->ac
.v4i32
);
3745 result
= ctx
->abi
->user_data
;
3747 case nir_intrinsic_load_instance_id
:
3748 result
= ctx
->abi
->instance_id
;
3750 case nir_intrinsic_load_num_work_groups
:
3751 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->num_work_groups
);
3753 case nir_intrinsic_load_local_invocation_index
:
3754 result
= visit_load_local_invocation_index(ctx
);
3756 case nir_intrinsic_load_subgroup_id
:
3757 result
= visit_load_subgroup_id(ctx
);
3759 case nir_intrinsic_load_num_subgroups
:
3760 result
= visit_load_num_subgroups(ctx
);
3762 case nir_intrinsic_first_invocation
:
3763 result
= visit_first_invocation(ctx
);
3765 case nir_intrinsic_load_push_constant
:
3766 result
= visit_load_push_constant(ctx
, instr
);
3768 case nir_intrinsic_vulkan_resource_index
: {
3769 LLVMValueRef index
= get_src(ctx
, instr
->src
[0]);
3770 unsigned desc_set
= nir_intrinsic_desc_set(instr
);
3771 unsigned binding
= nir_intrinsic_binding(instr
);
3773 result
= ctx
->abi
->load_resource(ctx
->abi
, index
, desc_set
,
3777 case nir_intrinsic_vulkan_resource_reindex
:
3778 result
= visit_vulkan_resource_reindex(ctx
, instr
);
3780 case nir_intrinsic_store_ssbo
:
3781 visit_store_ssbo(ctx
, instr
);
3783 case nir_intrinsic_load_ssbo
:
3784 result
= visit_load_buffer(ctx
, instr
);
3786 case nir_intrinsic_ssbo_atomic_add
:
3787 case nir_intrinsic_ssbo_atomic_imin
:
3788 case nir_intrinsic_ssbo_atomic_umin
:
3789 case nir_intrinsic_ssbo_atomic_imax
:
3790 case nir_intrinsic_ssbo_atomic_umax
:
3791 case nir_intrinsic_ssbo_atomic_and
:
3792 case nir_intrinsic_ssbo_atomic_or
:
3793 case nir_intrinsic_ssbo_atomic_xor
:
3794 case nir_intrinsic_ssbo_atomic_exchange
:
3795 case nir_intrinsic_ssbo_atomic_comp_swap
:
3796 result
= visit_atomic_ssbo(ctx
, instr
);
3798 case nir_intrinsic_load_ubo
:
3799 result
= visit_load_ubo_buffer(ctx
, instr
);
3801 case nir_intrinsic_get_buffer_size
:
3802 result
= visit_get_buffer_size(ctx
, instr
);
3804 case nir_intrinsic_load_deref
:
3805 result
= visit_load_var(ctx
, instr
);
3807 case nir_intrinsic_store_deref
:
3808 visit_store_var(ctx
, instr
);
3810 case nir_intrinsic_load_shared
:
3811 result
= visit_load_shared(ctx
, instr
);
3813 case nir_intrinsic_store_shared
:
3814 visit_store_shared(ctx
, instr
);
3816 case nir_intrinsic_bindless_image_samples
:
3817 case nir_intrinsic_image_deref_samples
:
3818 result
= visit_image_samples(ctx
, instr
);
3820 case nir_intrinsic_bindless_image_load
:
3821 result
= visit_image_load(ctx
, instr
, true);
3823 case nir_intrinsic_image_deref_load
:
3824 result
= visit_image_load(ctx
, instr
, false);
3826 case nir_intrinsic_bindless_image_store
:
3827 visit_image_store(ctx
, instr
, true);
3829 case nir_intrinsic_image_deref_store
:
3830 visit_image_store(ctx
, instr
, false);
3832 case nir_intrinsic_bindless_image_atomic_add
:
3833 case nir_intrinsic_bindless_image_atomic_imin
:
3834 case nir_intrinsic_bindless_image_atomic_umin
:
3835 case nir_intrinsic_bindless_image_atomic_imax
:
3836 case nir_intrinsic_bindless_image_atomic_umax
:
3837 case nir_intrinsic_bindless_image_atomic_and
:
3838 case nir_intrinsic_bindless_image_atomic_or
:
3839 case nir_intrinsic_bindless_image_atomic_xor
:
3840 case nir_intrinsic_bindless_image_atomic_exchange
:
3841 case nir_intrinsic_bindless_image_atomic_comp_swap
:
3842 case nir_intrinsic_bindless_image_atomic_inc_wrap
:
3843 case nir_intrinsic_bindless_image_atomic_dec_wrap
:
3844 result
= visit_image_atomic(ctx
, instr
, true);
3846 case nir_intrinsic_image_deref_atomic_add
:
3847 case nir_intrinsic_image_deref_atomic_imin
:
3848 case nir_intrinsic_image_deref_atomic_umin
:
3849 case nir_intrinsic_image_deref_atomic_imax
:
3850 case nir_intrinsic_image_deref_atomic_umax
:
3851 case nir_intrinsic_image_deref_atomic_and
:
3852 case nir_intrinsic_image_deref_atomic_or
:
3853 case nir_intrinsic_image_deref_atomic_xor
:
3854 case nir_intrinsic_image_deref_atomic_exchange
:
3855 case nir_intrinsic_image_deref_atomic_comp_swap
:
3856 case nir_intrinsic_image_deref_atomic_inc_wrap
:
3857 case nir_intrinsic_image_deref_atomic_dec_wrap
:
3858 result
= visit_image_atomic(ctx
, instr
, false);
3860 case nir_intrinsic_bindless_image_size
:
3861 result
= visit_image_size(ctx
, instr
, true);
3863 case nir_intrinsic_image_deref_size
:
3864 result
= visit_image_size(ctx
, instr
, false);
3866 case nir_intrinsic_shader_clock
:
3867 result
= ac_build_shader_clock(&ctx
->ac
);
3869 case nir_intrinsic_discard
:
3870 case nir_intrinsic_discard_if
:
3871 emit_discard(ctx
, instr
);
3873 case nir_intrinsic_demote
:
3874 case nir_intrinsic_demote_if
:
3875 emit_demote(ctx
, instr
);
3877 case nir_intrinsic_memory_barrier
:
3878 case nir_intrinsic_group_memory_barrier
:
3879 case nir_intrinsic_memory_barrier_buffer
:
3880 case nir_intrinsic_memory_barrier_image
:
3881 case nir_intrinsic_memory_barrier_shared
:
3882 emit_membar(&ctx
->ac
, instr
);
3884 case nir_intrinsic_memory_barrier_tcs_patch
:
3886 case nir_intrinsic_control_barrier
:
3887 ac_emit_barrier(&ctx
->ac
, ctx
->stage
);
3889 case nir_intrinsic_shared_atomic_add
:
3890 case nir_intrinsic_shared_atomic_imin
:
3891 case nir_intrinsic_shared_atomic_umin
:
3892 case nir_intrinsic_shared_atomic_imax
:
3893 case nir_intrinsic_shared_atomic_umax
:
3894 case nir_intrinsic_shared_atomic_and
:
3895 case nir_intrinsic_shared_atomic_or
:
3896 case nir_intrinsic_shared_atomic_xor
:
3897 case nir_intrinsic_shared_atomic_exchange
:
3898 case nir_intrinsic_shared_atomic_comp_swap
: {
3899 LLVMValueRef ptr
= get_memory_ptr(ctx
, instr
->src
[0],
3900 instr
->src
[1].ssa
->bit_size
);
3901 result
= visit_var_atomic(ctx
, instr
, ptr
, 1);
3904 case nir_intrinsic_deref_atomic_add
:
3905 case nir_intrinsic_deref_atomic_imin
:
3906 case nir_intrinsic_deref_atomic_umin
:
3907 case nir_intrinsic_deref_atomic_imax
:
3908 case nir_intrinsic_deref_atomic_umax
:
3909 case nir_intrinsic_deref_atomic_and
:
3910 case nir_intrinsic_deref_atomic_or
:
3911 case nir_intrinsic_deref_atomic_xor
:
3912 case nir_intrinsic_deref_atomic_exchange
:
3913 case nir_intrinsic_deref_atomic_comp_swap
: {
3914 LLVMValueRef ptr
= get_src(ctx
, instr
->src
[0]);
3915 result
= visit_var_atomic(ctx
, instr
, ptr
, 1);
3918 case nir_intrinsic_load_barycentric_pixel
:
3919 result
= barycentric_center(ctx
, nir_intrinsic_interp_mode(instr
));
3921 case nir_intrinsic_load_barycentric_centroid
:
3922 result
= barycentric_centroid(ctx
, nir_intrinsic_interp_mode(instr
));
3924 case nir_intrinsic_load_barycentric_sample
:
3925 result
= barycentric_sample(ctx
, nir_intrinsic_interp_mode(instr
));
3927 case nir_intrinsic_load_barycentric_model
:
3928 result
= barycentric_model(ctx
);
3930 case nir_intrinsic_load_barycentric_at_offset
: {
3931 LLVMValueRef offset
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3932 result
= barycentric_offset(ctx
, nir_intrinsic_interp_mode(instr
), offset
);
3935 case nir_intrinsic_load_barycentric_at_sample
: {
3936 LLVMValueRef sample_id
= get_src(ctx
, instr
->src
[0]);
3937 result
= barycentric_at_sample(ctx
, nir_intrinsic_interp_mode(instr
), sample_id
);
3940 case nir_intrinsic_load_interpolated_input
: {
3941 /* We assume any indirect loads have been lowered away */
3942 ASSERTED nir_const_value
*offset
= nir_src_as_const_value(instr
->src
[1]);
3944 assert(offset
[0].i32
== 0);
3946 LLVMValueRef interp_param
= get_src(ctx
, instr
->src
[0]);
3947 unsigned index
= nir_intrinsic_base(instr
);
3948 unsigned component
= nir_intrinsic_component(instr
);
3949 result
= load_interpolated_input(ctx
, interp_param
, index
,
3951 instr
->dest
.ssa
.num_components
,
3952 instr
->dest
.ssa
.bit_size
);
3955 case nir_intrinsic_load_input
:
3956 case nir_intrinsic_load_input_vertex
:
3957 result
= load_input(ctx
, instr
);
3959 case nir_intrinsic_emit_vertex
:
3960 ctx
->abi
->emit_vertex(ctx
->abi
, nir_intrinsic_stream_id(instr
), ctx
->abi
->outputs
);
3962 case nir_intrinsic_emit_vertex_with_counter
: {
3963 unsigned stream
= nir_intrinsic_stream_id(instr
);
3964 LLVMValueRef next_vertex
= get_src(ctx
, instr
->src
[0]);
3965 ctx
->abi
->emit_vertex_with_counter(ctx
->abi
, stream
,
3970 case nir_intrinsic_end_primitive
:
3971 case nir_intrinsic_end_primitive_with_counter
:
3972 ctx
->abi
->emit_primitive(ctx
->abi
, nir_intrinsic_stream_id(instr
));
3974 case nir_intrinsic_load_tess_coord
:
3975 result
= ctx
->abi
->load_tess_coord(ctx
->abi
);
3977 case nir_intrinsic_load_tess_level_outer
:
3978 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_OUTER
, false);
3980 case nir_intrinsic_load_tess_level_inner
:
3981 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_INNER
, false);
3983 case nir_intrinsic_load_tess_level_outer_default
:
3984 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_OUTER
, true);
3986 case nir_intrinsic_load_tess_level_inner_default
:
3987 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_INNER
, true);
3989 case nir_intrinsic_load_patch_vertices_in
:
3990 result
= ctx
->abi
->load_patch_vertices_in(ctx
->abi
);
3992 case nir_intrinsic_vote_all
: {
3993 LLVMValueRef tmp
= ac_build_vote_all(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3994 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
3997 case nir_intrinsic_vote_any
: {
3998 LLVMValueRef tmp
= ac_build_vote_any(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3999 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
4002 case nir_intrinsic_shuffle
:
4003 if (ctx
->ac
.chip_class
== GFX8
||
4004 ctx
->ac
.chip_class
== GFX9
||
4005 (ctx
->ac
.chip_class
== GFX10
&& ctx
->ac
.wave_size
== 32)) {
4006 result
= ac_build_shuffle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4007 get_src(ctx
, instr
->src
[1]));
4009 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
4010 LLVMValueRef index
= get_src(ctx
, instr
->src
[1]);
4011 LLVMTypeRef type
= LLVMTypeOf(src
);
4012 struct waterfall_context wctx
;
4013 LLVMValueRef index_val
;
4015 index_val
= enter_waterfall(ctx
, &wctx
, index
, true);
4017 src
= LLVMBuildZExt(ctx
->ac
.builder
, src
,
4020 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.readlane",
4022 (LLVMValueRef
[]) { src
, index_val
}, 2,
4023 AC_FUNC_ATTR_READNONE
|
4024 AC_FUNC_ATTR_CONVERGENT
);
4026 result
= LLVMBuildTrunc(ctx
->ac
.builder
, result
, type
, "");
4028 result
= exit_waterfall(ctx
, &wctx
, result
);
4031 case nir_intrinsic_reduce
:
4032 result
= ac_build_reduce(&ctx
->ac
,
4033 get_src(ctx
, instr
->src
[0]),
4034 instr
->const_index
[0],
4035 instr
->const_index
[1]);
4037 case nir_intrinsic_inclusive_scan
:
4038 result
= ac_build_inclusive_scan(&ctx
->ac
,
4039 get_src(ctx
, instr
->src
[0]),
4040 instr
->const_index
[0]);
4042 case nir_intrinsic_exclusive_scan
:
4043 result
= ac_build_exclusive_scan(&ctx
->ac
,
4044 get_src(ctx
, instr
->src
[0]),
4045 instr
->const_index
[0]);
4047 case nir_intrinsic_quad_broadcast
: {
4048 unsigned lane
= nir_src_as_uint(instr
->src
[1]);
4049 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4050 lane
, lane
, lane
, lane
);
4053 case nir_intrinsic_quad_swap_horizontal
:
4054 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 1, 0, 3 ,2);
4056 case nir_intrinsic_quad_swap_vertical
:
4057 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 2, 3, 0 ,1);
4059 case nir_intrinsic_quad_swap_diagonal
:
4060 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 3, 2, 1 ,0);
4062 case nir_intrinsic_quad_swizzle_amd
: {
4063 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
4064 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4065 mask
& 0x3, (mask
>> 2) & 0x3,
4066 (mask
>> 4) & 0x3, (mask
>> 6) & 0x3);
4069 case nir_intrinsic_masked_swizzle_amd
: {
4070 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
4071 result
= ac_build_ds_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), mask
);
4074 case nir_intrinsic_write_invocation_amd
:
4075 result
= ac_build_writelane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4076 get_src(ctx
, instr
->src
[1]),
4077 get_src(ctx
, instr
->src
[2]));
4079 case nir_intrinsic_mbcnt_amd
:
4080 result
= ac_build_mbcnt(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4082 case nir_intrinsic_load_scratch
: {
4083 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
4084 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
4086 LLVMTypeRef comp_type
=
4087 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
4088 LLVMTypeRef vec_type
=
4089 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
4090 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
4091 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4092 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4093 LLVMPointerType(vec_type
, addr_space
), "");
4094 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
4097 case nir_intrinsic_store_scratch
: {
4098 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
4099 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
4101 LLVMTypeRef comp_type
=
4102 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->src
[0].ssa
->bit_size
);
4103 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4104 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4105 LLVMPointerType(comp_type
, addr_space
), "");
4106 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
4107 unsigned wrmask
= nir_intrinsic_write_mask(instr
);
4110 u_bit_scan_consecutive_range(&wrmask
, &start
, &count
);
4112 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, start
, false);
4113 LLVMValueRef offset_ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ptr
, &offset
, 1, "");
4114 LLVMTypeRef vec_type
=
4115 count
== 1 ? comp_type
: LLVMVectorType(comp_type
, count
);
4116 offset_ptr
= LLVMBuildBitCast(ctx
->ac
.builder
,
4118 LLVMPointerType(vec_type
, addr_space
),
4120 LLVMValueRef offset_src
=
4121 ac_extract_components(&ctx
->ac
, src
, start
, count
);
4122 LLVMBuildStore(ctx
->ac
.builder
, offset_src
, offset_ptr
);
4126 case nir_intrinsic_load_constant
: {
4127 unsigned base
= nir_intrinsic_base(instr
);
4128 unsigned range
= nir_intrinsic_range(instr
);
4130 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
4131 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
,
4132 LLVMConstInt(ctx
->ac
.i32
, base
, false), "");
4134 /* Clamp the offset to avoid out-of-bound access because global
4135 * instructions can't handle them.
4137 LLVMValueRef size
= LLVMConstInt(ctx
->ac
.i32
, base
+ range
, false);
4138 LLVMValueRef cond
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntULT
,
4140 offset
= LLVMBuildSelect(ctx
->ac
.builder
, cond
, offset
, size
, "");
4142 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->constant_data
,
4144 LLVMTypeRef comp_type
=
4145 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
4146 LLVMTypeRef vec_type
=
4147 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
4148 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
4149 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4150 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4151 LLVMPointerType(vec_type
, addr_space
), "");
4152 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
		break;
	}
	default:
		fprintf(stderr, "Unknown intrinsic: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		break;
	}
	if (result) {
		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}

static LLVMValueRef get_bindless_index_from_uniform(struct ac_nir_context *ctx,
						    unsigned base_index,
						    unsigned constant_index,
						    LLVMValueRef dynamic_index)
{
	LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
	LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
					  LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

	/* Bindless uniforms are 64-bit so multiply the index by 8 */
	index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

	LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
						NULL, 0, 0, true, true);

	return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
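
/* Worked example for the addressing above (illustrative numbers only):
 * base_index = 2, constant_index = 1, dynamic_index = 3
 *   offset = 2 * 4 + (3 + 1) * 8 = 40
 * i.e. the 64-bit bindless handle is read 40 bytes into UBO 0.
 */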

struct sampler_desc_address {
	unsigned descriptor_set;
	unsigned base_index; /* binding in vulkan */
	unsigned constant_index;
	LLVMValueRef dynamic_index;
	bool image;
	bool bindless;
};

static struct sampler_desc_address
get_sampler_desc_internal(struct ac_nir_context *ctx,
			  nir_deref_instr *deref_instr,
			  const nir_instr *instr,
			  bool image)
{
	LLVMValueRef index = NULL;
	unsigned constant_index = 0;
	unsigned descriptor_set;
	unsigned base_index;
	bool bindless = false;

	if (!deref_instr) {
		descriptor_set = 0;
		if (image) {
			nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
			base_index = 0;
			bindless = true;
			index = get_src(ctx, img_instr->src[0]);
		} else {
			nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
			int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
								 nir_tex_src_sampler_handle);
			if (sampSrcIdx != -1) {
				base_index = 0;
				bindless = true;
				index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
			} else {
				assert(tex_instr && !image);
				base_index = tex_instr->sampler_index;
			}
		}
	} else {
		while(deref_instr->deref_type != nir_deref_type_var) {
			if (deref_instr->deref_type == nir_deref_type_array) {
				unsigned array_size = glsl_get_aoa_size(deref_instr->type);
				if (!array_size)
					array_size = 1;

				if (nir_src_is_const(deref_instr->arr.index)) {
					constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
				} else {
					LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

					indirect = LLVMBuildMul(ctx->ac.builder, indirect,
								LLVMConstInt(ctx->ac.i32, array_size, false), "");

					if (!index)
						index = indirect;
					else
						index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
				}

				deref_instr = nir_src_as_deref(deref_instr->parent);
			} else if (deref_instr->deref_type == nir_deref_type_struct) {
				unsigned sidx = deref_instr->strct.index;
				deref_instr = nir_src_as_deref(deref_instr->parent);
				constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
			} else {
				unreachable("Unsupported deref type");
			}
		}
		descriptor_set = deref_instr->var->data.descriptor_set;

		if (deref_instr->var->data.bindless) {
			/* For now just assert on unhandled variable types */
			assert(deref_instr->var->data.mode == nir_var_uniform);

			base_index = deref_instr->var->data.driver_location;
			bindless = true;

			index = index ? index : ctx->ac.i32_0;
			index = get_bindless_index_from_uniform(ctx, base_index,
								constant_index, index);
		} else
			base_index = deref_instr->var->data.binding;
	}
	return (struct sampler_desc_address) {
		.descriptor_set = descriptor_set,
		.base_index = base_index,
		.constant_index = constant_index,
		.dynamic_index = index,
		.image = image,
		.bindless = bindless,
	};
}

/* Extract any possibly divergent index into a separate value that can be fed
 * into get_sampler_desc with the same arguments. */
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
					   nir_deref_instr *deref_instr,
					   const nir_instr *instr,
					   bool image)
{
	struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
	return addr.dynamic_index;
}

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     LLVMValueRef index,
				     bool image, bool write)
{
	struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
	return ctx->abi->load_sampler_desc(ctx->abi,
					   addr.descriptor_set,
					   addr.base_index,
					   addr.constant_index, index,
					   desc_type, addr.image, write, addr.bindless);
}
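
/* Typical use of the two helpers above (illustrative sketch, mirroring
 * tex_fetch_ptrs() below): the possibly divergent index is extracted first so
 * it can be made uniform with a waterfall loop, and only then is the
 * descriptor actually loaded:
 *
 *   LLVMValueRef idx = get_sampler_desc_index(ctx, deref, &instr->instr, false);
 *   if (non_uniform)
 *           idx = enter_waterfall(ctx, wctx, idx, true);
 *   LLVMValueRef desc = get_sampler_desc(ctx, deref, AC_DESC_IMAGE,
 *                                        &instr->instr, idx, false, false);
 */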

/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8+:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef img7, samp0;

	if (ctx->ac.chip_class >= GFX8)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->ac.i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					LLVMConstInt(ctx->ac.i32, 0, 0), "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
4338 static void tex_fetch_ptrs(struct ac_nir_context
*ctx
,
4339 nir_tex_instr
*instr
,
4340 struct waterfall_context
*wctx
,
4341 LLVMValueRef
*res_ptr
, LLVMValueRef
*samp_ptr
,
4342 LLVMValueRef
*fmask_ptr
)
4344 nir_deref_instr
*texture_deref_instr
= NULL
;
4345 nir_deref_instr
*sampler_deref_instr
= NULL
;
4348 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
4349 switch (instr
->src
[i
].src_type
) {
4350 case nir_tex_src_texture_deref
:
4351 texture_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
4353 case nir_tex_src_sampler_deref
:
4354 sampler_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
4356 case nir_tex_src_plane
:
4357 plane
= nir_src_as_int(instr
->src
[i
].src
);
4364 LLVMValueRef texture_dynamic_index
= get_sampler_desc_index(ctx
, texture_deref_instr
,
4365 &instr
->instr
, false);
4366 if (!sampler_deref_instr
)
4367 sampler_deref_instr
= texture_deref_instr
;
4369 LLVMValueRef sampler_dynamic_index
= get_sampler_desc_index(ctx
, sampler_deref_instr
,
4370 &instr
->instr
, false);
4371 if (instr
->texture_non_uniform
)
4372 texture_dynamic_index
= enter_waterfall(ctx
, wctx
+ 0, texture_dynamic_index
, true);
4374 if (instr
->sampler_non_uniform
)
4375 sampler_dynamic_index
= enter_waterfall(ctx
, wctx
+ 1, sampler_dynamic_index
, true);
4377 enum ac_descriptor_type main_descriptor
= instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
? AC_DESC_BUFFER
: AC_DESC_IMAGE
;
4380 assert(instr
->op
!= nir_texop_txf_ms
&&
4381 instr
->op
!= nir_texop_samples_identical
);
4382 assert(instr
->sampler_dim
!= GLSL_SAMPLER_DIM_BUF
);
4384 main_descriptor
= AC_DESC_PLANE_0
+ plane
;
4387 if (instr
->op
== nir_texop_fragment_mask_fetch
) {
4388 /* The fragment mask is fetched from the compressed
4389 * multisampled surface.
4391 main_descriptor
= AC_DESC_FMASK
;
4394 *res_ptr
= get_sampler_desc(ctx
, texture_deref_instr
, main_descriptor
, &instr
->instr
,
4395 texture_dynamic_index
, false, false);
4398 *samp_ptr
= get_sampler_desc(ctx
, sampler_deref_instr
, AC_DESC_SAMPLER
, &instr
->instr
,
4399 sampler_dynamic_index
, false, false);
4400 if (instr
->sampler_dim
< GLSL_SAMPLER_DIM_RECT
)
4401 *samp_ptr
= sici_fix_sampler_aniso(ctx
, *res_ptr
, *samp_ptr
);
4403 if (fmask_ptr
&& (instr
->op
== nir_texop_txf_ms
||
4404 instr
->op
== nir_texop_samples_identical
))
4405 *fmask_ptr
= get_sampler_desc(ctx
, texture_deref_instr
, AC_DESC_FMASK
,
4406 &instr
->instr
, texture_dynamic_index
, false, false);

static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
				      LLVMValueRef coord)
{
	coord = ac_to_float(ctx, coord);
	coord = ac_build_round(ctx, coord);
	coord = ac_to_integer(ctx, coord);
	return coord;
}
4418 static void visit_tex(struct ac_nir_context
*ctx
, nir_tex_instr
*instr
)
4420 LLVMValueRef result
= NULL
;
4421 struct ac_image_args args
= { 0 };
4422 LLVMValueRef fmask_ptr
= NULL
, sample_index
= NULL
;
4423 LLVMValueRef ddx
= NULL
, ddy
= NULL
;
4424 unsigned offset_src
= 0;
4425 struct waterfall_context wctx
[2] = {{{0}}};
4427 tex_fetch_ptrs(ctx
, instr
, wctx
, &args
.resource
, &args
.sampler
, &fmask_ptr
);
4429 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
4430 switch (instr
->src
[i
].src_type
) {
4431 case nir_tex_src_coord
: {
4432 LLVMValueRef coord
= get_src(ctx
, instr
->src
[i
].src
);
4433 for (unsigned chan
= 0; chan
< instr
->coord_components
; ++chan
)
4434 args
.coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, coord
, chan
);
4437 case nir_tex_src_projector
:
4439 case nir_tex_src_comparator
:
4440 if (instr
->is_shadow
) {
4441 args
.compare
= get_src(ctx
, instr
->src
[i
].src
);
4442 args
.compare
= ac_to_float(&ctx
->ac
, args
.compare
);
4445 case nir_tex_src_offset
:
4446 args
.offset
= get_src(ctx
, instr
->src
[i
].src
);
4449 case nir_tex_src_bias
:
4450 if (instr
->op
== nir_texop_txb
)
4451 args
.bias
= get_src(ctx
, instr
->src
[i
].src
);
4453 case nir_tex_src_lod
: {
4454 if (nir_src_is_const(instr
->src
[i
].src
) && nir_src_as_uint(instr
->src
[i
].src
) == 0)
4455 args
.level_zero
= true;
4457 args
.lod
= get_src(ctx
, instr
->src
[i
].src
);
4460 case nir_tex_src_ms_index
:
4461 sample_index
= get_src(ctx
, instr
->src
[i
].src
);
4463 case nir_tex_src_ms_mcs
:
4465 case nir_tex_src_ddx
:
4466 ddx
= get_src(ctx
, instr
->src
[i
].src
);
4468 case nir_tex_src_ddy
:
4469 ddy
= get_src(ctx
, instr
->src
[i
].src
);
4471 case nir_tex_src_texture_offset
:
4472 case nir_tex_src_sampler_offset
:
4473 case nir_tex_src_plane
:
4479 if (instr
->op
== nir_texop_txs
&& instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
4480 result
= get_buffer_size(ctx
, args
.resource
, true);
4484 if (instr
->op
== nir_texop_texture_samples
) {
4485 LLVMValueRef res
, samples
, is_msaa
;
4486 LLVMValueRef default_sample
;
4488 res
= LLVMBuildBitCast(ctx
->ac
.builder
, args
.resource
, ctx
->ac
.v8i32
, "");
4489 samples
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
,
4490 LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4491 is_msaa
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4492 LLVMConstInt(ctx
->ac
.i32
, 28, false), "");
4493 is_msaa
= LLVMBuildAnd(ctx
->ac
.builder
, is_msaa
,
4494 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4495 is_msaa
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, is_msaa
,
4496 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4498 samples
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4499 LLVMConstInt(ctx
->ac
.i32
, 16, false), "");
4500 samples
= LLVMBuildAnd(ctx
->ac
.builder
, samples
,
4501 LLVMConstInt(ctx
->ac
.i32
, 0xf, false), "");
4502 samples
= LLVMBuildShl(ctx
->ac
.builder
, ctx
->ac
.i32_1
,
4505 if (ctx
->abi
->robust_buffer_access
) {
4506 LLVMValueRef dword1
, is_null_descriptor
;
4508 /* Extract the second dword of the descriptor, if it's
4509 * all zero, then it's a null descriptor.
4511 dword1
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
,
4512 LLVMConstInt(ctx
->ac
.i32
, 1, false), "");
4513 is_null_descriptor
=
4514 LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, dword1
,
4515 LLVMConstInt(ctx
->ac
.i32
, 0, false), "");
4517 LLVMBuildSelect(ctx
->ac
.builder
, is_null_descriptor
,
4518 ctx
->ac
.i32_0
, ctx
->ac
.i32_1
, "");
4520 default_sample
= ctx
->ac
.i32_1
;
4523 samples
= LLVMBuildSelect(ctx
->ac
.builder
, is_msaa
, samples
,
4524 default_sample
, "");
4529 if (args
.offset
&& instr
->op
!= nir_texop_txf
&& instr
->op
!= nir_texop_txf_ms
) {
4530 LLVMValueRef offset
[3], pack
;
4531 for (unsigned chan
= 0; chan
< 3; ++chan
)
4532 offset
[chan
] = ctx
->ac
.i32_0
;
4534 unsigned num_components
= ac_get_llvm_num_components(args
.offset
);
4535 for (unsigned chan
= 0; chan
< num_components
; chan
++) {
4536 offset
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, args
.offset
, chan
);
4537 offset
[chan
] = LLVMBuildAnd(ctx
->ac
.builder
, offset
[chan
],
4538 LLVMConstInt(ctx
->ac
.i32
, 0x3f, false), "");
4540 offset
[chan
] = LLVMBuildShl(ctx
->ac
.builder
, offset
[chan
],
4541 LLVMConstInt(ctx
->ac
.i32
, chan
* 8, false), "");
4543 pack
= LLVMBuildOr(ctx
->ac
.builder
, offset
[0], offset
[1], "");
4544 pack
= LLVMBuildOr(ctx
->ac
.builder
, pack
, offset
[2], "");
4548 /* Section 8.23.1 (Depth Texture Comparison Mode) of the
4549 * OpenGL 4.5 spec says:
4551 * "If the texture’s internal format indicates a fixed-point
4552 * depth texture, then D_t and D_ref are clamped to the
4553 * range [0, 1]; otherwise no clamping is performed."
4555 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
4556 * so the depth comparison value isn't clamped for Z16 and
4557 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
4558 * an explicitly clamped 32-bit float format.
4561 ctx
->ac
.chip_class
>= GFX8
&&
4562 ctx
->ac
.chip_class
<= GFX9
&&
4563 ctx
->abi
->clamp_shadow_reference
) {
4564 LLVMValueRef upgraded
, clamped
;
4566 upgraded
= LLVMBuildExtractElement(ctx
->ac
.builder
, args
.sampler
,
4567 LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4568 upgraded
= LLVMBuildLShr(ctx
->ac
.builder
, upgraded
,
4569 LLVMConstInt(ctx
->ac
.i32
, 29, false), "");
4570 upgraded
= LLVMBuildTrunc(ctx
->ac
.builder
, upgraded
, ctx
->ac
.i1
, "");
4571 clamped
= ac_build_clamp(&ctx
->ac
, args
.compare
);
4572 args
.compare
= LLVMBuildSelect(ctx
->ac
.builder
, upgraded
, clamped
,
4576 /* pack derivatives */
4578 int num_src_deriv_channels
, num_dest_deriv_channels
;
4579 switch (instr
->sampler_dim
) {
4580 case GLSL_SAMPLER_DIM_3D
:
4581 case GLSL_SAMPLER_DIM_CUBE
:
4582 num_src_deriv_channels
= 3;
4583 num_dest_deriv_channels
= 3;
4585 case GLSL_SAMPLER_DIM_2D
:
4587 num_src_deriv_channels
= 2;
4588 num_dest_deriv_channels
= 2;
4590 case GLSL_SAMPLER_DIM_1D
:
4591 num_src_deriv_channels
= 1;
4592 if (ctx
->ac
.chip_class
== GFX9
) {
4593 num_dest_deriv_channels
= 2;
4595 num_dest_deriv_channels
= 1;
4600 for (unsigned i
= 0; i
< num_src_deriv_channels
; i
++) {
4601 args
.derivs
[i
] = ac_to_float(&ctx
->ac
,
4602 ac_llvm_extract_elem(&ctx
->ac
, ddx
, i
));
4603 args
.derivs
[num_dest_deriv_channels
+ i
] = ac_to_float(&ctx
->ac
,
4604 ac_llvm_extract_elem(&ctx
->ac
, ddy
, i
));
4606 for (unsigned i
= num_src_deriv_channels
; i
< num_dest_deriv_channels
; i
++) {
4607 args
.derivs
[i
] = ctx
->ac
.f32_0
;
4608 args
.derivs
[num_dest_deriv_channels
+ i
] = ctx
->ac
.f32_0
;
4612 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&& args
.coords
[0]) {
4613 for (unsigned chan
= 0; chan
< instr
->coord_components
; chan
++)
4614 args
.coords
[chan
] = ac_to_float(&ctx
->ac
, args
.coords
[chan
]);
4615 if (instr
->coord_components
== 3)
4616 args
.coords
[3] = LLVMGetUndef(ctx
->ac
.f32
);
4617 ac_prepare_cube_coords(&ctx
->ac
,
4618 instr
->op
== nir_texop_txd
, instr
->is_array
,
4619 instr
->op
== nir_texop_lod
, args
.coords
, args
.derivs
);
4622 /* Texture coordinates fixups */
4623 if (instr
->coord_components
> 1 &&
4624 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4626 instr
->op
!= nir_texop_txf
) {
4627 args
.coords
[1] = apply_round_slice(&ctx
->ac
, args
.coords
[1]);
4630 if (instr
->coord_components
> 2 &&
4631 (instr
->sampler_dim
== GLSL_SAMPLER_DIM_2D
||
4632 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
||
4633 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS
||
4634 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
) &&
4636 instr
->op
!= nir_texop_txf
&&
4637 instr
->op
!= nir_texop_txf_ms
&&
4638 instr
->op
!= nir_texop_fragment_fetch
&&
4639 instr
->op
!= nir_texop_fragment_mask_fetch
) {
4640 args
.coords
[2] = apply_round_slice(&ctx
->ac
, args
.coords
[2]);
4643 if (ctx
->ac
.chip_class
== GFX9
&&
4644 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4645 instr
->op
!= nir_texop_lod
) {
4646 LLVMValueRef filler
;
4647 if (instr
->op
== nir_texop_txf
)
4648 filler
= ctx
->ac
.i32_0
;
4650 filler
= LLVMConstReal(ctx
->ac
.f32
, 0.5);
4652 if (instr
->is_array
)
4653 args
.coords
[2] = args
.coords
[1];
4654 args
.coords
[1] = filler
;
4657 /* Pack sample index */
4658 if (sample_index
&& (instr
->op
== nir_texop_txf_ms
||
4659 instr
->op
== nir_texop_fragment_fetch
))
4660 args
.coords
[instr
->coord_components
] = sample_index
;
4662 if (instr
->op
== nir_texop_samples_identical
) {
4663 struct ac_image_args txf_args
= { 0 };
4664 memcpy(txf_args
.coords
, args
.coords
, sizeof(txf_args
.coords
));
4666 txf_args
.dmask
= 0xf;
4667 txf_args
.resource
= fmask_ptr
;
4668 txf_args
.dim
= instr
->is_array
? ac_image_2darray
: ac_image_2d
;
4669 result
= build_tex_intrinsic(ctx
, instr
, &txf_args
);
4671 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
4672 result
= emit_int_cmp(&ctx
->ac
, LLVMIntEQ
, result
, ctx
->ac
.i32_0
);
4676 if ((instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
||
4677 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
) &&
4678 instr
->op
!= nir_texop_txs
&&
4679 instr
->op
!= nir_texop_fragment_fetch
&&
4680 instr
->op
!= nir_texop_fragment_mask_fetch
) {
4681 unsigned sample_chan
= instr
->is_array
? 3 : 2;
4682 args
.coords
[sample_chan
] = adjust_sample_index_using_fmask(
4683 &ctx
->ac
, args
.coords
[0], args
.coords
[1],
4684 instr
->is_array
? args
.coords
[2] : NULL
,
4685 args
.coords
[sample_chan
], fmask_ptr
);
4688 if (args
.offset
&& (instr
->op
== nir_texop_txf
|| instr
->op
== nir_texop_txf_ms
)) {
4689 int num_offsets
= instr
->src
[offset_src
].src
.ssa
->num_components
;
4690 num_offsets
= MIN2(num_offsets
, instr
->coord_components
);
4691 for (unsigned i
= 0; i
< num_offsets
; ++i
) {
4692 args
.coords
[i
] = LLVMBuildAdd(
4693 ctx
->ac
.builder
, args
.coords
[i
],
4694 LLVMConstInt(ctx
->ac
.i32
, nir_src_comp_as_uint(instr
->src
[offset_src
].src
, i
), false), "");
4699 /* DMASK was repurposed for GATHER4. 4 components are always
4700 * returned and DMASK works like a swizzle - it selects
4701 * the component to fetch. The only valid DMASK values are
4702 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4703 * (red,red,red,red) etc.) The ISA document doesn't mention
4707 if (instr
->op
== nir_texop_tg4
) {
4708 if (instr
->is_shadow
)
4711 args
.dmask
= 1 << instr
->component
;
4714 if (instr
->sampler_dim
!= GLSL_SAMPLER_DIM_BUF
) {
4715 args
.dim
= ac_get_sampler_dim(ctx
->ac
.chip_class
, instr
->sampler_dim
, instr
->is_array
);
4716 args
.unorm
= instr
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
;
4719 /* Adjust the number of coordinates because we only need (x,y) for 2D
4720 * multisampled images and (x,y,layer) for 2D multisampled layered
4721 * images or for multisampled input attachments.
4723 if (instr
->op
== nir_texop_fragment_mask_fetch
) {
4724 if (args
.dim
== ac_image_2dmsaa
) {
4725 args
.dim
= ac_image_2d
;
4727 assert(args
.dim
== ac_image_2darraymsaa
);
4728 args
.dim
= ac_image_2darray
;
4732 result
= build_tex_intrinsic(ctx
, instr
, &args
);
4734 if (instr
->op
== nir_texop_query_levels
)
4735 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4736 else if (instr
->is_shadow
&& instr
->is_new_style_shadow
&&
4737 instr
->op
!= nir_texop_txs
&& instr
->op
!= nir_texop_lod
&&
4738 instr
->op
!= nir_texop_tg4
)
4739 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
4740 else if (instr
->op
== nir_texop_txs
&&
4741 instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&&
4743 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
4744 LLVMValueRef six
= LLVMConstInt(ctx
->ac
.i32
, 6, false);
4745 LLVMValueRef z
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
4746 z
= LLVMBuildSDiv(ctx
->ac
.builder
, z
, six
, "");
4747 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, z
, two
, "");
4748 } else if (ctx
->ac
.chip_class
== GFX9
&&
4749 instr
->op
== nir_texop_txs
&&
4750 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4752 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
4753 LLVMValueRef layers
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
4754 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, layers
,
4756 } else if (instr
->dest
.ssa
.num_components
!= 4)
4757 result
= ac_trim_vector(&ctx
->ac
, result
, instr
->dest
.ssa
.num_components
);
4761 assert(instr
->dest
.is_ssa
);
4762 result
= ac_to_integer(&ctx
->ac
, result
);
4764 for (int i
= ARRAY_SIZE(wctx
); --i
>= 0;) {
4765 result
= exit_waterfall(ctx
, wctx
+ i
, result
);
4768 ctx
->ssa_defs
[instr
->dest
.ssa
.index
] = result
;

static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
	LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
	LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

	ctx->ssa_defs[instr->dest.ssa.index] = result;
	_mesa_hash_table_insert(ctx->phis, instr, result);
}

static void visit_post_phi(struct ac_nir_context *ctx,
			   nir_phi_instr *instr,
			   LLVMValueRef llvm_phi)
{
	nir_foreach_phi_src(src, instr) {
		LLVMBasicBlockRef block = get_block(ctx, src->pred);
		LLVMValueRef llvm_src = get_src(ctx, src->src);

		LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
	}
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
	hash_table_foreach(ctx->phis, entry) {
		visit_post_phi(ctx, (nir_phi_instr *)entry->key,
			       (LLVMValueRef)entry->data);
	}
}

static bool is_def_used_in_an_export(const nir_ssa_def *def) {
	nir_foreach_use(use_src, def) {
		if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
			nir_intrinsic_instr *instr = nir_instr_as_intrinsic(use_src->parent_instr);
			if (instr->intrinsic == nir_intrinsic_store_deref)
				return true;
		} else if (use_src->parent_instr->type == nir_instr_type_alu) {
			nir_alu_instr *instr = nir_instr_as_alu(use_src->parent_instr);
			if (instr->op == nir_op_vec4 &&
			    is_def_used_in_an_export(&instr->dest.dest.ssa)) {
				return true;
			}
		}
	}
	return false;
}

static void visit_ssa_undef(struct ac_nir_context *ctx,
			    const nir_ssa_undef_instr *instr)
{
	unsigned num_components = instr->def.num_components;
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	if (!ctx->abi->convert_undef_to_zero || is_def_used_in_an_export(&instr->def)) {
		LLVMValueRef undef;

		if (num_components == 1)
			undef = LLVMGetUndef(type);
		else {
			undef = LLVMGetUndef(LLVMVectorType(type, num_components));
		}
		ctx->ssa_defs[instr->def.index] = undef;
	} else {
		LLVMValueRef zero = LLVMConstInt(type, 0, false);
		if (num_components > 1) {
			zero = ac_build_gather_values_extended(
				&ctx->ac, &zero, 4, 0, false, false);
		}
		ctx->ssa_defs[instr->def.index] = zero;
	}
}

static void visit_jump(struct ac_llvm_context *ctx,
		       const nir_jump_instr *instr)
{
	switch (instr->type) {
	case nir_jump_break:
		ac_build_break(ctx);
		break;
	case nir_jump_continue:
		ac_build_continue(ctx);
		break;
	default:
		fprintf(stderr, "Unknown NIR jump instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}
}

static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
		       enum glsl_base_type type)
{
	switch (type) {
	case GLSL_TYPE_INT:
	case GLSL_TYPE_UINT:
	case GLSL_TYPE_BOOL:
	case GLSL_TYPE_SUBROUTINE:
		return ac->i32;
	case GLSL_TYPE_INT8:
	case GLSL_TYPE_UINT8:
		return ac->i8;
	case GLSL_TYPE_INT16:
	case GLSL_TYPE_UINT16:
		return ac->i16;
	case GLSL_TYPE_FLOAT:
		return ac->f32;
	case GLSL_TYPE_FLOAT16:
		return ac->f16;
	case GLSL_TYPE_INT64:
	case GLSL_TYPE_UINT64:
		return ac->i64;
	case GLSL_TYPE_DOUBLE:
		return ac->f64;
	default:
		unreachable("unknown GLSL type");
	}
}

static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
		  const struct glsl_type *type)
{
	if (glsl_type_is_scalar(type)) {
		return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
	}

	if (glsl_type_is_vector(type)) {
		return LLVMVectorType(
			glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
			glsl_get_vector_elements(type));
	}

	if (glsl_type_is_matrix(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_column_type(type)),
			glsl_get_matrix_columns(type));
	}

	if (glsl_type_is_array(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_array_element(type)),
			glsl_get_length(type));
	}

	assert(glsl_type_is_struct_or_ifc(type));

	LLVMTypeRef member_types[glsl_get_length(type)];

	for (unsigned i = 0; i < glsl_get_length(type); i++) {
		member_types[i] =
			glsl_to_llvm_type(ac,
					  glsl_get_struct_field(type, i));
	}

	return LLVMStructTypeInContext(ac->context, member_types,
				       glsl_get_length(type), false);
}
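
/* Example mappings produced by glsl_to_llvm_type() (illustrative):
 *   float      -> float
 *   vec4       -> <4 x float>
 *   mat3       -> [3 x <3 x float>]   (array of column vectors)
 *   float a[4] -> [4 x float]
 */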
4932 static void visit_deref(struct ac_nir_context
*ctx
,
4933 nir_deref_instr
*instr
)
4935 if (instr
->mode
!= nir_var_mem_shared
&&
4936 instr
->mode
!= nir_var_mem_global
)
4939 LLVMValueRef result
= NULL
;
4940 switch(instr
->deref_type
) {
4941 case nir_deref_type_var
: {
4942 struct hash_entry
*entry
= _mesa_hash_table_search(ctx
->vars
, instr
->var
);
4943 result
= entry
->data
;
4946 case nir_deref_type_struct
:
4947 if (instr
->mode
== nir_var_mem_global
) {
4948 nir_deref_instr
*parent
= nir_deref_instr_parent(instr
);
4949 uint64_t offset
= glsl_get_struct_field_offset(parent
->type
,
4950 instr
->strct
.index
);
4951 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4952 LLVMConstInt(ctx
->ac
.i32
, offset
, 0));
4954 result
= ac_build_gep0(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4955 LLVMConstInt(ctx
->ac
.i32
, instr
->strct
.index
, 0));
4958 case nir_deref_type_array
:
4959 if (instr
->mode
== nir_var_mem_global
) {
4960 nir_deref_instr
*parent
= nir_deref_instr_parent(instr
);
4961 unsigned stride
= glsl_get_explicit_stride(parent
->type
);
4963 if ((glsl_type_is_matrix(parent
->type
) &&
4964 glsl_matrix_type_is_row_major(parent
->type
)) ||
4965 (glsl_type_is_vector(parent
->type
) && stride
== 0))
4966 stride
= type_scalar_size_bytes(parent
->type
);
4969 LLVMValueRef index
= get_src(ctx
, instr
->arr
.index
);
4970 if (LLVMTypeOf(index
) != ctx
->ac
.i64
)
4971 index
= LLVMBuildZExt(ctx
->ac
.builder
, index
, ctx
->ac
.i64
, "");
4973 LLVMValueRef offset
= LLVMBuildMul(ctx
->ac
.builder
, index
, LLVMConstInt(ctx
->ac
.i64
, stride
, 0), "");
4975 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
), offset
);
4977 result
= ac_build_gep0(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4978 get_src(ctx
, instr
->arr
.index
));
4981 case nir_deref_type_ptr_as_array
:
4982 if (instr
->mode
== nir_var_mem_global
) {
4983 unsigned stride
= nir_deref_instr_ptr_as_array_stride(instr
);
4985 LLVMValueRef index
= get_src(ctx
, instr
->arr
.index
);
4986 if (LLVMTypeOf(index
) != ctx
->ac
.i64
)
4987 index
= LLVMBuildZExt(ctx
->ac
.builder
, index
, ctx
->ac
.i64
, "");
4989 LLVMValueRef offset
= LLVMBuildMul(ctx
->ac
.builder
, index
, LLVMConstInt(ctx
->ac
.i64
, stride
, 0), "");
4991 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
), offset
);
4993 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4994 get_src(ctx
, instr
->arr
.index
));
4997 case nir_deref_type_cast
: {
4998 result
= get_src(ctx
, instr
->parent
);
5000 /* We can't use the structs from LLVM because the shader
5001 * specifies its own offsets. */
5002 LLVMTypeRef pointee_type
= ctx
->ac
.i8
;
5003 if (instr
->mode
== nir_var_mem_shared
)
5004 pointee_type
= glsl_to_llvm_type(&ctx
->ac
, instr
->type
);
5006 unsigned address_space
;
5008 switch(instr
->mode
) {
5009 case nir_var_mem_shared
:
5010 address_space
= AC_ADDR_SPACE_LDS
;
5012 case nir_var_mem_global
:
5013 address_space
= AC_ADDR_SPACE_GLOBAL
;
5016 unreachable("Unhandled address space");
5019 LLVMTypeRef type
= LLVMPointerType(pointee_type
, address_space
);
5021 if (LLVMTypeOf(result
) != type
) {
5022 if (LLVMGetTypeKind(LLVMTypeOf(result
)) == LLVMFixedVectorTypeKind
) {
5023 result
= LLVMBuildBitCast(ctx
->ac
.builder
, result
,
5026 result
= LLVMBuildIntToPtr(ctx
->ac
.builder
, result
,
5033 unreachable("Unhandled deref_instr deref type");
5036 ctx
->ssa_defs
[instr
->dest
.ssa
.index
] = result
;

static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list);

static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
	nir_foreach_instr(instr, block)
	{
		switch (instr->type) {
		case nir_instr_type_alu:
			visit_alu(ctx, nir_instr_as_alu(instr));
			break;
		case nir_instr_type_load_const:
			visit_load_const(ctx, nir_instr_as_load_const(instr));
			break;
		case nir_instr_type_intrinsic:
			visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
			break;
		case nir_instr_type_tex:
			visit_tex(ctx, nir_instr_as_tex(instr));
			break;
		case nir_instr_type_phi:
			visit_phi(ctx, nir_instr_as_phi(instr));
			break;
		case nir_instr_type_ssa_undef:
			visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
			break;
		case nir_instr_type_jump:
			visit_jump(&ctx->ac, nir_instr_as_jump(instr));
			break;
		case nir_instr_type_deref:
			visit_deref(ctx, nir_instr_as_deref(instr));
			break;
		default:
			fprintf(stderr, "Unknown NIR instr type: ");
			nir_print_instr(instr, stderr);
			fprintf(stderr, "\n");
			break;
		}
	}

	_mesa_hash_table_insert(ctx->defs, block,
				LLVMGetInsertBlock(ctx->ac.builder));
}

static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
	LLVMValueRef value = get_src(ctx, if_stmt->condition);

	nir_block *then_block =
		(nir_block *) exec_list_get_head(&if_stmt->then_list);

	ac_build_uif(&ctx->ac, value, then_block->index);

	visit_cf_list(ctx, &if_stmt->then_list);

	if (!exec_list_is_empty(&if_stmt->else_list)) {
		nir_block *else_block =
			(nir_block *) exec_list_get_head(&if_stmt->else_list);

		ac_build_else(&ctx->ac, else_block->index);
		visit_cf_list(ctx, &if_stmt->else_list);
	}

	ac_build_endif(&ctx->ac, then_block->index);
}

static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
	nir_block *first_loop_block =
		(nir_block *) exec_list_get_head(&loop->body);

	ac_build_bgnloop(&ctx->ac, first_loop_block->index);

	visit_cf_list(ctx, &loop->body);

	ac_build_endloop(&ctx->ac, first_loop_block->index);
}

static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list)
	{
		switch (node->type) {
		case nir_cf_node_block:
			visit_block(ctx, nir_cf_node_as_block(node));
			break;

		case nir_cf_node_if:
			visit_if(ctx, nir_cf_node_as_if(node));
			break;

		case nir_cf_node_loop:
			visit_loop(ctx, nir_cf_node_as_loop(node));
			break;

		default:
			assert(0);
		}
	}
}

void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
			     struct ac_shader_abi *abi,
			     struct nir_shader *nir,
			     struct nir_variable *variable,
			     gl_shader_stage stage)
{
	unsigned output_loc = variable->data.driver_location / 4;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		int idx = variable->data.location + variable->data.index;
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = nir->info.clip_distance_array_size +
				     nir->info.cull_distance_array_size;

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
		}
	}

	bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
	LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
	for (unsigned i = 0; i < attrib_count; ++i) {
		for (unsigned chan = 0; chan < 4; chan++) {
			abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
				ac_build_alloca_undef(ctx, type, "");
		}
	}
}

static void
setup_locals(struct ac_nir_context *ctx,
	     struct nir_function *func)
{
	int i, j;
	ctx->num_locals = 0;
	nir_foreach_variable(variable, &func->impl->locals) {
		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
		variable->data.driver_location = ctx->num_locals * 4;
		variable->data.location_frac = 0;
		ctx->num_locals += attrib_count;
	}
	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
	if (!ctx->locals)
		return;

	for (i = 0; i < ctx->num_locals; i++) {
		for (j = 0; j < 4; j++) {
			ctx->locals[i * 4 + j] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
		}
	}
}

static void
setup_scratch(struct ac_nir_context *ctx,
	      struct nir_shader *shader)
{
	if (shader->scratch_size == 0)
		return;

	ctx->scratch = ac_build_alloca_undef(&ctx->ac,
					     LLVMArrayType(ctx->ac.i8, shader->scratch_size),
					     "scratch");
}

static void
setup_constant_data(struct ac_nir_context *ctx,
		    struct nir_shader *shader)
{
	if (!shader->constant_data)
		return;

	LLVMValueRef data =
		LLVMConstStringInContext(ctx->ac.context,
					 shader->constant_data,
					 shader->constant_data_size,
					 true);
	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

	/* We want to put the constant data in the CONST address space so that
	 * we can use scalar loads. However, LLVM versions before 10 put these
	 * variables in the same section as the code, which is unacceptable
	 * for RadeonSI as it needs to relocate all the data sections after
	 * the code sections. See https://reviews.llvm.org/D65813.
	 */
	unsigned address_space =
		LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

	LLVMValueRef global =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "const_data",
					    address_space);

	LLVMSetInitializer(global, data);
	LLVMSetGlobalConstant(global, true);
	LLVMSetVisibility(global, LLVMHiddenVisibility);
	ctx->constant_data = global;
}
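
/* A load_constant intrinsic is then lowered (see visit_intrinsic above) by
 * clamping the offset to the declared [base, base + range) window and GEP'ing
 * into this global, roughly:
 *   ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
 * followed by a bitcast to the destination vector type and an ordinary load.
 */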

static void
setup_shared(struct ac_nir_context *ctx,
	     struct nir_shader *nir)
{
	if (ctx->ac.lds)
		return;

	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
					 nir->info.cs.shared_size);

	LLVMValueRef lds =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "compute_lds",
					    AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(lds, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
				       LLVMPointerType(ctx->ac.i8,
						       AC_ADDR_SPACE_LDS), "");
}

void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
    struct ac_nir_context ctx = {};
    struct nir_function *func;

    ctx.ac = *ac;
    ctx.abi = abi;
    ctx.args = args;

    ctx.stage = nir->info.stage;
    ctx.info = &nir->info;

    ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

    nir_foreach_variable(variable, &nir->outputs)
        ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
                                     ctx.stage);

    ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
    ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
    ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);

    if (ctx.abi->kill_ps_if_inf_interp)
        ctx.verified_interp = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                      _mesa_key_pointer_equal);

    func = (struct nir_function *)exec_list_get_head(&nir->functions);

    nir_index_ssa_defs(func->impl);
    ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

    setup_locals(&ctx, func);
    setup_scratch(&ctx, nir);
    setup_constant_data(&ctx, nir);

    if (gl_shader_stage_is_compute(nir->info.stage))
        setup_shared(&ctx, nir);

    if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
        ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
        /* true = don't kill. */
        LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
    }

    visit_cf_list(&ctx, &func->impl->body);
    phi_post_pass(&ctx);

    if (ctx.ac.postponed_kill)
        ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
                                                      ctx.ac.postponed_kill, ""));

    if (!gl_shader_stage_is_compute(nir->info.stage))
        ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
                              ctx.abi->outputs);

    free(ctx.locals);
    free(ctx.ssa_defs);
    ralloc_free(ctx.defs);
    ralloc_free(ctx.phis);
    ralloc_free(ctx.vars);
    if (ctx.abi->kill_ps_if_inf_interp)
        ralloc_free(ctx.verified_interp);
}

bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
    bool progress = false;

    /* Lower large variables to scratch first so that we won't bloat the
     * shader by generating large if ladders for them. We later lower
     * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
     */
    NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
             nir_var_function_temp,
             256,
             glsl_get_natural_size_align_bytes);

    /* While it would be nice not to have this flag, we are constrained
     * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
     */
    bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

    /* TODO: Indirect indexing of GS inputs is unimplemented.
     *
     * TCS and TES load inputs directly from LDS or offchip memory, so
     * indirect indexing is trivial.
     */
    nir_variable_mode indirect_mask = 0;
    if (nir->info.stage == MESA_SHADER_GEOMETRY ||
        (nir->info.stage != MESA_SHADER_TESS_CTRL &&
         nir->info.stage != MESA_SHADER_TESS_EVAL &&
         !llvm_has_working_vgpr_indexing)) {
        indirect_mask |= nir_var_shader_in;
    }
    if (!llvm_has_working_vgpr_indexing &&
        nir->info.stage != MESA_SHADER_TESS_CTRL)
        indirect_mask |= nir_var_shader_out;

    /* TODO: We shouldn't need to do this, however LLVM isn't currently
     * smart enough to handle indirects without causing excess spilling
     * causing the gpu to hang.
     *
     * See the following thread for more details of the problem:
     * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
     */
    indirect_mask |= nir_var_function_temp;

    progress |= nir_lower_indirect_derefs(nir, indirect_mask);
    return progress;
}

static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
    if (intrin->intrinsic != nir_intrinsic_store_deref)
        return 0;

    nir_variable *var =
        nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

    if (var->data.mode != nir_var_shader_out)
        return 0;

    unsigned writemask = 0;
    const int location = var->data.location;
    unsigned first_component = var->data.location_frac;
    unsigned num_comps = intrin->dest.ssa.num_components;

    if (location == VARYING_SLOT_TESS_LEVEL_INNER)
        writemask = ((1 << (num_comps + 1)) - 1) << first_component;
    else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
        writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

    return writemask;
}

static void
scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
               unsigned *cond_block_tf_writemask,
               bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
{
    switch (cf_node->type) {
    case nir_cf_node_block: {
        nir_block *block = nir_cf_node_as_block(cf_node);
        nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
                continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_control_barrier) {

                /* If we find a barrier in nested control flow put this in the
                 * too hard basket. In GLSL this is not possible but it is in
                 * SPIR-V.
                 */
                if (is_nested_cf) {
                    *tessfactors_are_def_in_all_invocs = false;
                    return;
                }

                /* The following case must be prevented:
                 *    gl_TessLevelInner = ...;
                 *    barrier();
                 *    if (gl_InvocationID == 1)
                 *       gl_TessLevelInner = ...;
                 *
                 * If you consider disjoint code segments separated by barriers, each
                 * such segment that writes tess factor channels should write the same
                 * channels in all codepaths within that segment.
                 */
                if (upper_block_tf_writemask || cond_block_tf_writemask) {
                    /* Accumulate the result: */
                    *tessfactors_are_def_in_all_invocs &=
                        !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

                    /* Analyze the next code segment from scratch. */
                    *upper_block_tf_writemask = 0;
                    *cond_block_tf_writemask = 0;
                }
            } else
                *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
        }
        break;
    }
    case nir_cf_node_if: {
        unsigned then_tessfactor_writemask = 0;
        unsigned else_tessfactor_writemask = 0;

        nir_if *if_stmt = nir_cf_node_as_if(cf_node);
        foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
            scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }

        foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
            scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }

        if (then_tessfactor_writemask || else_tessfactor_writemask) {
            /* If both statements write the same tess factor channels,
             * we can say that the upper block writes them too.
             */
            *upper_block_tf_writemask |= then_tessfactor_writemask &
                                         else_tessfactor_writemask;
            *cond_block_tf_writemask |= then_tessfactor_writemask |
                                        else_tessfactor_writemask;
        }
        break;
    }
    case nir_cf_node_loop: {
        nir_loop *loop = nir_cf_node_as_loop(cf_node);
        foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
            scan_tess_ctrl(nested_node, cond_block_tf_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }
        break;
    }
    default:
        unreachable("unknown cf node type");
    }
}
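
/* Worked example of the accumulation above: if the code before a barrier only
 * writes the inner tess factors inside one branch of an if, the if handling
 * leaves *upper_block_tf_writemask at 0 and sets *cond_block_tf_writemask to
 * 0x7, so the barrier evaluates !(0x7 & ~0x0) == false and the shader is
 * reported as not defining the tess factors in all invocations.
 */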

bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
    assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

    /* The pass works as follows:
     * If all codepaths write tess factors, we can say that all
     * invocations define tess factors.
     *
     * Each tess factor channel is tracked separately.
     */
    unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
    unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

    /* Initial value = true. Here the pass will accumulate results from
     * multiple segments surrounded by barriers. If tess factors aren't
     * written at all, it's a shader bug and we don't care if this will be
     * true.
     */
    bool tessfactors_are_def_in_all_invocs = true;

    nir_foreach_function(function, nir) {
        if (function->impl) {
            foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
                scan_tess_ctrl(node, &main_block_tf_writemask,
                               &cond_block_tf_writemask,
                               &tessfactors_are_def_in_all_invocs,
                               false);
            }
        }
    }

    /* Accumulate the result for the last code segment separated by a
     * barrier.
     */
    if (main_block_tf_writemask || cond_block_tf_writemask) {
        tessfactors_are_def_in_all_invocs &=
            !(cond_block_tf_writemask & ~main_block_tf_writemask);
    }

    return tessfactors_are_def_in_all_invocs;
}
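
/* For instance, a TCS whose main block unconditionally writes
 * gl_TessLevelInner and gl_TessLevelOuter makes this return true, while a TCS
 * that only writes them under "if (gl_InvocationID == 0)" makes it return
 * false, because only the conditional writemask is set for that segment.
 */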