/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
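/* Per-shader translation state for the NIR -> LLVM pass: the LLVM build
 * context and ABI hooks, the table of already-emitted SSA defs, and hash
 * tables mapping NIR blocks, phis and variables to their LLVM counterparts. */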
struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;

   LLVMValueRef *ssa_defs;

   LLVMValueRef constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *vars;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};

static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image);

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     bool image, bool write);
static void
build_store_values_extended(struct ac_llvm_context *ac,
                            LLVMValueRef *values,
                            unsigned value_count,
                            unsigned value_stride,
                            LLVMValueRef vec)
{
   LLVMBuilderRef builder = ac->builder;
   unsigned i;

   for (i = 0; i < value_count; i++) {
      LLVMValueRef ptr = values[i * value_stride];
      LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
      LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
      LLVMBuildStore(builder, value, ptr);
   }
}

static LLVMTypeRef
get_def_type(struct ac_nir_context *ctx,
             const nir_ssa_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}

static LLVMValueRef
get_src(struct ac_nir_context *nir, nir_src src)
{
   return nir->ssa_defs[src.ssa->index];
}

static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
   int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

   return LLVMBuildBitCast(ctx->ac.builder, ptr,
                           LLVMPointerType(type, addr_space), "");
}

static LLVMBasicBlockRef
get_block(struct ac_nir_context *nir,
          const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}
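/* Fetch an ALU source and apply its swizzle: a single-component read becomes
 * an extractelement, a scalar broadcast becomes a gather of the same value,
 * and anything else becomes a shufflevector over the source. */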
static LLVMValueRef
get_alu_src(struct ac_nir_context *ctx,
            nir_alu_src src,
            unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {
         LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value,
                                         masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
                                        swizzle, "");
      }
   }
   return value;
}
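/* NIR 32-bit booleans are all-zeros / all-ones, so instead of returning the
 * raw i1 from the icmp/fcmp these helpers select between 0 and 0xFFFFFFFF. */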
static LLVMValueRef
emit_int_cmp(struct ac_llvm_context *ctx,
             LLVMIntPredicate pred, LLVMValueRef src0,
             LLVMValueRef src1)
{
   LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}

static LLVMValueRef
emit_float_cmp(struct ac_llvm_context *ctx,
               LLVMRealPredicate pred, LLVMValueRef src0,
               LLVMValueRef src1)
{
   LLVMValueRef result;
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}

static LLVMValueRef
emit_intrin_1f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef
emit_intrin_2f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0, LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef
emit_intrin_3f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef
emit_bcsel(struct ac_llvm_context *ctx,
           LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
                                  ctx->i32_0, "");
   return LLVMBuildSelect(ctx->builder, v,
                          ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef
emit_iabs(struct ac_llvm_context *ctx,
          LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}

static LLVMValueRef
emit_uint_carry(struct ac_llvm_context *ctx,
                const char *intrin,
                LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
   LLVMValueRef res;
   LLVMValueRef params[] = { src0, src1 };
   ret_type = LLVMStructTypeInContext(ctx->context, types,
                                      2, true);

   res = ac_build_intrinsic(ctx, intrin, ret_type,
                            params, 2, AC_FUNC_ATTR_READNONE);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}

static LLVMValueRef
emit_b2f(struct ac_llvm_context *ctx,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
                                      LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
                                      "");
   result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

   switch (bitsize) {
   case 16:
      return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}
static LLVMValueRef
emit_f2b(struct ac_llvm_context *ctx,
         LLVMValueRef src0)
{
   src0 = ac_to_float(ctx, src0);
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
                        ctx->i32, "");
}

static LLVMValueRef
emit_b2i(struct ac_llvm_context *ctx,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
   case 16:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef
emit_i2b(struct ac_llvm_context *ctx,
         LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
                        ctx->i32, "");
}

static LLVMValueRef
emit_f2f16(struct ac_llvm_context *ctx,
           LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->chip_class >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal - and flush to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->chip_class >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
       * so compare the result and flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
                           LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
                           temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
                            temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}
static LLVMValueRef
emit_umul_high(struct ac_llvm_context *ctx,
               LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef
emit_imul_high(struct ac_llvm_context *ctx,
               LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}
static LLVMValueRef
emit_bfm(struct ac_llvm_context *ctx,
         LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
   return LLVMBuildShl(ctx->builder,
                       LLVMBuildSub(ctx->builder,
                                    LLVMBuildShl(ctx->builder,
                                                 ctx->i32_1,
                                                 bits, ""),
                                    ctx->i32_1, ""),
                       offset, "");
}

static LLVMValueRef
emit_bitfield_select(struct ac_llvm_context *ctx,
                     LLVMValueRef mask, LLVMValueRef insert,
                     LLVMValueRef base)
{
   /* (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(ctx->builder, base,
                       LLVMBuildAnd(ctx->builder, mask,
                                    LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

static LLVMValueRef
emit_pack_2x16(struct ac_llvm_context *ctx,
               LLVMValueRef src0,
               LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                    LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef
emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                      LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}
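/* Derivatives are built from values across the 2x2 pixel quad: the TID mask
 * roughly selects the base lane (left neighbour for fine ddx, top neighbour
 * for fine ddy, top-left corner for the coarse variants), and idx selects the
 * X or Y neighbour to difference against. */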
static LLVMValueRef
emit_ddxy(struct ac_nir_context *ctx,
          nir_op op,
          LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* for DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine ||
       op == nir_op_fddx_coarse ||
       op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}

struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop leaving the caller
 * to implement the body.
 *
 * params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we built the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    things through.
 */
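/* A rough sketch of the structure the two helpers emit (the break itself is
 * produced in exit_waterfall, behind an optimization barrier):
 *
 *    loop {
 *       scalar = readfirstlane(value);
 *       if (value == scalar) {      // the lanes that match, now uniform
 *          ... caller-emitted body ...
 *          break;                   // taken only by those lanes
 *       }
 *    }
 */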
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx,
                                    struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
                                       scalar_value, "uniform_active");

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return scalar_value;
}
static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx,
                                   struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      LLVMConstInt(ctx->ac.i32, 0, false),
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}
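/* Translate a single NIR ALU instruction: gather the (swizzled) sources, emit
 * the matching LLVM IR or AMDGCN intrinsic for the opcode, and record the
 * result as an integer (or pointer) SSA def. */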
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[4], result = NULL;
   unsigned num_components = instr->dest.dest.ssa.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);
   bool saved_inexact = false;

   if (instr->exact)
      saved_inexact = ac_disable_inexact_math(ctx->ac.builder);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   switch (instr->op) {
   case nir_op_pack_half_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);

   switch (instr->op) {
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by backend compiler with sign
          * bit removed via XOR. This is probably a LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ineg:
      result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imod:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_umod:
      result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmod:
      /* lower_fmod only lowers 16-bit and 32-bit fmod */
      assert(instr->dest.dest.ssa.bit_size == 64);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), result);
      result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
      result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
      break;
   case nir_op_irem:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_idiv:
      result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_udiv:
      result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_frcp:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ushr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ilt32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine32:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult32:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge32:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by backend compiler with sign
          * bit removed via AND.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_ffloor:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fract(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsin:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fcos:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fexp2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rsq",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0],
                                  ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result,
                                ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0],
                                   instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fpow:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
      result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
      else
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (LLVMTypeOf(src[0]) == ctx->ac.f64)
         src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
      LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
      result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      break;
   case nir_op_f2f16_rtne:
   case nir_op_f2f16:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_b32csel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_f2b32:
      result = emit_f2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_i2b32:
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
      break;
   case nir_op_pack_snorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
      break;
   case nir_op_pack_unorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
      break;
   case nir_op_unpack_half_2x16:
      result = emit_unpack_half_2x16(&ctx->ac, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_cube_face_coord: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[2];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
                                           ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
      results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
      LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
      results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
      results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
      result = ac_build_gather_values(&ctx->ac, results, 2);
      break;
   }
   case nir_op_cube_face_index: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
                                  ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_fmin3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umin3:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      result = ac_build_umin(&ctx->ac, result, src[2]);
      break;
   case nir_op_imin3:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      result = ac_build_imin(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmax3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umax3:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      result = ac_build_umax(&ctx->ac, result, src[2]);
      break;
   case nir_op_imax3:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      result = ac_build_imax(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmed3: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
                              instr->dest.dest.ssa.bit_size);
      break;
   }
   case nir_op_imed3: {
      LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
      result = ac_build_imax(&ctx->ac, tmp1, tmp2);
      break;
   }
   case nir_op_umed3: {
      LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
      result = ac_build_umax(&ctx->ac, tmp1, tmp2);
      break;
   }
   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }

   if (result) {
      assert(instr->dest.dest.is_ssa);
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
   }

   if (instr->exact)
      ac_restore_inexact_math(ctx->ac.builder, saved_inexact);
}
static void visit_load_const(struct ac_nir_context *ctx,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef values[4], value = NULL;
   LLVMTypeRef element_type =
      LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 8:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr,
                 "unsupported nir load_const bit_size: %d\n",
                 instr->def.bit_size);
         abort();
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
}
static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
   LLVMValueRef size =
      LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                              LLVMConstInt(ctx->ac.i32, 2, false), "");

   if (ctx->ac.chip_class == GFX8 && in_elements) {
      /* On GFX8, the descriptor contains the size in bytes,
       * but TXQ must return the size in elements.
       * The stride is always non-zero for resources using TXQ.
       */
      LLVMValueRef stride =
         LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                                 ctx->ac.i32_1, "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride,
                             LLVMConstInt(ctx->ac.i32, 16, false), "");
      stride = LLVMBuildAnd(ctx->ac.builder, stride,
                            LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

      size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
   }
   return size;
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
                                          nir_variable *var,
                                          struct ac_image_args *args,
                                          const nir_tex_instr *instr)
{
   const struct glsl_type *type = glsl_without_array(var->type);
   enum glsl_base_type stype = glsl_get_sampler_result_type(type);
   LLVMValueRef wa_8888 = NULL;
   LLVMValueRef half_texel[2];
   LLVMValueRef result;

   assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef formats;
      LLVMValueRef data_format;
      LLVMValueRef wa_formats;

      formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

      data_format = LLVMBuildLShr(ctx->builder, formats,
                                  LLVMConstInt(ctx->i32, 20, false), "");
      data_format = LLVMBuildAnd(ctx->builder, data_format,
                                 LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
      wa_8888 = LLVMBuildICmp(
         ctx->builder, LLVMIntEQ, data_format,
         LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
         "");

      uint32_t wa_num_format =
         stype == GLSL_TYPE_UINT ?
            S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
            S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
      wa_formats = LLVMBuildAnd(ctx->builder, formats,
                                LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
                                "");
      wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
                               LLVMConstInt(ctx->i32, wa_num_format, false), "");

      formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
      args->resource = LLVMBuildInsertElement(
         ctx->builder, args->resource, formats, ctx->i32_1, "");
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
      half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
   } else {
      struct ac_image_args resinfo = {};
      LLVMBasicBlockRef bbs[2];

      LLVMValueRef unnorm = NULL;
      LLVMValueRef default_offset = ctx->f32_0;
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
          !instr->is_array) {
         /* In vulkan, whether the sampler uses unnormalized
          * coordinates or not is a dynamic property of the
          * sampler. Hence, to figure out whether or not we
          * need to divide by the texture size, we need to test
          * the sampler at runtime. This tests the bit set by
          * radv_init_sampler().
          */
         LLVMValueRef sampler0 =
            LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
         sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
                                  LLVMConstInt(ctx->i32, 15, false), "");
         sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
         unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
         default_offset = LLVMConstReal(ctx->f32, -0.5);
      }

      bbs[0] = LLVMGetInsertBlock(ctx->builder);
      if (wa_8888 || unnorm) {
         assert(!(wa_8888 && unnorm));
         LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
         /* Skip the texture size query entirely if we don't need it. */
         ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
         bbs[1] = LLVMGetInsertBlock(ctx->builder);
      }

      /* Query the texture size. */
      resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
      resinfo.opcode = ac_image_get_resinfo;
      resinfo.dmask = 0xf;
      resinfo.lod = ctx->i32_0;
      resinfo.resource = args->resource;
      resinfo.attributes = AC_FUNC_ATTR_READNONE;
      LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

      /* Compute -0.5 / size. */
      for (unsigned c = 0; c < 2; c++) {
         half_texel[c] =
            LLVMBuildExtractElement(ctx->builder, size,
                                    LLVMConstInt(ctx->i32, c, 0), "");
         half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
         half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
         half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
                                       LLVMConstReal(ctx->f32, -0.5), "");
      }

      if (wa_8888 || unnorm) {
         ac_build_endif(ctx, 2000);

         for (unsigned c = 0; c < 2; c++) {
            LLVMValueRef values[2] = { default_offset, half_texel[c] };
            half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
                                         values, bbs);
         }
      }
   }

   for (unsigned c = 0; c < 2; c++) {
      LLVMValueRef tmp;
      tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
      args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   result = ac_build_image_opcode(ctx, args);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef tmp, tmp2;

      /* if the cube workaround is in place, f2i the result. */
      for (unsigned c = 0; c < 4; c++) {
         tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
         if (stype == GLSL_TYPE_UINT)
            tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
         else
            tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
         tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
         tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
         result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
      }
   }
   return result;
}
static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
   nir_deref_instr *texture_deref_instr = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      default:
         break;
      }
   }
   return texture_deref_instr;
}
static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
                                        const nir_tex_instr *instr,
                                        struct ac_image_args *args)
{
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

      assert(instr->dest.is_ssa);
      return ac_build_buffer_load_format(&ctx->ac,
                                         args->resource,
                                         args->coords[0],
                                         ctx->ac.i32_0,
                                         util_last_bit(mask),
                                         0, true,
                                         instr->dest.ssa.bit_size == 16);
   }

   args->opcode = ac_image_sample;

   switch (instr->op) {
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_samples_identical:
      args->opcode = args->level_zero ||
                     instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
                        ac_image_load : ac_image_load_mip;
      args->level_zero = false;
      break;
   case nir_texop_txs:
   case nir_texop_query_levels:
      args->opcode = ac_image_get_resinfo;
      if (!args->lod)
         args->lod = ctx->ac.i32_0;
      args->level_zero = false;
      break;
   case nir_texop_tex:
      if (ctx->stage != MESA_SHADER_FRAGMENT) {
         args->level_zero = true;
      }
      break;
   case nir_texop_tg4:
      args->opcode = ac_image_gather4;
      if (!args->lod && !args->bias)
         args->level_zero = true;
      break;
   case nir_texop_lod:
      args->opcode = ac_image_get_lod;
      break;
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      args->opcode = ac_image_load;
      args->level_zero = false;
      break;
   default:
      break;
   }

   if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
      nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
      nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
      const struct glsl_type *type = glsl_without_array(var->type);
      enum glsl_base_type stype = glsl_get_sampler_result_type(type);
      if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
         return lower_gather4_integer(&ctx->ac, var, args, instr);
      }
   }

   /* Fixup for GFX9 which allocates 1D textures as 2D. */
   if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
      if ((args->dim == ac_image_2darray ||
           args->dim == ac_image_2d) && !args->coords[1]) {
         args->coords[1] = ctx->ac.i32_0;
      }
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
                    ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
   if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
      /* Prevent texture instructions with implicit derivatives from being
       * sinked into branches. */
      switch (instr->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         args->attributes |= AC_FUNC_ATTR_CONVERGENT;
         break;
      default:
         break;
      }
   }

   return ac_build_image_opcode(&ctx->ac, args);
}
static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
                                                  nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr = get_src(ctx, instr->src[0]);
   LLVMValueRef index = get_src(ctx, instr->src[1]);

   LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
   LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
   return result;
}
static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
                                             nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr, addr;
   LLVMValueRef src0 = get_src(ctx, instr->src[0]);
   unsigned index = nir_intrinsic_base(instr);

   addr = LLVMConstInt(ctx->ac.i32, index, 0);
   addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

   /* Load constant values from user SGPRS when possible, otherwise
    * fallback to the default path that loads directly from memory.
    */
   if (LLVMIsConstant(src0) &&
       instr->dest.ssa.bit_size == 32) {
      unsigned count = instr->dest.ssa.num_components;
      unsigned offset = index;

      offset += LLVMConstIntGetZExtValue(src0);

      offset -= ctx->args->base_inline_push_consts;

      unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
      if (offset + count <= num_inline_push_consts) {
         LLVMValueRef push_constants[num_inline_push_consts];
         for (unsigned i = 0; i < num_inline_push_consts; i++)
            push_constants[i] = ac_get_arg(&ctx->ac,
                                           ctx->args->inline_push_consts[i]);
         return ac_build_gather_values(&ctx->ac,
                                       push_constants + offset,
                                       count);
      }
   }

   ptr = LLVMBuildGEP(ctx->ac.builder,
                      ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");

   if (instr->dest.ssa.bit_size == 8) {
      unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

      LLVMValueRef params[3];
      if (load_dwords > 1) {
         LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.v2i32, "");
         params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
         params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
      } else {
         res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
         params[0] = ctx->ac.i32_0;
         params[1] = res;
      }
      params[2] = addr;
      res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

      res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
      if (instr->dest.ssa.num_components > 1)
         res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
      return res;
   } else if (instr->dest.ssa.bit_size == 16) {
      unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
      LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
      cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
      LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
                              LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
                              LLVMConstInt(ctx->ac.i32, 4, false)};
      LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
      LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
      LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
      LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
      res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
      return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
   }

   ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

   return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
   LLVMValueRef index = get_src(ctx, instr->src[0]);

   return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}
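/* Example: widen_mask(0b101, 2) expands each set bit to two bits -> 0b110011. */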
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
   uint32_t new_mask = 0;
   for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
      if (mask & (1u << i))
         new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
   return new_mask;
}
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                                         unsigned start, unsigned count)
{
   LLVMValueRef mask[] = {
      ctx->i32_0, ctx->i32_1,
      LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

   unsigned src_elements = ac_get_llvm_num_components(src);

   if (count == src_elements) {
      return src;
   } else if (count == 1) {
      assert(start < src_elements);
      return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
   } else {
      assert(start + count <= src_elements);

      LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
      return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
   }
}
static unsigned get_cache_policy(struct ac_nir_context *ctx,
                                 enum gl_access_qualifier access,
                                 bool may_store_unaligned,
                                 bool writeonly_memory)
{
   unsigned cache_policy = 0;

   /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
    * store opcodes not aligned to a dword are affected. The only way to
    * get unaligned stores is through shader images.
    */
   if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
        /* If this is write-only, don't keep data in L1 to prevent
         * evicting L1 cache lines that may be needed by other
         * instructions. */
        writeonly_memory ||
        access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
      cache_policy |= ac_glc;
   }

   if (access & ACCESS_STREAM_CACHE_POLICY)
      cache_policy |= ac_slc | ac_glc;

   return cache_policy;
}

static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx,
                                         struct waterfall_context *wctx,
                                         const nir_intrinsic_instr *instr,
                                         nir_src src)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, src),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static void visit_store_ssbo(struct ac_nir_context *ctx,
                             nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7000);
   }

   LLVMValueRef src_data = get_src(ctx, instr->src[0]);
   int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
   unsigned writemask = nir_intrinsic_write_mask(instr);
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   bool writeonly_memory = access & ACCESS_NON_READABLE;
   unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);

   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
   LLVMValueRef base_data = src_data;
   base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
   LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

   while (writemask) {
      int start, count;
      LLVMValueRef data, offset;
      LLVMTypeRef data_type;

      u_bit_scan_consecutive_range(&writemask, &start, &count);

      /* Due to an LLVM limitation with LLVM < 9, split 3-element
       * writes into a 2-element and a 1-element write. */
      if (count == 3 &&
          (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
         writemask |= 1 << (start + 2);
         count = 2;
      }
      int num_bytes = count * elem_size_bytes; /* count in bytes */

      /* we can only store 4 DWords at the same time.
       * can only happen for 64 Bit vectors. */
      if (num_bytes > 16) {
         writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
         count = 2;
         num_bytes = 16;
      }

      /* check alignment of 16 Bit stores */
      if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = 2;
      }

      /* Due to alignment issues, split stores of 8-bit/16-bit
       * vectors.
       */
      if (ctx->ac.chip_class == GFX6 && count > 1 && elem_size_bytes < 4) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = elem_size_bytes;
      }

      data = extract_vector_range(&ctx->ac, base_data, start, count);

      offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
                            LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

      if (num_bytes == 1) {
         ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
                                     offset, ctx->ac.i32_0,
                                     cache_policy);
      } else if (num_bytes == 2) {
         ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
                                      offset, ctx->ac.i32_0,
                                      cache_policy);
      } else {
         int num_channels = num_bytes / 4;

         switch (num_bytes) {
         case 16: /* v4f32 */
            data_type = ctx->ac.v4f32;
            break;
         case 12: /* v3f32 */
            data_type = ctx->ac.v3f32;
            break;
         case 8: /* v2f32 */
            data_type = ctx->ac.v2f32;
            break;
         case 4: /* f32 */
            data_type = ctx->ac.f32;
            break;
         default:
            unreachable("Malformed vector store.");
         }
         data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

         ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
                                     num_channels, offset,
                                     ctx->ac.i32_0, 0,
                                     cache_policy);
      }
   }

   exit_waterfall(ctx, &wctx, NULL);

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7000);
}
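/* 64-bit SSBO compare-and-swap is emitted as a plain 64-bit cmpxchg on a
 * global pointer reconstructed from the first two dwords of the buffer
 * descriptor (the low 16 bits of the second dword carry the high address
 * bits); with robustBufferAccess the access is additionally guarded by a
 * bounds check against the buffer size and folded back in via a phi. */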
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                                           LLVMValueRef descriptor,
                                           LLVMValueRef offset,
                                           LLVMValueRef compare,
                                           LLVMValueRef exchange)
{
   LLVMBasicBlockRef start_block = NULL, then_block = NULL;
   if (ctx->abi->robust_buffer_access) {
      LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      start_block = LLVMGetInsertBlock(ctx->ac.builder);

      ac_build_ifcc(&ctx->ac, cond, -1);

      then_block = LLVMGetInsertBlock(ctx->ac.builder);
   }

   /* Build the 64-bit global address from the first two descriptor dwords. */
   LLVMValueRef ptr_parts[2] = {
      ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
      LLVMBuildAnd(ctx->ac.builder,
                   ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                   LLVMConstInt(ctx->ac.i32, 65535, 0), "")
   };

   ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
   ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

   offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

   LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
   ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
   ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

   LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
   result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

   if (ctx->abi->robust_buffer_access) {
      ac_build_endif(&ctx->ac, -1);

      LLVMBasicBlockRef incoming_blocks[2] = {
         start_block,
         then_block,
      };

      LLVMValueRef incoming_values[2] = {
         LLVMConstInt(ctx->ac.i64, 0, 0),
         result,
      };
      LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
      LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
      return ret;
   } else {
      return result;
   }
}
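/* With robust_buffer_access the cmpxchg is skipped for out-of-bounds offsets;
 * the phi above then yields the cmpxchg result when coming from the
 * then-block and a 64-bit zero when coming from the block that performed the
 * bounds check.
 */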
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7001);
   }

   LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
   const char *op;
   char name[64], type[8];
   LLVMValueRef params[6], descriptor;
   LLVMValueRef result;
   int arg_count = 0;

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   switch (instr->intrinsic) {
   case nir_intrinsic_ssbo_atomic_add:
      op = "add";
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      op = "smin";
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      op = "umin";
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      op = "smax";
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      op = "umax";
      break;
   case nir_intrinsic_ssbo_atomic_and:
      op = "and";
      break;
   case nir_intrinsic_ssbo_atomic_or:
      op = "or";
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      op = "xor";
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      op = "swap";
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      op = "cmpswap";
      break;
   default:
      abort();
   }

   descriptor = ctx->abi->load_ssbo(ctx->abi,
                                    rsrc_base, true);

   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
       return_type == ctx->ac.i64) {
      result = emit_ssbo_comp_swap_64(ctx, descriptor,
                                      get_src(ctx, instr->src[1]),
                                      get_src(ctx, instr->src[2]),
                                      get_src(ctx, instr->src[3]));
   } else {
      if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
         params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
      }
      params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
      params[arg_count++] = descriptor;

      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy with
          * LLVM 8, see r358579.
          */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i32_0; /* soffset */
         params[arg_count++] = ctx->ac.i32_0; /* slc */

         ac_build_type_name_for_intr(return_type, type, sizeof(type));
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
      } else {
         params[arg_count++] = ctx->ac.i32_0; /* vindex */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i1false; /* slc */

         assert(return_type == ctx->ac.i32);
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.buffer.atomic.%s", op);
      }

      result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
                                  arg_count, 0);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7001);
   return result;
}
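/* Example of the intrinsic names built above: with LLVM >= 9 an
 * nir_intrinsic_ssbo_atomic_add on a 32-bit value becomes
 * "llvm.amdgcn.raw.buffer.atomic.add.i32", while older LLVM uses the legacy
 * "llvm.amdgcn.buffer.atomic.add" form (i32 only, per the assert).
 */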
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   int elem_size_bytes = instr->dest.ssa.bit_size / 8;
   int num_components = instr->num_components;
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   unsigned cache_policy = get_cache_policy(ctx, access, false, false);

   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
   LLVMValueRef vindex = ctx->ac.i32_0;

   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
   LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

   LLVMValueRef results[4];
   for (int i = 0; i < num_components;) {
      int num_elems = num_components - i;
      if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
         num_elems = 1;
      if (num_elems * elem_size_bytes > 16)
         num_elems = 16 / elem_size_bytes;
      int load_bytes = num_elems * elem_size_bytes;

      LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

      LLVMValueRef ret;

      if (load_bytes == 1) {
         ret = ac_build_tbuffer_load_byte(&ctx->ac,
                                          rsrc,
                                          offset,
                                          ctx->ac.i32_0,
                                          immoffset,
                                          cache_policy);
      } else if (load_bytes == 2) {
         ret = ac_build_tbuffer_load_short(&ctx->ac,
                                           rsrc,
                                           offset,
                                           ctx->ac.i32_0,
                                           immoffset,
                                           cache_policy);
      } else {
         int num_channels = util_next_power_of_two(load_bytes) / 4;
         bool can_speculate = access & ACCESS_CAN_REORDER;

         ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
                                    vindex, offset, immoffset, 0,
                                    cache_policy, can_speculate, false);
      }

      LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
      ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

      LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

      for (unsigned j = 0; j < num_elems; j++) {
         results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
      }
      i += num_elems;
   }

   LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
   return exit_waterfall(ctx, &wctx, ret);
}
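/* Example of the load splitting above: a 3-component 64-bit load
 * (elem_size_bytes = 8) first clamps num_elems to 16 / 8 = 2 and issues a
 * 16-byte buffer load, then a second iteration loads the remaining component
 * with an 8-byte load at immoffset 16.
 */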
static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx,
                                        struct waterfall_context *wctx,
                                        const nir_intrinsic_instr *instr)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
                                          nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);

   LLVMValueRef ret;
   LLVMValueRef rsrc = rsrc_base;
   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   int num_components = instr->num_components;

   if (ctx->abi->load_ubo)
      rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

   if (instr->dest.ssa.bit_size == 64)
      num_components *= 2;

   if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
      unsigned load_bytes = instr->dest.ssa.bit_size / 8;
      LLVMValueRef results[num_components];
      for (unsigned i = 0; i < num_components; ++i) {
         LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                               load_bytes * i, 0);

         if (load_bytes == 1) {
            results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                    rsrc,
                                                    offset,
                                                    ctx->ac.i32_0,
                                                    immoffset,
                                                    0);
         } else {
            assert(load_bytes == 2);
            results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                     rsrc,
                                                     offset,
                                                     ctx->ac.i32_0,
                                                     immoffset,
                                                     0);
         }
      }
      ret = ac_build_gather_values(&ctx->ac, results, num_components);
   } else {
      ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                 NULL, 0, 0, true, true);

      ret = ac_trim_vector(&ctx->ac, ret, num_components);
   }

   ret = LLVMBuildBitCast(ctx->ac.builder, ret,
                          get_def_type(ctx, &instr->dest.ssa), "");

   return exit_waterfall(ctx, &wctx, ret);
}
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if(path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += size *
                            nir_src_as_uint(path.path[idx_lvl]->arr.index);
         } else {
            LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
                                                  get_src(ctx, path.path[idx_lvl]->arr.index), "");
            if (offset)
               offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
            else
               offset = array_off;
         }
      } else
         unreachable("Uhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, const_offset, 0),
                            "");

   *const_out = const_offset;
   *indir_out = offset;
}
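/* Example: for a per-vertex deref such as "in_var[vertex][i].field", the
 * leading per-vertex index is consumed first (returned through
 * vertex_index_out or vertex_index_ref), constant array/struct indices are
 * folded into *const_out in units of attribute slots, and any dynamic index
 * accumulates into the LLVM value returned through *indir_out.
 */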
static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
                                       nir_intrinsic_instr *instr,
                                       bool load_inputs)
{
   LLVMValueRef result;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef indir_index = NULL;
   unsigned const_index = 0;

   nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

   unsigned location = var->data.location;
   unsigned driver_location = var->data.driver_location;
   const bool is_patch = var->data.patch ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
   const bool is_compact = var->data.compact;

   get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                    false, NULL, is_patch ? NULL : &vertex_index,
                    &const_index, &indir_index);

   LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

   LLVMTypeRef src_component_type;
   if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
      src_component_type = LLVMGetElementType(dest_type);
   else
      src_component_type = dest_type;

   result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                         vertex_index, indir_index,
                                         const_index, location, driver_location,
                                         var->data.location_frac,
                                         instr->num_components,
                                         is_patch, is_compact, load_inputs);
   if (instr->dest.ssa.bit_size == 16) {
      result = ac_to_integer(&ctx->ac, result);
      result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
   }
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}

static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
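/* Booleans report 4 bytes above since this backend keeps GLSL booleans as
 * 32-bit values in memory, so glsl_get_bit_size() alone would not give their
 * in-memory size.
 */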
2226 static LLVMValueRef
visit_load_var(struct ac_nir_context
*ctx
,
2227 nir_intrinsic_instr
*instr
)
2229 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
2230 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
2232 LLVMValueRef values
[8];
2234 int ve
= instr
->dest
.ssa
.num_components
;
2236 LLVMValueRef indir_index
;
2238 unsigned const_index
;
2239 unsigned stride
= 4;
2240 int mode
= deref
->mode
;
2243 bool vs_in
= ctx
->stage
== MESA_SHADER_VERTEX
&&
2244 var
->data
.mode
== nir_var_shader_in
;
2245 idx
= var
->data
.driver_location
;
2246 comp
= var
->data
.location_frac
;
2247 mode
= var
->data
.mode
;
2249 get_deref_offset(ctx
, deref
, vs_in
, NULL
, NULL
,
2250 &const_index
, &indir_index
);
2252 if (var
->data
.compact
) {
2254 const_index
+= comp
;
2259 if (instr
->dest
.ssa
.bit_size
== 64 &&
2260 (deref
->mode
== nir_var_shader_in
||
2261 deref
->mode
== nir_var_shader_out
||
2262 deref
->mode
== nir_var_function_temp
))
2266 case nir_var_shader_in
:
2267 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
||
2268 ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
2269 return load_tess_varyings(ctx
, instr
, true);
2272 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
2273 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
2274 LLVMValueRef indir_index
;
2275 unsigned const_index
, vertex_index
;
2276 get_deref_offset(ctx
, deref
, false, &vertex_index
, NULL
,
2277 &const_index
, &indir_index
);
2278 assert(indir_index
== NULL
);
2280 return ctx
->abi
->load_inputs(ctx
->abi
, var
->data
.location
,
2281 var
->data
.driver_location
,
2282 var
->data
.location_frac
,
2283 instr
->num_components
, vertex_index
, const_index
, type
);
2286 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
2288 unsigned count
= glsl_count_attribute_slots(
2290 ctx
->stage
== MESA_SHADER_VERTEX
);
2292 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2293 &ctx
->ac
, ctx
->abi
->inputs
+ idx
+ chan
, count
,
2294 stride
, false, true);
2296 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2300 values
[chan
] = ctx
->abi
->inputs
[idx
+ chan
+ const_index
* stride
];
2303 case nir_var_function_temp
:
2304 for (unsigned chan
= 0; chan
< ve
; chan
++) {
2306 unsigned count
= glsl_count_attribute_slots(
2309 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2310 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
2311 stride
, true, true);
2313 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2317 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
, ctx
->locals
[idx
+ chan
+ const_index
* stride
], "");
2321 case nir_var_shader_out
:
2322 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
2323 return load_tess_varyings(ctx
, instr
, false);
2326 if (ctx
->stage
== MESA_SHADER_FRAGMENT
&&
2327 var
->data
.fb_fetch_output
&&
2328 ctx
->abi
->emit_fbfetch
)
2329 return ctx
->abi
->emit_fbfetch(ctx
->abi
);
2331 for (unsigned chan
= comp
; chan
< ve
+ comp
; chan
++) {
2333 unsigned count
= glsl_count_attribute_slots(
2336 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2337 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
2338 stride
, true, true);
2340 values
[chan
] = LLVMBuildExtractElement(ctx
->ac
.builder
,
2344 values
[chan
] = LLVMBuildLoad(ctx
->ac
.builder
,
2345 ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
],
2350 case nir_var_mem_global
: {
2351 LLVMValueRef address
= get_src(ctx
, instr
->src
[0]);
2352 LLVMTypeRef result_type
= get_def_type(ctx
, &instr
->dest
.ssa
);
2353 unsigned explicit_stride
= glsl_get_explicit_stride(deref
->type
);
2354 unsigned natural_stride
= type_scalar_size_bytes(deref
->type
);
2355 unsigned stride
= explicit_stride
? explicit_stride
: natural_stride
;
2356 int elem_size_bytes
= ac_get_elem_bits(&ctx
->ac
, result_type
) / 8;
2357 bool split_loads
= ctx
->ac
.chip_class
== GFX6
&& elem_size_bytes
< 4;
2359 if (stride
!= natural_stride
|| split_loads
) {
2360 if (LLVMGetTypeKind(result_type
) == LLVMVectorTypeKind
)
2361 result_type
= LLVMGetElementType(result_type
);
2363 LLVMTypeRef ptr_type
= LLVMPointerType(result_type
,
2364 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2365 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2367 for (unsigned i
= 0; i
< instr
->dest
.ssa
.num_components
; ++i
) {
2368 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, i
* stride
/ natural_stride
, 0);
2369 values
[i
] = LLVMBuildLoad(ctx
->ac
.builder
,
2370 ac_build_gep_ptr(&ctx
->ac
, address
, offset
), "");
2372 return ac_build_gather_values(&ctx
->ac
, values
, instr
->dest
.ssa
.num_components
);
2374 LLVMTypeRef ptr_type
= LLVMPointerType(result_type
,
2375 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2376 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2377 LLVMValueRef val
= LLVMBuildLoad(ctx
->ac
.builder
, address
, "");
2382 unreachable("unhandle variable mode");
2384 ret
= ac_build_varying_gather_values(&ctx
->ac
, values
, ve
, comp
);
2385 return LLVMBuildBitCast(ctx
->ac
.builder
, ret
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
2389 visit_store_var(struct ac_nir_context
*ctx
,
2390 nir_intrinsic_instr
*instr
)
2392 if (ctx
->ac
.postponed_kill
) {
2393 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2394 ctx
->ac
.postponed_kill
, "");
2395 ac_build_ifcc(&ctx
->ac
, cond
, 7002);
2398 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
2399 nir_variable
*var
= nir_deref_instr_get_variable(deref
);
2401 LLVMValueRef temp_ptr
, value
;
2404 LLVMValueRef src
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[1]));
2405 int writemask
= instr
->const_index
[0];
2406 LLVMValueRef indir_index
;
2407 unsigned const_index
;
2410 get_deref_offset(ctx
, deref
, false,
2411 NULL
, NULL
, &const_index
, &indir_index
);
2412 idx
= var
->data
.driver_location
;
2413 comp
= var
->data
.location_frac
;
2415 if (var
->data
.compact
) {
2416 const_index
+= comp
;
2421 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
)) == 64 &&
2422 (deref
->mode
== nir_var_shader_out
||
2423 deref
->mode
== nir_var_function_temp
)) {
2425 src
= LLVMBuildBitCast(ctx
->ac
.builder
, src
,
2426 LLVMVectorType(ctx
->ac
.f32
, ac_get_llvm_num_components(src
) * 2),
2429 writemask
= widen_mask(writemask
, 2);
2432 writemask
= writemask
<< comp
;
2434 switch (deref
->mode
) {
2435 case nir_var_shader_out
:
2437 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
2438 LLVMValueRef vertex_index
= NULL
;
2439 LLVMValueRef indir_index
= NULL
;
2440 unsigned const_index
= 0;
2441 const bool is_patch
= var
->data
.patch
||
2442 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_INNER
||
2443 var
->data
.location
== VARYING_SLOT_TESS_LEVEL_OUTER
;
2445 get_deref_offset(ctx
, deref
, false, NULL
,
2446 is_patch
? NULL
: &vertex_index
,
2447 &const_index
, &indir_index
);
2449 ctx
->abi
->store_tcs_outputs(ctx
->abi
, var
,
2450 vertex_index
, indir_index
,
2451 const_index
, src
, writemask
);
2455 for (unsigned chan
= 0; chan
< 8; chan
++) {
2457 if (!(writemask
& (1 << chan
)))
2460 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
- comp
);
2462 if (var
->data
.compact
)
2465 unsigned count
= glsl_count_attribute_slots(
2468 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2469 &ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
, count
,
2470 stride
, true, true);
2472 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
2473 value
, indir_index
, "");
2474 build_store_values_extended(&ctx
->ac
, ctx
->abi
->outputs
+ idx
+ chan
,
2475 count
, stride
, tmp_vec
);
2478 temp_ptr
= ctx
->abi
->outputs
[idx
+ chan
+ const_index
* stride
];
2480 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
2484 case nir_var_function_temp
:
2485 for (unsigned chan
= 0; chan
< 8; chan
++) {
2486 if (!(writemask
& (1 << chan
)))
2489 value
= ac_llvm_extract_elem(&ctx
->ac
, src
, chan
);
2491 unsigned count
= glsl_count_attribute_slots(
2494 LLVMValueRef tmp_vec
= ac_build_gather_values_extended(
2495 &ctx
->ac
, ctx
->locals
+ idx
+ chan
, count
,
2498 tmp_vec
= LLVMBuildInsertElement(ctx
->ac
.builder
, tmp_vec
,
2499 value
, indir_index
, "");
2500 build_store_values_extended(&ctx
->ac
, ctx
->locals
+ idx
+ chan
,
2503 temp_ptr
= ctx
->locals
[idx
+ chan
+ const_index
* 4];
2505 LLVMBuildStore(ctx
->ac
.builder
, value
, temp_ptr
);
2510 case nir_var_mem_global
: {
2511 int writemask
= instr
->const_index
[0];
2512 LLVMValueRef address
= get_src(ctx
, instr
->src
[0]);
2513 LLVMValueRef val
= get_src(ctx
, instr
->src
[1]);
2515 unsigned explicit_stride
= glsl_get_explicit_stride(deref
->type
);
2516 unsigned natural_stride
= type_scalar_size_bytes(deref
->type
);
2517 unsigned stride
= explicit_stride
? explicit_stride
: natural_stride
;
2518 int elem_size_bytes
= ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(val
)) / 8;
2519 bool split_stores
= ctx
->ac
.chip_class
== GFX6
&& elem_size_bytes
< 4;
2521 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(val
),
2522 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2523 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2525 if (writemask
== (1u << ac_get_llvm_num_components(val
)) - 1 &&
2526 stride
== natural_stride
&& !split_stores
) {
2527 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(val
),
2528 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2529 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2531 val
= LLVMBuildBitCast(ctx
->ac
.builder
, val
,
2532 LLVMGetElementType(LLVMTypeOf(address
)), "");
2533 LLVMBuildStore(ctx
->ac
.builder
, val
, address
);
2535 LLVMTypeRef val_type
= LLVMTypeOf(val
);
2536 if (LLVMGetTypeKind(LLVMTypeOf(val
)) == LLVMVectorTypeKind
)
2537 val_type
= LLVMGetElementType(val_type
);
2539 LLVMTypeRef ptr_type
= LLVMPointerType(val_type
,
2540 LLVMGetPointerAddressSpace(LLVMTypeOf(address
)));
2541 address
= LLVMBuildBitCast(ctx
->ac
.builder
, address
, ptr_type
, "");
2542 for (unsigned chan
= 0; chan
< 4; chan
++) {
2543 if (!(writemask
& (1 << chan
)))
2546 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, chan
* stride
/ natural_stride
, 0);
2548 LLVMValueRef ptr
= ac_build_gep_ptr(&ctx
->ac
, address
, offset
);
2549 LLVMValueRef src
= ac_llvm_extract_elem(&ctx
->ac
, val
,
2551 src
= LLVMBuildBitCast(ctx
->ac
.builder
, src
,
2552 LLVMGetElementType(LLVMTypeOf(ptr
)), "");
2553 LLVMBuildStore(ctx
->ac
.builder
, src
, ptr
);
2563 if (ctx
->ac
.postponed_kill
)
2564 ac_build_endif(&ctx
->ac
, 7002);
}

static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_BUF:
      return 1;
   case GLSL_SAMPLER_DIM_1D:
      return array ? 2 : 1;
   case GLSL_SAMPLER_DIM_2D:
      return array ? 3 : 2;
   case GLSL_SAMPLER_DIM_MS:
      return array ? 4 : 3;
   case GLSL_SAMPLER_DIM_3D:
   case GLSL_SAMPLER_DIM_CUBE:
      return 3;
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_SUBPASS:
      return 2;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return 3;
   default:
      break;
   }
   return 0;
}
static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                                    LLVMValueRef coord_x, LLVMValueRef coord_y,
                                                    LLVMValueRef coord_z,
                                                    LLVMValueRef sample_index,
                                                    LLVMValueRef fmask_desc_ptr)
{
   unsigned sample_chan = coord_z ? 3 : 2;
   LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
   addr[sample_chan] = sample_index;

   ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
   return addr[sample_chan];
}

static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
   assert(instr->src[0].is_ssa);
   return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
2612 static LLVMValueRef
get_image_descriptor(struct ac_nir_context
*ctx
,
2613 const nir_intrinsic_instr
*instr
,
2614 LLVMValueRef dynamic_index
,
2615 enum ac_descriptor_type desc_type
,
2618 nir_deref_instr
*deref_instr
=
2619 instr
->src
[0].ssa
->parent_instr
->type
== nir_instr_type_deref
?
2620 nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
) : NULL
;
2622 return get_sampler_desc(ctx
, deref_instr
, desc_type
, &instr
->instr
, dynamic_index
, true, write
);
2625 static void get_image_coords(struct ac_nir_context
*ctx
,
2626 const nir_intrinsic_instr
*instr
,
2627 LLVMValueRef dynamic_desc_index
,
2628 struct ac_image_args
*args
,
2629 enum glsl_sampler_dim dim
,
2632 LLVMValueRef src0
= get_src(ctx
, instr
->src
[1]);
2633 LLVMValueRef masks
[] = {
2634 LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
2635 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
2637 LLVMValueRef sample_index
= ac_llvm_extract_elem(&ctx
->ac
, get_src(ctx
, instr
->src
[2]), 0);
2640 ASSERTED
bool add_frag_pos
= (dim
== GLSL_SAMPLER_DIM_SUBPASS
||
2641 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
2642 bool is_ms
= (dim
== GLSL_SAMPLER_DIM_MS
||
2643 dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
);
2644 bool gfx9_1d
= ctx
->ac
.chip_class
== GFX9
&& dim
== GLSL_SAMPLER_DIM_1D
;
2645 assert(!add_frag_pos
&& "Input attachments should be lowered by this point.");
2646 count
= image_type_to_components_count(dim
, is_array
);
2648 if (is_ms
&& (instr
->intrinsic
== nir_intrinsic_image_deref_load
||
2649 instr
->intrinsic
== nir_intrinsic_bindless_image_load
)) {
2650 LLVMValueRef fmask_load_address
[3];
2652 fmask_load_address
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
2653 fmask_load_address
[1] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[1], "");
2655 fmask_load_address
[2] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[2], "");
2657 fmask_load_address
[2] = NULL
;
2659 sample_index
= adjust_sample_index_using_fmask(&ctx
->ac
,
2660 fmask_load_address
[0],
2661 fmask_load_address
[1],
2662 fmask_load_address
[2],
2664 get_sampler_desc(ctx
, nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
),
2665 AC_DESC_FMASK
, &instr
->instr
, dynamic_desc_index
, true, false));
2667 if (count
== 1 && !gfx9_1d
) {
2668 if (instr
->src
[1].ssa
->num_components
)
2669 args
->coords
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, src0
, masks
[0], "");
2671 args
->coords
[0] = src0
;
2676 for (chan
= 0; chan
< count
; ++chan
) {
2677 args
->coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src0
, chan
);
2682 args
->coords
[2] = args
->coords
[1];
2683 args
->coords
[1] = ctx
->ac
.i32_0
;
2685 args
->coords
[1] = ctx
->ac
.i32_0
;
2688 if (ctx
->ac
.chip_class
== GFX9
&&
2689 dim
== GLSL_SAMPLER_DIM_2D
&&
2691 /* The hw can't bind a slice of a 3D image as a 2D
2692 * image, because it ignores BASE_ARRAY if the target
2693 * is 3D. The workaround is to read BASE_ARRAY and set
2694 * it as the 3rd address operand for all 2D images.
2696 LLVMValueRef first_layer
, const5
, mask
;
2698 const5
= LLVMConstInt(ctx
->ac
.i32
, 5, 0);
2699 mask
= LLVMConstInt(ctx
->ac
.i32
, S_008F24_BASE_ARRAY(~0), 0);
2700 first_layer
= LLVMBuildExtractElement(ctx
->ac
.builder
, args
->resource
, const5
, "");
2701 first_layer
= LLVMBuildAnd(ctx
->ac
.builder
, first_layer
, mask
, "");
2703 args
->coords
[count
] = first_layer
;
2709 args
->coords
[count
] = sample_index
;
2715 static LLVMValueRef
get_image_buffer_descriptor(struct ac_nir_context
*ctx
,
2716 const nir_intrinsic_instr
*instr
,
2717 LLVMValueRef dynamic_index
,
2718 bool write
, bool atomic
)
2720 LLVMValueRef rsrc
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_BUFFER
, write
);
2721 if (ctx
->ac
.chip_class
== GFX9
&& LLVM_VERSION_MAJOR
< 9 && atomic
) {
2722 LLVMValueRef elem_count
= LLVMBuildExtractElement(ctx
->ac
.builder
, rsrc
, LLVMConstInt(ctx
->ac
.i32
, 2, 0), "");
2723 LLVMValueRef stride
= LLVMBuildExtractElement(ctx
->ac
.builder
, rsrc
, LLVMConstInt(ctx
->ac
.i32
, 1, 0), "");
2724 stride
= LLVMBuildLShr(ctx
->ac
.builder
, stride
, LLVMConstInt(ctx
->ac
.i32
, 16, 0), "");
2726 LLVMValueRef new_elem_count
= LLVMBuildSelect(ctx
->ac
.builder
,
2727 LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntUGT
, elem_count
, stride
, ""),
2728 elem_count
, stride
, "");
2730 rsrc
= LLVMBuildInsertElement(ctx
->ac
.builder
, rsrc
, new_elem_count
,
2731 LLVMConstInt(ctx
->ac
.i32
, 2, 0), "");
2736 static LLVMValueRef
enter_waterfall_image(struct ac_nir_context
*ctx
,
2737 struct waterfall_context
*wctx
,
2738 const nir_intrinsic_instr
*instr
)
2740 nir_deref_instr
*deref_instr
= NULL
;
2742 if (instr
->src
[0].ssa
->parent_instr
->type
== nir_instr_type_deref
)
2743 deref_instr
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
2745 LLVMValueRef value
= get_sampler_desc_index(ctx
, deref_instr
, &instr
->instr
, true);
2746 return enter_waterfall(ctx
, wctx
, value
, nir_intrinsic_access(instr
) & ACCESS_NON_UNIFORM
);
2749 static LLVMValueRef
visit_image_load(struct ac_nir_context
*ctx
,
2750 const nir_intrinsic_instr
*instr
,
2755 enum glsl_sampler_dim dim
;
2756 enum gl_access_qualifier access
;
2759 dim
= nir_intrinsic_image_dim(instr
);
2760 access
= nir_intrinsic_access(instr
);
2761 is_array
= nir_intrinsic_image_array(instr
);
2763 const nir_deref_instr
*image_deref
= get_image_deref(instr
);
2764 const struct glsl_type
*type
= image_deref
->type
;
2765 const nir_variable
*var
= nir_deref_instr_get_variable(image_deref
);
2766 dim
= glsl_get_sampler_dim(type
);
2767 access
= var
->data
.access
;
2768 is_array
= glsl_sampler_type_is_array(type
);
2771 struct waterfall_context wctx
;
2772 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
2774 struct ac_image_args args
= {};
2776 args
.cache_policy
= get_cache_policy(ctx
, access
, false, false);
2778 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
2779 unsigned mask
= nir_ssa_def_components_read(&instr
->dest
.ssa
);
2780 unsigned num_channels
= util_last_bit(mask
);
2781 LLVMValueRef rsrc
, vindex
;
2783 rsrc
= get_image_buffer_descriptor(ctx
, instr
, dynamic_index
, false, false);
2784 vindex
= LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[1]),
2787 assert(instr
->dest
.is_ssa
);
2788 bool can_speculate
= access
& ACCESS_CAN_REORDER
;
2789 res
= ac_build_buffer_load_format(&ctx
->ac
, rsrc
, vindex
,
2790 ctx
->ac
.i32_0
, num_channels
,
2793 instr
->dest
.ssa
.bit_size
== 16);
2794 res
= ac_build_expand_to_vec4(&ctx
->ac
, res
, num_channels
);
2796 res
= ac_trim_vector(&ctx
->ac
, res
, instr
->dest
.ssa
.num_components
);
2797 res
= ac_to_integer(&ctx
->ac
, res
);
2799 bool level_zero
= nir_src_is_const(instr
->src
[3]) && nir_src_as_uint(instr
->src
[3]) == 0;
2801 args
.opcode
= level_zero
? ac_image_load
: ac_image_load_mip
;
2802 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, false);
2803 get_image_coords(ctx
, instr
, dynamic_index
, &args
, dim
, is_array
);
2804 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
2806 args
.lod
= get_src(ctx
, instr
->src
[3]);
2808 args
.attributes
= AC_FUNC_ATTR_READONLY
;
2810 assert(instr
->dest
.is_ssa
);
2811 args
.d16
= instr
->dest
.ssa
.bit_size
== 16;
2813 res
= ac_build_image_opcode(&ctx
->ac
, &args
);
2815 return exit_waterfall(ctx
, &wctx
, res
);
2818 static void visit_image_store(struct ac_nir_context
*ctx
,
2819 const nir_intrinsic_instr
*instr
,
2822 if (ctx
->ac
.postponed_kill
) {
2823 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2824 ctx
->ac
.postponed_kill
, "");
2825 ac_build_ifcc(&ctx
->ac
, cond
, 7003);
2828 enum glsl_sampler_dim dim
;
2829 enum gl_access_qualifier access
;
2833 dim
= nir_intrinsic_image_dim(instr
);
2834 access
= nir_intrinsic_access(instr
);
2835 is_array
= nir_intrinsic_image_array(instr
);
2837 const nir_deref_instr
*image_deref
= get_image_deref(instr
);
2838 const struct glsl_type
*type
= image_deref
->type
;
2839 const nir_variable
*var
= nir_deref_instr_get_variable(image_deref
);
2840 dim
= glsl_get_sampler_dim(type
);
2841 access
= var
->data
.access
;
2842 is_array
= glsl_sampler_type_is_array(type
);
2845 struct waterfall_context wctx
;
2846 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
2848 bool writeonly_memory
= access
& ACCESS_NON_READABLE
;
2849 struct ac_image_args args
= {};
2851 args
.cache_policy
= get_cache_policy(ctx
, access
, true, writeonly_memory
);
2853 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
2854 LLVMValueRef rsrc
= get_image_buffer_descriptor(ctx
, instr
, dynamic_index
, true, false);
2855 LLVMValueRef src
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[3]));
2856 unsigned src_channels
= ac_get_llvm_num_components(src
);
2857 LLVMValueRef vindex
;
2859 if (src_channels
== 3)
2860 src
= ac_build_expand_to_vec4(&ctx
->ac
, src
, 3);
2862 vindex
= LLVMBuildExtractElement(ctx
->ac
.builder
,
2863 get_src(ctx
, instr
->src
[1]),
2866 ac_build_buffer_store_format(&ctx
->ac
, rsrc
, src
, vindex
,
2867 ctx
->ac
.i32_0
, args
.cache_policy
);
2869 bool level_zero
= nir_src_is_const(instr
->src
[4]) && nir_src_as_uint(instr
->src
[4]) == 0;
2871 args
.opcode
= level_zero
? ac_image_store
: ac_image_store_mip
;
2872 args
.data
[0] = ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[3]));
2873 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, true);
2874 get_image_coords(ctx
, instr
, dynamic_index
, &args
, dim
, is_array
);
2875 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
2877 args
.lod
= get_src(ctx
, instr
->src
[4]);
2879 args
.d16
= ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(args
.data
[0])) == 16;
2881 ac_build_image_opcode(&ctx
->ac
, &args
);
2884 exit_waterfall(ctx
, &wctx
, NULL
);
2885 if (ctx
->ac
.postponed_kill
)
2886 ac_build_endif(&ctx
->ac
, 7003);
2889 static LLVMValueRef
visit_image_atomic(struct ac_nir_context
*ctx
,
2890 const nir_intrinsic_instr
*instr
,
2893 if (ctx
->ac
.postponed_kill
) {
2894 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
2895 ctx
->ac
.postponed_kill
, "");
2896 ac_build_ifcc(&ctx
->ac
, cond
, 7004);
2899 LLVMValueRef params
[7];
2900 int param_count
= 0;
2902 bool cmpswap
= instr
->intrinsic
== nir_intrinsic_image_deref_atomic_comp_swap
||
2903 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_comp_swap
;
2904 const char *atomic_name
;
2905 char intrinsic_name
[64];
2906 enum ac_atomic_op atomic_subop
;
2907 ASSERTED
int length
;
2909 enum glsl_sampler_dim dim
;
2912 if (instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_imin
||
2913 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_umin
||
2914 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_imax
||
2915 instr
->intrinsic
== nir_intrinsic_bindless_image_atomic_umax
) {
2916 ASSERTED
const GLenum format
= nir_intrinsic_format(instr
);
2917 assert(format
== GL_R32UI
|| format
== GL_R32I
);
2919 dim
= nir_intrinsic_image_dim(instr
);
2920 is_array
= nir_intrinsic_image_array(instr
);
2922 const struct glsl_type
*type
= get_image_deref(instr
)->type
;
2923 dim
= glsl_get_sampler_dim(type
);
2924 is_array
= glsl_sampler_type_is_array(type
);
2927 struct waterfall_context wctx
;
2928 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
2930 switch (instr
->intrinsic
) {
2931 case nir_intrinsic_bindless_image_atomic_add
:
2932 case nir_intrinsic_image_deref_atomic_add
:
2933 atomic_name
= "add";
2934 atomic_subop
= ac_atomic_add
;
2936 case nir_intrinsic_bindless_image_atomic_imin
:
2937 case nir_intrinsic_image_deref_atomic_imin
:
2938 atomic_name
= "smin";
2939 atomic_subop
= ac_atomic_smin
;
2941 case nir_intrinsic_bindless_image_atomic_umin
:
2942 case nir_intrinsic_image_deref_atomic_umin
:
2943 atomic_name
= "umin";
2944 atomic_subop
= ac_atomic_umin
;
2946 case nir_intrinsic_bindless_image_atomic_imax
:
2947 case nir_intrinsic_image_deref_atomic_imax
:
2948 atomic_name
= "smax";
2949 atomic_subop
= ac_atomic_smax
;
2951 case nir_intrinsic_bindless_image_atomic_umax
:
2952 case nir_intrinsic_image_deref_atomic_umax
:
2953 atomic_name
= "umax";
2954 atomic_subop
= ac_atomic_umax
;
2956 case nir_intrinsic_bindless_image_atomic_and
:
2957 case nir_intrinsic_image_deref_atomic_and
:
2958 atomic_name
= "and";
2959 atomic_subop
= ac_atomic_and
;
2961 case nir_intrinsic_bindless_image_atomic_or
:
2962 case nir_intrinsic_image_deref_atomic_or
:
2964 atomic_subop
= ac_atomic_or
;
2966 case nir_intrinsic_bindless_image_atomic_xor
:
2967 case nir_intrinsic_image_deref_atomic_xor
:
2968 atomic_name
= "xor";
2969 atomic_subop
= ac_atomic_xor
;
2971 case nir_intrinsic_bindless_image_atomic_exchange
:
2972 case nir_intrinsic_image_deref_atomic_exchange
:
2973 atomic_name
= "swap";
2974 atomic_subop
= ac_atomic_swap
;
2976 case nir_intrinsic_bindless_image_atomic_comp_swap
:
2977 case nir_intrinsic_image_deref_atomic_comp_swap
:
2978 atomic_name
= "cmpswap";
2979 atomic_subop
= 0; /* not used */
2981 case nir_intrinsic_bindless_image_atomic_inc_wrap
:
2982 case nir_intrinsic_image_deref_atomic_inc_wrap
: {
2983 atomic_name
= "inc";
2984 atomic_subop
= ac_atomic_inc_wrap
;
2985 /* ATOMIC_INC instruction does:
2986 * value = (value + 1) % (data + 1)
2988 * value = (value + 1) % data
2989 * So replace 'data' by 'data - 1'.
2991 ctx
->ssa_defs
[instr
->src
[3].ssa
->index
] =
2992 LLVMBuildSub(ctx
->ac
.builder
,
2993 ctx
->ssa_defs
[instr
->src
[3].ssa
->index
],
2997 case nir_intrinsic_bindless_image_atomic_dec_wrap
:
2998 case nir_intrinsic_image_deref_atomic_dec_wrap
:
2999 atomic_name
= "dec";
3000 atomic_subop
= ac_atomic_dec_wrap
;
3007 params
[param_count
++] = get_src(ctx
, instr
->src
[4]);
3008 params
[param_count
++] = get_src(ctx
, instr
->src
[3]);
3010 LLVMValueRef result
;
3011 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
3012 params
[param_count
++] = get_image_buffer_descriptor(ctx
, instr
, dynamic_index
, true, true);
3013 params
[param_count
++] = LLVMBuildExtractElement(ctx
->ac
.builder
, get_src(ctx
, instr
->src
[1]),
3014 ctx
->ac
.i32_0
, ""); /* vindex */
3015 params
[param_count
++] = ctx
->ac
.i32_0
; /* voffset */
3016 if (LLVM_VERSION_MAJOR
>= 9) {
3017 /* XXX: The new raw/struct atomic intrinsics are buggy
3018 * with LLVM 8, see r358579.
3020 params
[param_count
++] = ctx
->ac
.i32_0
; /* soffset */
3021 params
[param_count
++] = ctx
->ac
.i32_0
; /* slc */
3023 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3024 "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name
);
3026 params
[param_count
++] = ctx
->ac
.i1false
; /* slc */
3028 length
= snprintf(intrinsic_name
, sizeof(intrinsic_name
),
3029 "llvm.amdgcn.buffer.atomic.%s", atomic_name
);
3032 assert(length
< sizeof(intrinsic_name
));
3033 result
= ac_build_intrinsic(&ctx
->ac
, intrinsic_name
, ctx
->ac
.i32
,
3034 params
, param_count
, 0);
3036 struct ac_image_args args
= {};
3037 args
.opcode
= cmpswap
? ac_image_atomic_cmpswap
: ac_image_atomic
;
3038 args
.atomic
= atomic_subop
;
3039 args
.data
[0] = params
[0];
3041 args
.data
[1] = params
[1];
3042 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, true);
3043 get_image_coords(ctx
, instr
, dynamic_index
, &args
, dim
, is_array
);
3044 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
3046 result
= ac_build_image_opcode(&ctx
->ac
, &args
);
3049 result
= exit_waterfall(ctx
, &wctx
, result
);
3050 if (ctx
->ac
.postponed_kill
)
3051 ac_build_endif(&ctx
->ac
, 7004);
3055 static LLVMValueRef
visit_image_samples(struct ac_nir_context
*ctx
,
3056 nir_intrinsic_instr
*instr
)
3058 struct waterfall_context wctx
;
3059 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
3060 LLVMValueRef rsrc
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, false);
3062 LLVMValueRef ret
= ac_build_image_get_sample_count(&ctx
->ac
, rsrc
);
3064 return exit_waterfall(ctx
, &wctx
, ret
);
3067 static LLVMValueRef
visit_image_size(struct ac_nir_context
*ctx
,
3068 const nir_intrinsic_instr
*instr
,
3073 enum glsl_sampler_dim dim
;
3076 dim
= nir_intrinsic_image_dim(instr
);
3077 is_array
= nir_intrinsic_image_array(instr
);
3079 const struct glsl_type
*type
= get_image_deref(instr
)->type
;
3080 dim
= glsl_get_sampler_dim(type
);
3081 is_array
= glsl_sampler_type_is_array(type
);
3084 struct waterfall_context wctx
;
3085 LLVMValueRef dynamic_index
= enter_waterfall_image(ctx
, &wctx
, instr
);
3087 if (dim
== GLSL_SAMPLER_DIM_BUF
) {
3088 res
= get_buffer_size(ctx
, get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_BUFFER
, false), true);
3091 struct ac_image_args args
= { 0 };
3093 args
.dim
= ac_get_image_dim(ctx
->ac
.chip_class
, dim
, is_array
);
3095 args
.resource
= get_image_descriptor(ctx
, instr
, dynamic_index
, AC_DESC_IMAGE
, false);
3096 args
.opcode
= ac_image_get_resinfo
;
3097 args
.lod
= ctx
->ac
.i32_0
;
3098 args
.attributes
= AC_FUNC_ATTR_READNONE
;
3100 res
= ac_build_image_opcode(&ctx
->ac
, &args
);
3102 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
3104 if (dim
== GLSL_SAMPLER_DIM_CUBE
&& is_array
) {
3105 LLVMValueRef six
= LLVMConstInt(ctx
->ac
.i32
, 6, false);
3106 LLVMValueRef z
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
, two
, "");
3107 z
= LLVMBuildSDiv(ctx
->ac
.builder
, z
, six
, "");
3108 res
= LLVMBuildInsertElement(ctx
->ac
.builder
, res
, z
, two
, "");
3111 if (ctx
->ac
.chip_class
== GFX9
&& dim
== GLSL_SAMPLER_DIM_1D
&& is_array
) {
3112 LLVMValueRef layers
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
, two
, "");
3113 res
= LLVMBuildInsertElement(ctx
->ac
.builder
, res
, layers
,
3117 return exit_waterfall(ctx
, &wctx
, res
);
}

static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
   unsigned wait_flags = 0;

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
      wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_shared:
      wait_flags = AC_WAIT_LGKM;
      break;
   default:
      break;
   }

   ac_build_waitcnt(ac, wait_flags);
}

void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }
   ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_discard);
      cond = ctx->ac.i1false;
   }

   ac_build_kill_if_false(&ctx->ac, cond);
}

static void emit_demote(struct ac_nir_context *ctx,
                        const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_demote_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_demote);
      cond = ctx->ac.i1false;
   }

   /* Kill immediately while maintaining WQM. */
   ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));

   LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
   mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
   LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
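/* The postponed_kill mask updated above is what the 7000-series
 * ac_build_ifcc() guards around SSBO/image stores and atomics test, so lanes
 * demoted here stop performing memory side effects without terminating the
 * wave.
 */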
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
   LLVMValueRef result;
   LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
   result = LLVMBuildAnd(ctx->ac.builder,
                         ac_get_arg(&ctx->ac, ctx->args->tg_size),
                         LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

   if (ctx->ac.wave_size == 32)
      result = LLVMBuildLShr(ctx->ac.builder, result,
                             LLVMConstInt(ctx->ac.i32, 1, false), "");

   return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}

static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      LLVMValueRef result;
      result = LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
      return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 0, false);
   }
}

static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      return LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 1, false);
   }
}
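/* As the masks above imply, tg_size packs the number of waves per threadgroup
 * in bits [5:0] and the wave's index within the group in bits [11:6], so
 * (tg_size & 0xfc0) equals wave_id * 64; with wave32 it is shifted right once
 * more to get wave_id * 32 before adding the lane id.
 */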
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
   LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
   const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

   /* The second argument is whether cttz(0) should be defined, but we do not care. */
   LLVMValueRef args[] = {active_set, ctx->ac.i1false};
   LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                            ctx->ac.iN_wavemask, args, 2,
                                            AC_FUNC_ATTR_NOUNWIND |
                                            AC_FUNC_ATTR_READNONE);

   return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
   LLVMValueRef values[4], derived_ptr, index, ret;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                     instr->dest.ssa.bit_size);

   for (int chan = 0; chan < instr->num_components; chan++) {
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
      values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
   }

   ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}

static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
   LLVMValueRef derived_ptr, data, index;
   LLVMBuilderRef builder = ctx->ac.builder;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                     instr->src[0].ssa->bit_size);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   int writemask = nir_intrinsic_write_mask(instr);
   for (int chan = 0; chan < 4; chan++) {
      if (!(writemask & (1 << chan))) {
         continue;
      }
      data = ac_llvm_extract_elem(&ctx->ac, src, chan);
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
      LLVMBuildStore(builder, data, derived_ptr);
   }
}
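/* Example: a shared store with writemask 0b0101 writes channels 0 and 2 of
 * src to the LDS slots at GEP indices 0 and 2 relative to the converted
 * pointer; the other channels are skipped.
 */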
3297 static LLVMValueRef
visit_var_atomic(struct ac_nir_context
*ctx
,
3298 const nir_intrinsic_instr
*instr
,
3299 LLVMValueRef ptr
, int src_idx
)
3301 if (ctx
->ac
.postponed_kill
) {
3302 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
3303 ctx
->ac
.postponed_kill
, "");
3304 ac_build_ifcc(&ctx
->ac
, cond
, 7005);
3307 LLVMValueRef result
;
3308 LLVMValueRef src
= get_src(ctx
, instr
->src
[src_idx
]);
3310 const char *sync_scope
= LLVM_VERSION_MAJOR
>= 9 ? "workgroup-one-as" : "workgroup";
3312 if (instr
->src
[0].ssa
->parent_instr
->type
== nir_instr_type_deref
) {
3313 nir_deref_instr
*deref
= nir_instr_as_deref(instr
->src
[0].ssa
->parent_instr
);
3314 if (deref
->mode
== nir_var_mem_global
) {
3315 /* use "singlethread" sync scope to implement relaxed ordering */
3316 sync_scope
= LLVM_VERSION_MAJOR
>= 9 ? "singlethread-one-as" : "singlethread";
3318 LLVMTypeRef ptr_type
= LLVMPointerType(LLVMTypeOf(src
), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
)));
3319 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
, ptr_type
, "");
3323 if (instr
->intrinsic
== nir_intrinsic_shared_atomic_comp_swap
||
3324 instr
->intrinsic
== nir_intrinsic_deref_atomic_comp_swap
) {
3325 LLVMValueRef src1
= get_src(ctx
, instr
->src
[src_idx
+ 1]);
3326 result
= ac_build_atomic_cmp_xchg(&ctx
->ac
, ptr
, src
, src1
, sync_scope
);
3327 result
= LLVMBuildExtractValue(ctx
->ac
.builder
, result
, 0, "");
3329 LLVMAtomicRMWBinOp op
;
3330 switch (instr
->intrinsic
) {
3331 case nir_intrinsic_shared_atomic_add
:
3332 case nir_intrinsic_deref_atomic_add
:
3333 op
= LLVMAtomicRMWBinOpAdd
;
3335 case nir_intrinsic_shared_atomic_umin
:
3336 case nir_intrinsic_deref_atomic_umin
:
3337 op
= LLVMAtomicRMWBinOpUMin
;
3339 case nir_intrinsic_shared_atomic_umax
:
3340 case nir_intrinsic_deref_atomic_umax
:
3341 op
= LLVMAtomicRMWBinOpUMax
;
3343 case nir_intrinsic_shared_atomic_imin
:
3344 case nir_intrinsic_deref_atomic_imin
:
3345 op
= LLVMAtomicRMWBinOpMin
;
3347 case nir_intrinsic_shared_atomic_imax
:
3348 case nir_intrinsic_deref_atomic_imax
:
3349 op
= LLVMAtomicRMWBinOpMax
;
3351 case nir_intrinsic_shared_atomic_and
:
3352 case nir_intrinsic_deref_atomic_and
:
3353 op
= LLVMAtomicRMWBinOpAnd
;
3355 case nir_intrinsic_shared_atomic_or
:
3356 case nir_intrinsic_deref_atomic_or
:
3357 op
= LLVMAtomicRMWBinOpOr
;
3359 case nir_intrinsic_shared_atomic_xor
:
3360 case nir_intrinsic_deref_atomic_xor
:
3361 op
= LLVMAtomicRMWBinOpXor
;
3363 case nir_intrinsic_shared_atomic_exchange
:
3364 case nir_intrinsic_deref_atomic_exchange
:
3365 op
= LLVMAtomicRMWBinOpXchg
;
3371 result
= ac_build_atomic_rmw(&ctx
->ac
, op
, ptr
, ac_to_integer(&ctx
->ac
, src
), sync_scope
);
3374 if (ctx
->ac
.postponed_kill
)
3375 ac_build_endif(&ctx
->ac
, 7005);
   return result;
}

static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
   LLVMValueRef values[2];
   LLVMValueRef pos[2];

   pos[0] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
   pos[1] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

   values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
   values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
   return ac_build_gather_values(&ctx->ac, values, 2);
}
3394 static LLVMValueRef
lookup_interp_param(struct ac_nir_context
*ctx
,
3395 enum glsl_interp_mode interp
, unsigned location
)
3398 case INTERP_MODE_FLAT
:
3401 case INTERP_MODE_SMOOTH
:
3402 case INTERP_MODE_NONE
:
3403 if (location
== INTERP_CENTER
)
3404 return ac_get_arg(&ctx
->ac
, ctx
->args
->persp_center
);
3405 else if (location
== INTERP_CENTROID
)
3406 return ctx
->abi
->persp_centroid
;
3407 else if (location
== INTERP_SAMPLE
)
3408 return ac_get_arg(&ctx
->ac
, ctx
->args
->persp_sample
);
3410 case INTERP_MODE_NOPERSPECTIVE
:
3411 if (location
== INTERP_CENTER
)
3412 return ac_get_arg(&ctx
->ac
, ctx
->args
->linear_center
);
3413 else if (location
== INTERP_CENTROID
)
3414 return ctx
->abi
->linear_centroid
;
3415 else if (location
== INTERP_SAMPLE
)
3416 return ac_get_arg(&ctx
->ac
, ctx
->args
->linear_sample
);
3422 static LLVMValueRef
barycentric_center(struct ac_nir_context
*ctx
,
3425 LLVMValueRef interp_param
= lookup_interp_param(ctx
, mode
, INTERP_CENTER
);
3426 return LLVMBuildBitCast(ctx
->ac
.builder
, interp_param
, ctx
->ac
.v2i32
, "");
3429 static LLVMValueRef
barycentric_offset(struct ac_nir_context
*ctx
,
3431 LLVMValueRef offset
)
3433 LLVMValueRef interp_param
= lookup_interp_param(ctx
, mode
, INTERP_CENTER
);
3434 LLVMValueRef src_c0
= ac_to_float(&ctx
->ac
, LLVMBuildExtractElement(ctx
->ac
.builder
, offset
, ctx
->ac
.i32_0
, ""));
3435 LLVMValueRef src_c1
= ac_to_float(&ctx
->ac
, LLVMBuildExtractElement(ctx
->ac
.builder
, offset
, ctx
->ac
.i32_1
, ""));
3437 LLVMValueRef ij_out
[2];
3438 LLVMValueRef ddxy_out
= ac_build_ddxy_interp(&ctx
->ac
, interp_param
);
3441 * take the I then J parameters, and the DDX/Y for it, and
3442 * calculate the IJ inputs for the interpolator.
3443 * temp1 = ddx * offset/sample.x + I;
3444 * interp_param.I = ddy * offset/sample.y + temp1;
3445 * temp1 = ddx * offset/sample.x + J;
3446 * interp_param.J = ddy * offset/sample.y + temp1;
3448 for (unsigned i
= 0; i
< 2; i
++) {
3449 LLVMValueRef ix_ll
= LLVMConstInt(ctx
->ac
.i32
, i
, false);
3450 LLVMValueRef iy_ll
= LLVMConstInt(ctx
->ac
.i32
, i
+ 2, false);
3451 LLVMValueRef ddx_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
3452 ddxy_out
, ix_ll
, "");
3453 LLVMValueRef ddy_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
3454 ddxy_out
, iy_ll
, "");
3455 LLVMValueRef interp_el
= LLVMBuildExtractElement(ctx
->ac
.builder
,
3456 interp_param
, ix_ll
, "");
3457 LLVMValueRef temp1
, temp2
;
3459 interp_el
= LLVMBuildBitCast(ctx
->ac
.builder
, interp_el
,
3462 temp1
= ac_build_fmad(&ctx
->ac
, ddx_el
, src_c0
, interp_el
);
3463 temp2
= ac_build_fmad(&ctx
->ac
, ddy_el
, src_c1
, temp1
);
3465 ij_out
[i
] = LLVMBuildBitCast(ctx
->ac
.builder
,
3466 temp2
, ctx
->ac
.i32
, "");
3468 interp_param
= ac_build_gather_values(&ctx
->ac
, ij_out
, 2);
3469 return LLVMBuildBitCast(ctx
->ac
.builder
, interp_param
, ctx
->ac
.v2i32
, "");
3472 static LLVMValueRef
barycentric_centroid(struct ac_nir_context
*ctx
,
3475 LLVMValueRef interp_param
= lookup_interp_param(ctx
, mode
, INTERP_CENTROID
);
3476 return LLVMBuildBitCast(ctx
->ac
.builder
, interp_param
, ctx
->ac
.v2i32
, "");
3479 static LLVMValueRef
barycentric_at_sample(struct ac_nir_context
*ctx
,
3481 LLVMValueRef sample_id
)
3483 if (ctx
->abi
->interp_at_sample_force_center
)
3484 return barycentric_center(ctx
, mode
);
3486 LLVMValueRef halfval
= LLVMConstReal(ctx
->ac
.f32
, 0.5f
);
3488 /* fetch sample ID */
3489 LLVMValueRef sample_pos
= ctx
->abi
->load_sample_position(ctx
->abi
, sample_id
);
3491 LLVMValueRef src_c0
= LLVMBuildExtractElement(ctx
->ac
.builder
, sample_pos
, ctx
->ac
.i32_0
, "");
3492 src_c0
= LLVMBuildFSub(ctx
->ac
.builder
, src_c0
, halfval
, "");
3493 LLVMValueRef src_c1
= LLVMBuildExtractElement(ctx
->ac
.builder
, sample_pos
, ctx
->ac
.i32_1
, "");
3494 src_c1
= LLVMBuildFSub(ctx
->ac
.builder
, src_c1
, halfval
, "");
3495 LLVMValueRef coords
[] = { src_c0
, src_c1
};
3496 LLVMValueRef offset
= ac_build_gather_values(&ctx
->ac
, coords
, 2);
3498 return barycentric_offset(ctx
, mode
, offset
);
3502 static LLVMValueRef
barycentric_sample(struct ac_nir_context
*ctx
,
3505 LLVMValueRef interp_param
= lookup_interp_param(ctx
, mode
, INTERP_SAMPLE
);
3506 return LLVMBuildBitCast(ctx
->ac
.builder
, interp_param
, ctx
->ac
.v2i32
, "");
3509 static LLVMValueRef
barycentric_model(struct ac_nir_context
*ctx
)
3511 return LLVMBuildBitCast(ctx
->ac
.builder
,
3512 ac_get_arg(&ctx
->ac
, ctx
->args
->pull_model
),
3516 static LLVMValueRef
load_interpolated_input(struct ac_nir_context
*ctx
,
3517 LLVMValueRef interp_param
,
3518 unsigned index
, unsigned comp_start
,
3519 unsigned num_components
,
3522 LLVMValueRef attr_number
= LLVMConstInt(ctx
->ac
.i32
, index
, false);
3523 LLVMValueRef interp_param_f
;
3525 interp_param_f
= LLVMBuildBitCast(ctx
->ac
.builder
,
3526 interp_param
, ctx
->ac
.v2f32
, "");
3527 LLVMValueRef i
= LLVMBuildExtractElement(
3528 ctx
->ac
.builder
, interp_param_f
, ctx
->ac
.i32_0
, "");
3529 LLVMValueRef j
= LLVMBuildExtractElement(
3530 ctx
->ac
.builder
, interp_param_f
, ctx
->ac
.i32_1
, "");
3532 /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
3533 if (ctx
->verified_interp
&&
3534 !_mesa_hash_table_search(ctx
->verified_interp
, interp_param
)) {
3535 LLVMValueRef args
[2];
3537 args
[1] = LLVMConstInt(ctx
->ac
.i32
, S_NAN
| Q_NAN
| N_INFINITY
| P_INFINITY
, false);
3538 LLVMValueRef cond
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.class.f32", ctx
->ac
.i1
,
3539 args
, 2, AC_FUNC_ATTR_READNONE
);
3540 ac_build_kill_if_false(&ctx
->ac
, LLVMBuildNot(ctx
->ac
.builder
, cond
, ""));
3541 _mesa_hash_table_insert(ctx
->verified_interp
, interp_param
, interp_param
);
3544 LLVMValueRef values
[4];
3545 assert(bitsize
== 16 || bitsize
== 32);
3546 for (unsigned comp
= 0; comp
< num_components
; comp
++) {
3547 LLVMValueRef llvm_chan
= LLVMConstInt(ctx
->ac
.i32
, comp_start
+ comp
, false);
3548 if (bitsize
== 16) {
3549 values
[comp
] = ac_build_fs_interp_f16(&ctx
->ac
, llvm_chan
, attr_number
,
3550 ac_get_arg(&ctx
->ac
, ctx
->args
->prim_mask
), i
, j
);
3552 values
[comp
] = ac_build_fs_interp(&ctx
->ac
, llvm_chan
, attr_number
,
3553 ac_get_arg(&ctx
->ac
, ctx
->args
->prim_mask
), i
, j
);
3557 return ac_to_integer(&ctx
->ac
, ac_build_gather_values(&ctx
->ac
, values
, num_components
));
static LLVMValueRef load_input(struct ac_nir_context *ctx,
			       nir_intrinsic_instr *instr)
{
	unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

	/* We only lower inputs for fragment shaders ATM */
	ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
	assert(offset);
	assert(offset[0].i32 == 0);

	unsigned component = nir_intrinsic_component(instr);
	unsigned index = nir_intrinsic_base(instr);
	unsigned vertex_id = 2; /* P0 */

	if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
		nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

		switch (src0[0].i32) {
		case 0:
			vertex_id = 2;
			break;
		case 1:
			vertex_id = 0;
			break;
		case 2:
			vertex_id = 1;
			break;
		default:
			unreachable("Invalid vertex index");
		}
	}

	LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
	LLVMValueRef values[8];

	/* Each component of a 64-bit value takes up two GL-level channels. */
	unsigned num_components = instr->dest.ssa.num_components;
	unsigned bit_size = instr->dest.ssa.bit_size;
	unsigned channels =
		bit_size == 64 ? num_components * 2 : num_components;

	for (unsigned chan = 0; chan < channels; chan++) {
		if (component + chan > 4)
			attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
		values[chan] = ac_build_fs_interp_mov(&ctx->ac,
						      LLVMConstInt(ctx->ac.i32, vertex_id, false),
						      llvm_chan,
						      attr_number,
						      ac_get_arg(&ctx->ac, ctx->args->prim_mask));
		values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
						       bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
	}

	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
	if (bit_size == 64) {
		LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
			LLVMVectorType(ctx->ac.i64, num_components);
		result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
	}
	return result;
}
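
/* visit_intrinsic() is the big dispatcher: it lowers each NIR intrinsic either
 * to an ac_llvm_build helper, an ABI callback, or a direct llvm.amdgcn.*
 * intrinsic, and records the resulting LLVM value in ctx->ssa_defs.
 */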
static void visit_intrinsic(struct ac_nir_context *ctx,
			    nir_intrinsic_instr *instr)
{
	LLVMValueRef result = NULL;

	switch (instr->intrinsic) {
3630 case nir_intrinsic_ballot
:
3631 result
= ac_build_ballot(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3632 if (ctx
->ac
.ballot_mask_bits
> ctx
->ac
.wave_size
)
3633 result
= LLVMBuildZExt(ctx
->ac
.builder
, result
, ctx
->ac
.iN_ballotmask
, "");
3635 case nir_intrinsic_read_invocation
:
3636 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
3637 get_src(ctx
, instr
->src
[1]));
3639 case nir_intrinsic_read_first_invocation
:
3640 result
= ac_build_readlane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), NULL
);
3642 case nir_intrinsic_load_subgroup_invocation
:
3643 result
= ac_get_thread_id(&ctx
->ac
);
3645 case nir_intrinsic_load_work_group_id
: {
3646 LLVMValueRef values
[3];
3648 for (int i
= 0; i
< 3; i
++) {
3649 values
[i
] = ctx
->args
->workgroup_ids
[i
].used
?
3650 ac_get_arg(&ctx
->ac
, ctx
->args
->workgroup_ids
[i
]) : ctx
->ac
.i32_0
;
3653 result
= ac_build_gather_values(&ctx
->ac
, values
, 3);
3656 case nir_intrinsic_load_base_vertex
:
3657 case nir_intrinsic_load_first_vertex
:
3658 result
= ctx
->abi
->load_base_vertex(ctx
->abi
);
3660 case nir_intrinsic_load_local_group_size
:
3661 result
= ctx
->abi
->load_local_group_size(ctx
->abi
);
3663 case nir_intrinsic_load_vertex_id
:
3664 result
= LLVMBuildAdd(ctx
->ac
.builder
,
3665 ac_get_arg(&ctx
->ac
, ctx
->args
->vertex_id
),
3666 ac_get_arg(&ctx
->ac
, ctx
->args
->base_vertex
), "");
3668 case nir_intrinsic_load_vertex_id_zero_base
: {
3669 result
= ctx
->abi
->vertex_id
;
3672 case nir_intrinsic_load_local_invocation_id
: {
3673 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->local_invocation_ids
);
3676 case nir_intrinsic_load_base_instance
:
3677 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->start_instance
);
3679 case nir_intrinsic_load_draw_id
:
3680 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->draw_id
);
3682 case nir_intrinsic_load_view_index
:
3683 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->view_index
);
3685 case nir_intrinsic_load_invocation_id
:
3686 if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3687 result
= ac_unpack_param(&ctx
->ac
,
3688 ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_rel_ids
),
3691 if (ctx
->ac
.chip_class
>= GFX10
) {
3692 result
= LLVMBuildAnd(ctx
->ac
.builder
,
3693 ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
),
3694 LLVMConstInt(ctx
->ac
.i32
, 127, 0), "");
3696 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_invocation_id
);
3700 case nir_intrinsic_load_primitive_id
:
3701 if (ctx
->stage
== MESA_SHADER_GEOMETRY
) {
3702 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->gs_prim_id
);
3703 } else if (ctx
->stage
== MESA_SHADER_TESS_CTRL
) {
3704 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tcs_patch_id
);
3705 } else if (ctx
->stage
== MESA_SHADER_TESS_EVAL
) {
3706 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->tes_patch_id
);
3708 fprintf(stderr
, "Unknown primitive id intrinsic: %d", ctx
->stage
);
3710 case nir_intrinsic_load_sample_id
:
3711 result
= ac_unpack_param(&ctx
->ac
,
3712 ac_get_arg(&ctx
->ac
, ctx
->args
->ancillary
),
3715 case nir_intrinsic_load_sample_pos
:
3716 result
= load_sample_pos(ctx
);
3718 case nir_intrinsic_load_sample_mask_in
:
3719 result
= ctx
->abi
->load_sample_mask_in(ctx
->abi
);
3721 case nir_intrinsic_load_frag_coord
: {
3722 LLVMValueRef values
[4] = {
3723 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[0]),
3724 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[1]),
3725 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[2]),
3726 ac_build_fdiv(&ctx
->ac
, ctx
->ac
.f32_1
,
3727 ac_get_arg(&ctx
->ac
, ctx
->args
->frag_pos
[3]))
3729 result
= ac_to_integer(&ctx
->ac
,
3730 ac_build_gather_values(&ctx
->ac
, values
, 4));
3733 case nir_intrinsic_load_layer_id
:
3734 result
= ctx
->abi
->inputs
[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER
, 0)];
3736 case nir_intrinsic_load_front_face
:
3737 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->front_face
);
3739 case nir_intrinsic_load_helper_invocation
:
3740 result
= ac_build_load_helper_invocation(&ctx
->ac
);
3742 case nir_intrinsic_is_helper_invocation
:
3743 result
= ac_build_is_helper_invocation(&ctx
->ac
);
3745 case nir_intrinsic_load_color0
:
3746 result
= ctx
->abi
->color0
;
3748 case nir_intrinsic_load_color1
:
3749 result
= ctx
->abi
->color1
;
3751 case nir_intrinsic_load_user_data_amd
:
3752 assert(LLVMTypeOf(ctx
->abi
->user_data
) == ctx
->ac
.v4i32
);
3753 result
= ctx
->abi
->user_data
;
3755 case nir_intrinsic_load_instance_id
:
3756 result
= ctx
->abi
->instance_id
;
3758 case nir_intrinsic_load_num_work_groups
:
3759 result
= ac_get_arg(&ctx
->ac
, ctx
->args
->num_work_groups
);
3761 case nir_intrinsic_load_local_invocation_index
:
3762 result
= visit_load_local_invocation_index(ctx
);
3764 case nir_intrinsic_load_subgroup_id
:
3765 result
= visit_load_subgroup_id(ctx
);
3767 case nir_intrinsic_load_num_subgroups
:
3768 result
= visit_load_num_subgroups(ctx
);
3770 case nir_intrinsic_first_invocation
:
3771 result
= visit_first_invocation(ctx
);
3773 case nir_intrinsic_load_push_constant
:
3774 result
= visit_load_push_constant(ctx
, instr
);
3776 case nir_intrinsic_vulkan_resource_index
: {
3777 LLVMValueRef index
= get_src(ctx
, instr
->src
[0]);
3778 unsigned desc_set
= nir_intrinsic_desc_set(instr
);
3779 unsigned binding
= nir_intrinsic_binding(instr
);
3781 result
= ctx
->abi
->load_resource(ctx
->abi
, index
, desc_set
,
3785 case nir_intrinsic_vulkan_resource_reindex
:
3786 result
= visit_vulkan_resource_reindex(ctx
, instr
);
3788 case nir_intrinsic_store_ssbo
:
3789 visit_store_ssbo(ctx
, instr
);
3791 case nir_intrinsic_load_ssbo
:
3792 result
= visit_load_buffer(ctx
, instr
);
3794 case nir_intrinsic_ssbo_atomic_add
:
3795 case nir_intrinsic_ssbo_atomic_imin
:
3796 case nir_intrinsic_ssbo_atomic_umin
:
3797 case nir_intrinsic_ssbo_atomic_imax
:
3798 case nir_intrinsic_ssbo_atomic_umax
:
3799 case nir_intrinsic_ssbo_atomic_and
:
3800 case nir_intrinsic_ssbo_atomic_or
:
3801 case nir_intrinsic_ssbo_atomic_xor
:
3802 case nir_intrinsic_ssbo_atomic_exchange
:
3803 case nir_intrinsic_ssbo_atomic_comp_swap
:
3804 result
= visit_atomic_ssbo(ctx
, instr
);
3806 case nir_intrinsic_load_ubo
:
3807 result
= visit_load_ubo_buffer(ctx
, instr
);
3809 case nir_intrinsic_get_buffer_size
:
3810 result
= visit_get_buffer_size(ctx
, instr
);
3812 case nir_intrinsic_load_deref
:
3813 result
= visit_load_var(ctx
, instr
);
3815 case nir_intrinsic_store_deref
:
3816 visit_store_var(ctx
, instr
);
3818 case nir_intrinsic_load_shared
:
3819 result
= visit_load_shared(ctx
, instr
);
3821 case nir_intrinsic_store_shared
:
3822 visit_store_shared(ctx
, instr
);
3824 case nir_intrinsic_bindless_image_samples
:
3825 case nir_intrinsic_image_deref_samples
:
3826 result
= visit_image_samples(ctx
, instr
);
3828 case nir_intrinsic_bindless_image_load
:
3829 result
= visit_image_load(ctx
, instr
, true);
3831 case nir_intrinsic_image_deref_load
:
3832 result
= visit_image_load(ctx
, instr
, false);
3834 case nir_intrinsic_bindless_image_store
:
3835 visit_image_store(ctx
, instr
, true);
3837 case nir_intrinsic_image_deref_store
:
3838 visit_image_store(ctx
, instr
, false);
3840 case nir_intrinsic_bindless_image_atomic_add
:
3841 case nir_intrinsic_bindless_image_atomic_imin
:
3842 case nir_intrinsic_bindless_image_atomic_umin
:
3843 case nir_intrinsic_bindless_image_atomic_imax
:
3844 case nir_intrinsic_bindless_image_atomic_umax
:
3845 case nir_intrinsic_bindless_image_atomic_and
:
3846 case nir_intrinsic_bindless_image_atomic_or
:
3847 case nir_intrinsic_bindless_image_atomic_xor
:
3848 case nir_intrinsic_bindless_image_atomic_exchange
:
3849 case nir_intrinsic_bindless_image_atomic_comp_swap
:
3850 case nir_intrinsic_bindless_image_atomic_inc_wrap
:
3851 case nir_intrinsic_bindless_image_atomic_dec_wrap
:
3852 result
= visit_image_atomic(ctx
, instr
, true);
3854 case nir_intrinsic_image_deref_atomic_add
:
3855 case nir_intrinsic_image_deref_atomic_imin
:
3856 case nir_intrinsic_image_deref_atomic_umin
:
3857 case nir_intrinsic_image_deref_atomic_imax
:
3858 case nir_intrinsic_image_deref_atomic_umax
:
3859 case nir_intrinsic_image_deref_atomic_and
:
3860 case nir_intrinsic_image_deref_atomic_or
:
3861 case nir_intrinsic_image_deref_atomic_xor
:
3862 case nir_intrinsic_image_deref_atomic_exchange
:
3863 case nir_intrinsic_image_deref_atomic_comp_swap
:
3864 case nir_intrinsic_image_deref_atomic_inc_wrap
:
3865 case nir_intrinsic_image_deref_atomic_dec_wrap
:
3866 result
= visit_image_atomic(ctx
, instr
, false);
3868 case nir_intrinsic_bindless_image_size
:
3869 result
= visit_image_size(ctx
, instr
, true);
3871 case nir_intrinsic_image_deref_size
:
3872 result
= visit_image_size(ctx
, instr
, false);
3874 case nir_intrinsic_shader_clock
:
3875 result
= ac_build_shader_clock(&ctx
->ac
,
3876 nir_intrinsic_memory_scope(instr
));
3878 case nir_intrinsic_discard
:
3879 case nir_intrinsic_discard_if
:
3880 emit_discard(ctx
, instr
);
3882 case nir_intrinsic_demote
:
3883 case nir_intrinsic_demote_if
:
3884 emit_demote(ctx
, instr
);
3886 case nir_intrinsic_memory_barrier
:
3887 case nir_intrinsic_group_memory_barrier
:
3888 case nir_intrinsic_memory_barrier_buffer
:
3889 case nir_intrinsic_memory_barrier_image
:
3890 case nir_intrinsic_memory_barrier_shared
:
3891 emit_membar(&ctx
->ac
, instr
);
3893 case nir_intrinsic_memory_barrier_tcs_patch
:
3895 case nir_intrinsic_control_barrier
:
3896 ac_emit_barrier(&ctx
->ac
, ctx
->stage
);
3898 case nir_intrinsic_shared_atomic_add
:
3899 case nir_intrinsic_shared_atomic_imin
:
3900 case nir_intrinsic_shared_atomic_umin
:
3901 case nir_intrinsic_shared_atomic_imax
:
3902 case nir_intrinsic_shared_atomic_umax
:
3903 case nir_intrinsic_shared_atomic_and
:
3904 case nir_intrinsic_shared_atomic_or
:
3905 case nir_intrinsic_shared_atomic_xor
:
3906 case nir_intrinsic_shared_atomic_exchange
:
3907 case nir_intrinsic_shared_atomic_comp_swap
: {
3908 LLVMValueRef ptr
= get_memory_ptr(ctx
, instr
->src
[0],
3909 instr
->src
[1].ssa
->bit_size
);
3910 result
= visit_var_atomic(ctx
, instr
, ptr
, 1);
3913 case nir_intrinsic_deref_atomic_add
:
3914 case nir_intrinsic_deref_atomic_imin
:
3915 case nir_intrinsic_deref_atomic_umin
:
3916 case nir_intrinsic_deref_atomic_imax
:
3917 case nir_intrinsic_deref_atomic_umax
:
3918 case nir_intrinsic_deref_atomic_and
:
3919 case nir_intrinsic_deref_atomic_or
:
3920 case nir_intrinsic_deref_atomic_xor
:
3921 case nir_intrinsic_deref_atomic_exchange
:
3922 case nir_intrinsic_deref_atomic_comp_swap
: {
3923 LLVMValueRef ptr
= get_src(ctx
, instr
->src
[0]);
3924 result
= visit_var_atomic(ctx
, instr
, ptr
, 1);
3927 case nir_intrinsic_load_barycentric_pixel
:
3928 result
= barycentric_center(ctx
, nir_intrinsic_interp_mode(instr
));
3930 case nir_intrinsic_load_barycentric_centroid
:
3931 result
= barycentric_centroid(ctx
, nir_intrinsic_interp_mode(instr
));
3933 case nir_intrinsic_load_barycentric_sample
:
3934 result
= barycentric_sample(ctx
, nir_intrinsic_interp_mode(instr
));
3936 case nir_intrinsic_load_barycentric_model
:
3937 result
= barycentric_model(ctx
);
3939 case nir_intrinsic_load_barycentric_at_offset
: {
3940 LLVMValueRef offset
= ac_to_float(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3941 result
= barycentric_offset(ctx
, nir_intrinsic_interp_mode(instr
), offset
);
3944 case nir_intrinsic_load_barycentric_at_sample
: {
3945 LLVMValueRef sample_id
= get_src(ctx
, instr
->src
[0]);
3946 result
= barycentric_at_sample(ctx
, nir_intrinsic_interp_mode(instr
), sample_id
);
3949 case nir_intrinsic_load_interpolated_input
: {
3950 /* We assume any indirect loads have been lowered away */
3951 ASSERTED nir_const_value
*offset
= nir_src_as_const_value(instr
->src
[1]);
3953 assert(offset
[0].i32
== 0);
3955 LLVMValueRef interp_param
= get_src(ctx
, instr
->src
[0]);
3956 unsigned index
= nir_intrinsic_base(instr
);
3957 unsigned component
= nir_intrinsic_component(instr
);
3958 result
= load_interpolated_input(ctx
, interp_param
, index
,
3960 instr
->dest
.ssa
.num_components
,
3961 instr
->dest
.ssa
.bit_size
);
3964 case nir_intrinsic_load_input
:
3965 case nir_intrinsic_load_input_vertex
:
3966 result
= load_input(ctx
, instr
);
3968 case nir_intrinsic_emit_vertex
:
3969 ctx
->abi
->emit_vertex(ctx
->abi
, nir_intrinsic_stream_id(instr
), ctx
->abi
->outputs
);
3971 case nir_intrinsic_emit_vertex_with_counter
: {
3972 unsigned stream
= nir_intrinsic_stream_id(instr
);
3973 LLVMValueRef next_vertex
= get_src(ctx
, instr
->src
[0]);
3974 ctx
->abi
->emit_vertex_with_counter(ctx
->abi
, stream
,
3979 case nir_intrinsic_end_primitive
:
3980 case nir_intrinsic_end_primitive_with_counter
:
3981 ctx
->abi
->emit_primitive(ctx
->abi
, nir_intrinsic_stream_id(instr
));
3983 case nir_intrinsic_load_tess_coord
:
3984 result
= ctx
->abi
->load_tess_coord(ctx
->abi
);
3986 case nir_intrinsic_load_tess_level_outer
:
3987 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_OUTER
, false);
3989 case nir_intrinsic_load_tess_level_inner
:
3990 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_INNER
, false);
3992 case nir_intrinsic_load_tess_level_outer_default
:
3993 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_OUTER
, true);
3995 case nir_intrinsic_load_tess_level_inner_default
:
3996 result
= ctx
->abi
->load_tess_level(ctx
->abi
, VARYING_SLOT_TESS_LEVEL_INNER
, true);
3998 case nir_intrinsic_load_patch_vertices_in
:
3999 result
= ctx
->abi
->load_patch_vertices_in(ctx
->abi
);
4001 case nir_intrinsic_vote_all
: {
4002 LLVMValueRef tmp
= ac_build_vote_all(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4003 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
4006 case nir_intrinsic_vote_any
: {
4007 LLVMValueRef tmp
= ac_build_vote_any(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4008 result
= LLVMBuildSExt(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
4011 case nir_intrinsic_shuffle
:
4012 if (ctx
->ac
.chip_class
== GFX8
||
4013 ctx
->ac
.chip_class
== GFX9
||
4014 (ctx
->ac
.chip_class
== GFX10
&& ctx
->ac
.wave_size
== 32)) {
4015 result
= ac_build_shuffle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4016 get_src(ctx
, instr
->src
[1]));
4018 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
4019 LLVMValueRef index
= get_src(ctx
, instr
->src
[1]);
4020 LLVMTypeRef type
= LLVMTypeOf(src
);
4021 struct waterfall_context wctx
;
4022 LLVMValueRef index_val
;
4024 index_val
= enter_waterfall(ctx
, &wctx
, index
, true);
4026 src
= LLVMBuildZExt(ctx
->ac
.builder
, src
,
4029 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.readlane",
4031 (LLVMValueRef
[]) { src
, index_val
}, 2,
4032 AC_FUNC_ATTR_READNONE
|
4033 AC_FUNC_ATTR_CONVERGENT
);
4035 result
= LLVMBuildTrunc(ctx
->ac
.builder
, result
, type
, "");
4037 result
= exit_waterfall(ctx
, &wctx
, result
);
4040 case nir_intrinsic_reduce
:
4041 result
= ac_build_reduce(&ctx
->ac
,
4042 get_src(ctx
, instr
->src
[0]),
4043 instr
->const_index
[0],
4044 instr
->const_index
[1]);
4046 case nir_intrinsic_inclusive_scan
:
4047 result
= ac_build_inclusive_scan(&ctx
->ac
,
4048 get_src(ctx
, instr
->src
[0]),
4049 instr
->const_index
[0]);
4051 case nir_intrinsic_exclusive_scan
:
4052 result
= ac_build_exclusive_scan(&ctx
->ac
,
4053 get_src(ctx
, instr
->src
[0]),
4054 instr
->const_index
[0]);
4056 case nir_intrinsic_quad_broadcast
: {
4057 unsigned lane
= nir_src_as_uint(instr
->src
[1]);
4058 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4059 lane
, lane
, lane
, lane
);
4062 case nir_intrinsic_quad_swap_horizontal
:
4063 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 1, 0, 3 ,2);
4065 case nir_intrinsic_quad_swap_vertical
:
4066 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 2, 3, 0 ,1);
4068 case nir_intrinsic_quad_swap_diagonal
:
4069 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), 3, 2, 1 ,0);
4071 case nir_intrinsic_quad_swizzle_amd
: {
4072 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
4073 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4074 mask
& 0x3, (mask
>> 2) & 0x3,
4075 (mask
>> 4) & 0x3, (mask
>> 6) & 0x3);
4078 case nir_intrinsic_masked_swizzle_amd
: {
4079 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
4080 result
= ac_build_ds_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), mask
);
4083 case nir_intrinsic_write_invocation_amd
:
4084 result
= ac_build_writelane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
4085 get_src(ctx
, instr
->src
[1]),
4086 get_src(ctx
, instr
->src
[2]));
4088 case nir_intrinsic_mbcnt_amd
:
4089 result
= ac_build_mbcnt(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
4091 case nir_intrinsic_load_scratch
: {
4092 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
4093 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
4095 LLVMTypeRef comp_type
=
4096 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
4097 LLVMTypeRef vec_type
=
4098 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
4099 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
4100 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4101 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4102 LLVMPointerType(vec_type
, addr_space
), "");
4103 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
4106 case nir_intrinsic_store_scratch
: {
4107 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
4108 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
4110 LLVMTypeRef comp_type
=
4111 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->src
[0].ssa
->bit_size
);
4112 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4113 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4114 LLVMPointerType(comp_type
, addr_space
), "");
4115 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
4116 unsigned wrmask
= nir_intrinsic_write_mask(instr
);
4119 u_bit_scan_consecutive_range(&wrmask
, &start
, &count
);
4121 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, start
, false);
4122 LLVMValueRef offset_ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ptr
, &offset
, 1, "");
4123 LLVMTypeRef vec_type
=
4124 count
== 1 ? comp_type
: LLVMVectorType(comp_type
, count
);
4125 offset_ptr
= LLVMBuildBitCast(ctx
->ac
.builder
,
4127 LLVMPointerType(vec_type
, addr_space
),
4129 LLVMValueRef offset_src
=
4130 ac_extract_components(&ctx
->ac
, src
, start
, count
);
4131 LLVMBuildStore(ctx
->ac
.builder
, offset_src
, offset_ptr
);
4135 case nir_intrinsic_load_constant
: {
4136 unsigned base
= nir_intrinsic_base(instr
);
4137 unsigned range
= nir_intrinsic_range(instr
);
4139 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
4140 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
,
4141 LLVMConstInt(ctx
->ac
.i32
, base
, false), "");
4143 /* Clamp the offset to avoid out-of-bound access because global
4144 * instructions can't handle them.
4146 LLVMValueRef size
= LLVMConstInt(ctx
->ac
.i32
, base
+ range
, false);
4147 LLVMValueRef cond
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntULT
,
4149 offset
= LLVMBuildSelect(ctx
->ac
.builder
, cond
, offset
, size
, "");
4151 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->constant_data
,
4153 LLVMTypeRef comp_type
=
4154 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
4155 LLVMTypeRef vec_type
=
4156 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
4157 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
4158 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
4159 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
4160 LLVMPointerType(vec_type
, addr_space
), "");
4161 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
	default:
		fprintf(stderr, "Unknown intrinsic: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		break;
	}
	if (result) {
		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}
static LLVMValueRef
get_bindless_index_from_uniform(struct ac_nir_context *ctx,
				unsigned base_index,
				unsigned constant_index,
				LLVMValueRef dynamic_index)
{
	LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
	LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
					  LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

	/* Bindless uniforms are 64 bit so multiply the index by 8 */
	index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

	LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
						NULL, 0, 0, true, true);

	return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
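
/* Note that only the low dword of the 8-byte bindless slot is loaded here;
 * the value is used as the dynamic descriptor index that is later passed on
 * to ctx->abi->load_sampler_desc().
 */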
struct sampler_desc_address {
	unsigned descriptor_set;
	unsigned base_index; /* binding in vulkan */
	unsigned constant_index;
	LLVMValueRef dynamic_index;
	bool image;
	bool bindless;
};
4205 static struct sampler_desc_address
4206 get_sampler_desc_internal(struct ac_nir_context
*ctx
,
4207 nir_deref_instr
*deref_instr
,
4208 const nir_instr
*instr
,
4211 LLVMValueRef index
= NULL
;
4212 unsigned constant_index
= 0;
4213 unsigned descriptor_set
;
4214 unsigned base_index
;
4215 bool bindless
= false;
4220 nir_intrinsic_instr
*img_instr
= nir_instr_as_intrinsic(instr
);
4223 index
= get_src(ctx
, img_instr
->src
[0]);
4225 nir_tex_instr
*tex_instr
= nir_instr_as_tex(instr
);
4226 int sampSrcIdx
= nir_tex_instr_src_index(tex_instr
,
4227 nir_tex_src_sampler_handle
);
4228 if (sampSrcIdx
!= -1) {
4231 index
= get_src(ctx
, tex_instr
->src
[sampSrcIdx
].src
);
4233 assert(tex_instr
&& !image
);
4234 base_index
= tex_instr
->sampler_index
;
4238 while(deref_instr
->deref_type
!= nir_deref_type_var
) {
4239 if (deref_instr
->deref_type
== nir_deref_type_array
) {
4240 unsigned array_size
= glsl_get_aoa_size(deref_instr
->type
);
4244 if (nir_src_is_const(deref_instr
->arr
.index
)) {
4245 constant_index
+= array_size
* nir_src_as_uint(deref_instr
->arr
.index
);
4247 LLVMValueRef indirect
= get_src(ctx
, deref_instr
->arr
.index
);
4249 indirect
= LLVMBuildMul(ctx
->ac
.builder
, indirect
,
4250 LLVMConstInt(ctx
->ac
.i32
, array_size
, false), "");
4255 index
= LLVMBuildAdd(ctx
->ac
.builder
, index
, indirect
, "");
4258 deref_instr
= nir_src_as_deref(deref_instr
->parent
);
4259 } else if (deref_instr
->deref_type
== nir_deref_type_struct
) {
4260 unsigned sidx
= deref_instr
->strct
.index
;
4261 deref_instr
= nir_src_as_deref(deref_instr
->parent
);
4262 constant_index
+= glsl_get_struct_location_offset(deref_instr
->type
, sidx
);
4264 unreachable("Unsupported deref type");
4267 descriptor_set
= deref_instr
->var
->data
.descriptor_set
;
4269 if (deref_instr
->var
->data
.bindless
) {
4270 /* For now just assert on unhandled variable types */
4271 assert(deref_instr
->var
->data
.mode
== nir_var_uniform
);
4273 base_index
= deref_instr
->var
->data
.driver_location
;
4276 index
= index
? index
: ctx
->ac
.i32_0
;
4277 index
= get_bindless_index_from_uniform(ctx
, base_index
,
4278 constant_index
, index
);
4280 base_index
= deref_instr
->var
->data
.binding
;
4282 return (struct sampler_desc_address
) {
4283 .descriptor_set
= descriptor_set
,
4284 .base_index
= base_index
,
4285 .constant_index
= constant_index
,
4286 .dynamic_index
= index
,
4288 .bindless
= bindless
,
/* Extract any possibly divergent index into a separate value that can be fed
 * into get_sampler_desc with the same arguments. */
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
					   nir_deref_instr *deref_instr,
					   const nir_instr *instr,
					   bool image)
{
	struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
	return addr.dynamic_index;
}
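
/* Splitting the address computation (get_sampler_desc_index) from the actual
 * descriptor load (get_sampler_desc) lets callers wrap a possibly divergent
 * index in a waterfall loop first and then load the descriptor with the
 * now-uniform index; tex_fetch_ptrs() below relies on this.
 */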
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     LLVMValueRef index,
				     bool image, bool write)
{
	struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
	return ctx->abi->load_sampler_desc(ctx->abi,
					   addr.descriptor_set,
					   addr.base_index,
					   addr.constant_index, index,
					   desc_type, addr.image, write, addr.bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8+:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef img7, samp0;

	if (ctx->ac.chip_class >= GFX8)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->ac.i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					LLVMConstInt(ctx->ac.i32, 0, 0), "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
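
/* tex_fetch_ptrs() resolves the resource, sampler and (for MSAA fetches) FMASK
 * descriptors for a nir_tex_instr, entering a waterfall loop for any
 * non-uniform texture/sampler index and applying the pre-GFX8 aniso fix above.
 */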
4347 static void tex_fetch_ptrs(struct ac_nir_context
*ctx
,
4348 nir_tex_instr
*instr
,
4349 struct waterfall_context
*wctx
,
4350 LLVMValueRef
*res_ptr
, LLVMValueRef
*samp_ptr
,
4351 LLVMValueRef
*fmask_ptr
)
4353 nir_deref_instr
*texture_deref_instr
= NULL
;
4354 nir_deref_instr
*sampler_deref_instr
= NULL
;
4357 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
4358 switch (instr
->src
[i
].src_type
) {
4359 case nir_tex_src_texture_deref
:
4360 texture_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
4362 case nir_tex_src_sampler_deref
:
4363 sampler_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
4365 case nir_tex_src_plane
:
4366 plane
= nir_src_as_int(instr
->src
[i
].src
);
4373 LLVMValueRef texture_dynamic_index
= get_sampler_desc_index(ctx
, texture_deref_instr
,
4374 &instr
->instr
, false);
4375 if (!sampler_deref_instr
)
4376 sampler_deref_instr
= texture_deref_instr
;
4378 LLVMValueRef sampler_dynamic_index
= get_sampler_desc_index(ctx
, sampler_deref_instr
,
4379 &instr
->instr
, false);
4380 if (instr
->texture_non_uniform
)
4381 texture_dynamic_index
= enter_waterfall(ctx
, wctx
+ 0, texture_dynamic_index
, true);
4383 if (instr
->sampler_non_uniform
)
4384 sampler_dynamic_index
= enter_waterfall(ctx
, wctx
+ 1, sampler_dynamic_index
, true);
4386 enum ac_descriptor_type main_descriptor
= instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
? AC_DESC_BUFFER
: AC_DESC_IMAGE
;
4389 assert(instr
->op
!= nir_texop_txf_ms
&&
4390 instr
->op
!= nir_texop_samples_identical
);
4391 assert(instr
->sampler_dim
!= GLSL_SAMPLER_DIM_BUF
);
4393 main_descriptor
= AC_DESC_PLANE_0
+ plane
;
4396 if (instr
->op
== nir_texop_fragment_mask_fetch
) {
4397 /* The fragment mask is fetched from the compressed
4398 * multisampled surface.
4400 main_descriptor
= AC_DESC_FMASK
;
4403 *res_ptr
= get_sampler_desc(ctx
, texture_deref_instr
, main_descriptor
, &instr
->instr
,
4404 texture_dynamic_index
, false, false);
4407 *samp_ptr
= get_sampler_desc(ctx
, sampler_deref_instr
, AC_DESC_SAMPLER
, &instr
->instr
,
4408 sampler_dynamic_index
, false, false);
4409 if (instr
->sampler_dim
< GLSL_SAMPLER_DIM_RECT
)
4410 *samp_ptr
= sici_fix_sampler_aniso(ctx
, *res_ptr
, *samp_ptr
);
4412 if (fmask_ptr
&& (instr
->op
== nir_texop_txf_ms
||
4413 instr
->op
== nir_texop_samples_identical
))
4414 *fmask_ptr
= get_sampler_desc(ctx
, texture_deref_instr
, AC_DESC_FMASK
,
4415 &instr
->instr
, texture_dynamic_index
, false, false);
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
				      LLVMValueRef coord)
{
	coord = ac_to_float(ctx, coord);
	coord = ac_build_round(ctx, coord);
	coord = ac_to_integer(ctx, coord);
	return coord;
}
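
/* Array layer coordinates are rounded to the nearest integer before sampling;
 * visit_tex() applies this to the layer component of array textures for all
 * ops except direct texel fetches (txf/txf_ms).
 */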
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
	LLVMValueRef result = NULL;
	struct ac_image_args args = { 0 };
	LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
	LLVMValueRef ddx = NULL, ddy = NULL;
	unsigned offset_src = 0;
	struct waterfall_context wctx[2] = {{{0}}};

	tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);
4438 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
4439 switch (instr
->src
[i
].src_type
) {
4440 case nir_tex_src_coord
: {
4441 LLVMValueRef coord
= get_src(ctx
, instr
->src
[i
].src
);
4442 for (unsigned chan
= 0; chan
< instr
->coord_components
; ++chan
)
4443 args
.coords
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, coord
, chan
);
4446 case nir_tex_src_projector
:
4448 case nir_tex_src_comparator
:
4449 if (instr
->is_shadow
) {
4450 args
.compare
= get_src(ctx
, instr
->src
[i
].src
);
4451 args
.compare
= ac_to_float(&ctx
->ac
, args
.compare
);
4454 case nir_tex_src_offset
:
4455 args
.offset
= get_src(ctx
, instr
->src
[i
].src
);
4458 case nir_tex_src_bias
:
4459 args
.bias
= get_src(ctx
, instr
->src
[i
].src
);
4461 case nir_tex_src_lod
: {
4462 if (nir_src_is_const(instr
->src
[i
].src
) && nir_src_as_uint(instr
->src
[i
].src
) == 0)
4463 args
.level_zero
= true;
4465 args
.lod
= get_src(ctx
, instr
->src
[i
].src
);
4468 case nir_tex_src_ms_index
:
4469 sample_index
= get_src(ctx
, instr
->src
[i
].src
);
4471 case nir_tex_src_ms_mcs
:
4473 case nir_tex_src_ddx
:
4474 ddx
= get_src(ctx
, instr
->src
[i
].src
);
4476 case nir_tex_src_ddy
:
4477 ddy
= get_src(ctx
, instr
->src
[i
].src
);
4479 case nir_tex_src_min_lod
:
4480 args
.min_lod
= get_src(ctx
, instr
->src
[i
].src
);
4482 case nir_tex_src_texture_offset
:
4483 case nir_tex_src_sampler_offset
:
4484 case nir_tex_src_plane
:
4490 if (instr
->op
== nir_texop_txs
&& instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
4491 result
= get_buffer_size(ctx
, args
.resource
, true);
4495 if (instr
->op
== nir_texop_texture_samples
) {
4496 LLVMValueRef res
, samples
, is_msaa
;
4497 LLVMValueRef default_sample
;
4499 res
= LLVMBuildBitCast(ctx
->ac
.builder
, args
.resource
, ctx
->ac
.v8i32
, "");
4500 samples
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
,
4501 LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4502 is_msaa
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4503 LLVMConstInt(ctx
->ac
.i32
, 28, false), "");
4504 is_msaa
= LLVMBuildAnd(ctx
->ac
.builder
, is_msaa
,
4505 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4506 is_msaa
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, is_msaa
,
4507 LLVMConstInt(ctx
->ac
.i32
, 0xe, false), "");
4509 samples
= LLVMBuildLShr(ctx
->ac
.builder
, samples
,
4510 LLVMConstInt(ctx
->ac
.i32
, 16, false), "");
4511 samples
= LLVMBuildAnd(ctx
->ac
.builder
, samples
,
4512 LLVMConstInt(ctx
->ac
.i32
, 0xf, false), "");
4513 samples
= LLVMBuildShl(ctx
->ac
.builder
, ctx
->ac
.i32_1
,
4516 if (ctx
->abi
->robust_buffer_access
) {
4517 LLVMValueRef dword1
, is_null_descriptor
;
4519 /* Extract the second dword of the descriptor, if it's
4520 * all zero, then it's a null descriptor.
4522 dword1
= LLVMBuildExtractElement(ctx
->ac
.builder
, res
,
4523 LLVMConstInt(ctx
->ac
.i32
, 1, false), "");
4524 is_null_descriptor
=
4525 LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, dword1
,
4526 LLVMConstInt(ctx
->ac
.i32
, 0, false), "");
4528 LLVMBuildSelect(ctx
->ac
.builder
, is_null_descriptor
,
4529 ctx
->ac
.i32_0
, ctx
->ac
.i32_1
, "");
4531 default_sample
= ctx
->ac
.i32_1
;
4534 samples
= LLVMBuildSelect(ctx
->ac
.builder
, is_msaa
, samples
,
4535 default_sample
, "");
4540 if (args
.offset
&& instr
->op
!= nir_texop_txf
&& instr
->op
!= nir_texop_txf_ms
) {
4541 LLVMValueRef offset
[3], pack
;
4542 for (unsigned chan
= 0; chan
< 3; ++chan
)
4543 offset
[chan
] = ctx
->ac
.i32_0
;
4545 unsigned num_components
= ac_get_llvm_num_components(args
.offset
);
4546 for (unsigned chan
= 0; chan
< num_components
; chan
++) {
4547 offset
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, args
.offset
, chan
);
4548 offset
[chan
] = LLVMBuildAnd(ctx
->ac
.builder
, offset
[chan
],
4549 LLVMConstInt(ctx
->ac
.i32
, 0x3f, false), "");
4551 offset
[chan
] = LLVMBuildShl(ctx
->ac
.builder
, offset
[chan
],
4552 LLVMConstInt(ctx
->ac
.i32
, chan
* 8, false), "");
4554 pack
= LLVMBuildOr(ctx
->ac
.builder
, offset
[0], offset
[1], "");
4555 pack
= LLVMBuildOr(ctx
->ac
.builder
, pack
, offset
[2], "");
4559 /* Section 8.23.1 (Depth Texture Comparison Mode) of the
4560 * OpenGL 4.5 spec says:
4562 * "If the texture’s internal format indicates a fixed-point
4563 * depth texture, then D_t and D_ref are clamped to the
4564 * range [0, 1]; otherwise no clamping is performed."
4566 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
4567 * so the depth comparison value isn't clamped for Z16 and
4568 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
4569 * an explicitly clamped 32-bit float format.
4572 ctx
->ac
.chip_class
>= GFX8
&&
4573 ctx
->ac
.chip_class
<= GFX9
&&
4574 ctx
->abi
->clamp_shadow_reference
) {
4575 LLVMValueRef upgraded
, clamped
;
4577 upgraded
= LLVMBuildExtractElement(ctx
->ac
.builder
, args
.sampler
,
4578 LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4579 upgraded
= LLVMBuildLShr(ctx
->ac
.builder
, upgraded
,
4580 LLVMConstInt(ctx
->ac
.i32
, 29, false), "");
4581 upgraded
= LLVMBuildTrunc(ctx
->ac
.builder
, upgraded
, ctx
->ac
.i1
, "");
4582 clamped
= ac_build_clamp(&ctx
->ac
, args
.compare
);
4583 args
.compare
= LLVMBuildSelect(ctx
->ac
.builder
, upgraded
, clamped
,
4587 /* pack derivatives */
4589 int num_src_deriv_channels
, num_dest_deriv_channels
;
4590 switch (instr
->sampler_dim
) {
4591 case GLSL_SAMPLER_DIM_3D
:
4592 case GLSL_SAMPLER_DIM_CUBE
:
4593 num_src_deriv_channels
= 3;
4594 num_dest_deriv_channels
= 3;
4596 case GLSL_SAMPLER_DIM_2D
:
4598 num_src_deriv_channels
= 2;
4599 num_dest_deriv_channels
= 2;
4601 case GLSL_SAMPLER_DIM_1D
:
4602 num_src_deriv_channels
= 1;
4603 if (ctx
->ac
.chip_class
== GFX9
) {
4604 num_dest_deriv_channels
= 2;
4606 num_dest_deriv_channels
= 1;
4611 for (unsigned i
= 0; i
< num_src_deriv_channels
; i
++) {
4612 args
.derivs
[i
] = ac_to_float(&ctx
->ac
,
4613 ac_llvm_extract_elem(&ctx
->ac
, ddx
, i
));
4614 args
.derivs
[num_dest_deriv_channels
+ i
] = ac_to_float(&ctx
->ac
,
4615 ac_llvm_extract_elem(&ctx
->ac
, ddy
, i
));
4617 for (unsigned i
= num_src_deriv_channels
; i
< num_dest_deriv_channels
; i
++) {
4618 args
.derivs
[i
] = ctx
->ac
.f32_0
;
4619 args
.derivs
[num_dest_deriv_channels
+ i
] = ctx
->ac
.f32_0
;
4623 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&& args
.coords
[0]) {
4624 for (unsigned chan
= 0; chan
< instr
->coord_components
; chan
++)
4625 args
.coords
[chan
] = ac_to_float(&ctx
->ac
, args
.coords
[chan
]);
4626 if (instr
->coord_components
== 3)
4627 args
.coords
[3] = LLVMGetUndef(ctx
->ac
.f32
);
4628 ac_prepare_cube_coords(&ctx
->ac
,
4629 instr
->op
== nir_texop_txd
, instr
->is_array
,
4630 instr
->op
== nir_texop_lod
, args
.coords
, args
.derivs
);
4633 /* Texture coordinates fixups */
4634 if (instr
->coord_components
> 1 &&
4635 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4637 instr
->op
!= nir_texop_txf
) {
4638 args
.coords
[1] = apply_round_slice(&ctx
->ac
, args
.coords
[1]);
4641 if (instr
->coord_components
> 2 &&
4642 (instr
->sampler_dim
== GLSL_SAMPLER_DIM_2D
||
4643 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
||
4644 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS
||
4645 instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
) &&
4647 instr
->op
!= nir_texop_txf
&&
4648 instr
->op
!= nir_texop_txf_ms
&&
4649 instr
->op
!= nir_texop_fragment_fetch
&&
4650 instr
->op
!= nir_texop_fragment_mask_fetch
) {
4651 args
.coords
[2] = apply_round_slice(&ctx
->ac
, args
.coords
[2]);
4654 if (ctx
->ac
.chip_class
== GFX9
&&
4655 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4656 instr
->op
!= nir_texop_lod
) {
4657 LLVMValueRef filler
;
4658 if (instr
->op
== nir_texop_txf
)
4659 filler
= ctx
->ac
.i32_0
;
4661 filler
= LLVMConstReal(ctx
->ac
.f32
, 0.5);
4663 if (instr
->is_array
)
4664 args
.coords
[2] = args
.coords
[1];
4665 args
.coords
[1] = filler
;
4668 /* Pack sample index */
4669 if (sample_index
&& (instr
->op
== nir_texop_txf_ms
||
4670 instr
->op
== nir_texop_fragment_fetch
))
4671 args
.coords
[instr
->coord_components
] = sample_index
;
4673 if (instr
->op
== nir_texop_samples_identical
) {
4674 struct ac_image_args txf_args
= { 0 };
4675 memcpy(txf_args
.coords
, args
.coords
, sizeof(txf_args
.coords
));
4677 txf_args
.dmask
= 0xf;
4678 txf_args
.resource
= fmask_ptr
;
4679 txf_args
.dim
= instr
->is_array
? ac_image_2darray
: ac_image_2d
;
4680 result
= build_tex_intrinsic(ctx
, instr
, &txf_args
);
4682 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
4683 result
= emit_int_cmp(&ctx
->ac
, LLVMIntEQ
, result
, ctx
->ac
.i32_0
);
4687 if ((instr
->sampler_dim
== GLSL_SAMPLER_DIM_SUBPASS_MS
||
4688 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
) &&
4689 instr
->op
!= nir_texop_txs
&&
4690 instr
->op
!= nir_texop_fragment_fetch
&&
4691 instr
->op
!= nir_texop_fragment_mask_fetch
) {
4692 unsigned sample_chan
= instr
->is_array
? 3 : 2;
4693 args
.coords
[sample_chan
] = adjust_sample_index_using_fmask(
4694 &ctx
->ac
, args
.coords
[0], args
.coords
[1],
4695 instr
->is_array
? args
.coords
[2] : NULL
,
4696 args
.coords
[sample_chan
], fmask_ptr
);
4699 if (args
.offset
&& (instr
->op
== nir_texop_txf
|| instr
->op
== nir_texop_txf_ms
)) {
4700 int num_offsets
= instr
->src
[offset_src
].src
.ssa
->num_components
;
4701 num_offsets
= MIN2(num_offsets
, instr
->coord_components
);
4702 for (unsigned i
= 0; i
< num_offsets
; ++i
) {
4703 args
.coords
[i
] = LLVMBuildAdd(
4704 ctx
->ac
.builder
, args
.coords
[i
],
4705 LLVMConstInt(ctx
->ac
.i32
, nir_src_comp_as_uint(instr
->src
[offset_src
].src
, i
), false), "");
4710 /* DMASK was repurposed for GATHER4. 4 components are always
4711 * returned and DMASK works like a swizzle - it selects
4712 * the component to fetch. The only valid DMASK values are
4713 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
4714 * (red,red,red,red) etc.) The ISA document doesn't mention
4718 if (instr
->op
== nir_texop_tg4
) {
4719 if (instr
->is_shadow
)
4722 args
.dmask
= 1 << instr
->component
;
4725 if (instr
->sampler_dim
!= GLSL_SAMPLER_DIM_BUF
) {
4726 args
.dim
= ac_get_sampler_dim(ctx
->ac
.chip_class
, instr
->sampler_dim
, instr
->is_array
);
4727 args
.unorm
= instr
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
;
4730 /* Adjust the number of coordinates because we only need (x,y) for 2D
4731 * multisampled images and (x,y,layer) for 2D multisampled layered
4732 * images or for multisampled input attachments.
4734 if (instr
->op
== nir_texop_fragment_mask_fetch
) {
4735 if (args
.dim
== ac_image_2dmsaa
) {
4736 args
.dim
= ac_image_2d
;
4738 assert(args
.dim
== ac_image_2darraymsaa
);
4739 args
.dim
= ac_image_2darray
;
4743 assert(instr
->dest
.is_ssa
);
4744 args
.d16
= instr
->dest
.ssa
.bit_size
== 16;
4746 result
= build_tex_intrinsic(ctx
, instr
, &args
);
4748 if (instr
->op
== nir_texop_query_levels
)
4749 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, LLVMConstInt(ctx
->ac
.i32
, 3, false), "");
4750 else if (instr
->is_shadow
&& instr
->is_new_style_shadow
&&
4751 instr
->op
!= nir_texop_txs
&& instr
->op
!= nir_texop_lod
&&
4752 instr
->op
!= nir_texop_tg4
)
4753 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
4754 else if (instr
->op
== nir_texop_txs
&&
4755 instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
&&
4757 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
4758 LLVMValueRef six
= LLVMConstInt(ctx
->ac
.i32
, 6, false);
4759 LLVMValueRef z
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
4760 z
= LLVMBuildSDiv(ctx
->ac
.builder
, z
, six
, "");
4761 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, z
, two
, "");
4762 } else if (ctx
->ac
.chip_class
== GFX9
&&
4763 instr
->op
== nir_texop_txs
&&
4764 instr
->sampler_dim
== GLSL_SAMPLER_DIM_1D
&&
4766 LLVMValueRef two
= LLVMConstInt(ctx
->ac
.i32
, 2, false);
4767 LLVMValueRef layers
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, two
, "");
4768 result
= LLVMBuildInsertElement(ctx
->ac
.builder
, result
, layers
,
4770 } else if (instr
->dest
.ssa
.num_components
!= 4)
4771 result
= ac_trim_vector(&ctx
->ac
, result
, instr
->dest
.ssa
.num_components
);
	if (result) {
		assert(instr->dest.is_ssa);
		result = ac_to_integer(&ctx->ac, result);

		for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
			result = exit_waterfall(ctx, wctx + i, result);
		}

		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}
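
/* Phis are emitted in two passes: visit_phi() creates an empty LLVM phi when
 * the instruction is first visited, and phi_post_pass() wires up the incoming
 * values once every predecessor block has been translated.
 */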
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
	LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
	LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

	ctx->ssa_defs[instr->dest.ssa.index] = result;
	_mesa_hash_table_insert(ctx->phis, instr, result);
}
static void visit_post_phi(struct ac_nir_context *ctx,
			   nir_phi_instr *instr,
			   LLVMValueRef llvm_phi)
{
	nir_foreach_phi_src(src, instr) {
		LLVMBasicBlockRef block = get_block(ctx, src->pred);
		LLVMValueRef llvm_src = get_src(ctx, src->src);

		LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
	}
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
	hash_table_foreach(ctx->phis, entry) {
		visit_post_phi(ctx, (nir_phi_instr *)entry->key,
			       (LLVMValueRef)entry->data);
	}
}
static bool is_def_used_in_an_export(const nir_ssa_def *def)
{
	nir_foreach_use(use_src, def) {
		if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
			nir_intrinsic_instr *instr = nir_instr_as_intrinsic(use_src->parent_instr);
			if (instr->intrinsic == nir_intrinsic_store_deref)
				return true;
		} else if (use_src->parent_instr->type == nir_instr_type_alu) {
			nir_alu_instr *instr = nir_instr_as_alu(use_src->parent_instr);
			if (instr->op == nir_op_vec4 &&
			    is_def_used_in_an_export(&instr->dest.dest.ssa)) {
				return true;
			}
		}
	}
	return false;
}
static void visit_ssa_undef(struct ac_nir_context *ctx,
			    const nir_ssa_undef_instr *instr)
{
	unsigned num_components = instr->def.num_components;
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	if (!ctx->abi->convert_undef_to_zero || is_def_used_in_an_export(&instr->def)) {
		LLVMValueRef undef;

		if (num_components == 1)
			undef = LLVMGetUndef(type);
		else {
			undef = LLVMGetUndef(LLVMVectorType(type, num_components));
		}
		ctx->ssa_defs[instr->def.index] = undef;
	} else {
		LLVMValueRef zero = LLVMConstInt(type, 0, false);
		if (num_components > 1) {
			zero = ac_build_gather_values_extended(
				&ctx->ac, &zero, 4, 0, false, false);
		}
		ctx->ssa_defs[instr->def.index] = zero;
	}
}
static void visit_jump(struct ac_llvm_context *ctx,
		       const nir_jump_instr *instr)
{
	switch (instr->type) {
	case nir_jump_break:
		ac_build_break(ctx);
		break;
	case nir_jump_continue:
		ac_build_continue(ctx);
		break;
	default:
		fprintf(stderr, "Unknown NIR jump instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}
}
static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
		       enum glsl_base_type type)
{
	switch (type) {
	case GLSL_TYPE_INT:
	case GLSL_TYPE_UINT:
	case GLSL_TYPE_BOOL:
	case GLSL_TYPE_SUBROUTINE:
		return ac->i32;
	case GLSL_TYPE_INT8:
	case GLSL_TYPE_UINT8:
		return ac->i8;
	case GLSL_TYPE_INT16:
	case GLSL_TYPE_UINT16:
		return ac->i16;
	case GLSL_TYPE_FLOAT:
		return ac->f32;
	case GLSL_TYPE_FLOAT16:
		return ac->f16;
	case GLSL_TYPE_INT64:
	case GLSL_TYPE_UINT64:
		return ac->i64;
	case GLSL_TYPE_DOUBLE:
		return ac->f64;
	default:
		unreachable("unknown GLSL type");
	}
}
static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
		  const struct glsl_type *type)
{
	if (glsl_type_is_scalar(type)) {
		return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
	}

	if (glsl_type_is_vector(type)) {
		return LLVMVectorType(
			glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
			glsl_get_vector_elements(type));
	}

	if (glsl_type_is_matrix(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_column_type(type)),
			glsl_get_matrix_columns(type));
	}

	if (glsl_type_is_array(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_array_element(type)),
			glsl_get_length(type));
	}

	assert(glsl_type_is_struct_or_ifc(type));

	LLVMTypeRef member_types[glsl_get_length(type)];

	for (unsigned i = 0; i < glsl_get_length(type); i++) {
		member_types[i] =
			glsl_to_llvm_type(ac,
					  glsl_get_struct_field(type, i));
	}

	return LLVMStructTypeInContext(ac->context, member_types,
				       glsl_get_length(type), false);
}
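
/* visit_deref() below only materializes pointers for shared (LDS) and global
 * memory derefs; every other variable mode is handled by the dedicated
 * load/store paths in visit_intrinsic().
 */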
4946 static void visit_deref(struct ac_nir_context
*ctx
,
4947 nir_deref_instr
*instr
)
4949 if (instr
->mode
!= nir_var_mem_shared
&&
4950 instr
->mode
!= nir_var_mem_global
)
4953 LLVMValueRef result
= NULL
;
4954 switch(instr
->deref_type
) {
4955 case nir_deref_type_var
: {
4956 struct hash_entry
*entry
= _mesa_hash_table_search(ctx
->vars
, instr
->var
);
4957 result
= entry
->data
;
4960 case nir_deref_type_struct
:
4961 if (instr
->mode
== nir_var_mem_global
) {
4962 nir_deref_instr
*parent
= nir_deref_instr_parent(instr
);
4963 uint64_t offset
= glsl_get_struct_field_offset(parent
->type
,
4964 instr
->strct
.index
);
4965 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4966 LLVMConstInt(ctx
->ac
.i32
, offset
, 0));
4968 result
= ac_build_gep0(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4969 LLVMConstInt(ctx
->ac
.i32
, instr
->strct
.index
, 0));
4972 case nir_deref_type_array
:
4973 if (instr
->mode
== nir_var_mem_global
) {
4974 nir_deref_instr
*parent
= nir_deref_instr_parent(instr
);
4975 unsigned stride
= glsl_get_explicit_stride(parent
->type
);
4977 if ((glsl_type_is_matrix(parent
->type
) &&
4978 glsl_matrix_type_is_row_major(parent
->type
)) ||
4979 (glsl_type_is_vector(parent
->type
) && stride
== 0))
4980 stride
= type_scalar_size_bytes(parent
->type
);
4983 LLVMValueRef index
= get_src(ctx
, instr
->arr
.index
);
4984 if (LLVMTypeOf(index
) != ctx
->ac
.i64
)
4985 index
= LLVMBuildZExt(ctx
->ac
.builder
, index
, ctx
->ac
.i64
, "");
4987 LLVMValueRef offset
= LLVMBuildMul(ctx
->ac
.builder
, index
, LLVMConstInt(ctx
->ac
.i64
, stride
, 0), "");
4989 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
), offset
);
4991 result
= ac_build_gep0(&ctx
->ac
, get_src(ctx
, instr
->parent
),
4992 get_src(ctx
, instr
->arr
.index
));
4995 case nir_deref_type_ptr_as_array
:
4996 if (instr
->mode
== nir_var_mem_global
) {
4997 unsigned stride
= nir_deref_instr_ptr_as_array_stride(instr
);
4999 LLVMValueRef index
= get_src(ctx
, instr
->arr
.index
);
5000 if (LLVMTypeOf(index
) != ctx
->ac
.i64
)
5001 index
= LLVMBuildZExt(ctx
->ac
.builder
, index
, ctx
->ac
.i64
, "");
5003 LLVMValueRef offset
= LLVMBuildMul(ctx
->ac
.builder
, index
, LLVMConstInt(ctx
->ac
.i64
, stride
, 0), "");
5005 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
), offset
);
5007 result
= ac_build_gep_ptr(&ctx
->ac
, get_src(ctx
, instr
->parent
),
5008 get_src(ctx
, instr
->arr
.index
));
5011 case nir_deref_type_cast
: {
5012 result
= get_src(ctx
, instr
->parent
);
5014 /* We can't use the structs from LLVM because the shader
5015 * specifies its own offsets. */
5016 LLVMTypeRef pointee_type
= ctx
->ac
.i8
;
5017 if (instr
->mode
== nir_var_mem_shared
)
5018 pointee_type
= glsl_to_llvm_type(&ctx
->ac
, instr
->type
);
5020 unsigned address_space
;
5022 switch(instr
->mode
) {
5023 case nir_var_mem_shared
:
5024 address_space
= AC_ADDR_SPACE_LDS
;
5026 case nir_var_mem_global
:
5027 address_space
= AC_ADDR_SPACE_GLOBAL
;
5030 unreachable("Unhandled address space");
5033 LLVMTypeRef type
= LLVMPointerType(pointee_type
, address_space
);
5035 if (LLVMTypeOf(result
) != type
) {
5036 if (LLVMGetTypeKind(LLVMTypeOf(result
)) == LLVMVectorTypeKind
) {
5037 result
= LLVMBuildBitCast(ctx
->ac
.builder
, result
,
5040 result
= LLVMBuildIntToPtr(ctx
->ac
.builder
, result
,
5047 unreachable("Unhandled deref_instr deref type");
5050 ctx
->ssa_defs
[instr
->dest
.ssa
.index
] = result
;
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list);

static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
	nir_foreach_instr(instr, block)
	{
		switch (instr->type) {
		case nir_instr_type_alu:
			visit_alu(ctx, nir_instr_as_alu(instr));
			break;
		case nir_instr_type_load_const:
			visit_load_const(ctx, nir_instr_as_load_const(instr));
			break;
		case nir_instr_type_intrinsic:
			visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
			break;
		case nir_instr_type_tex:
			visit_tex(ctx, nir_instr_as_tex(instr));
			break;
		case nir_instr_type_phi:
			visit_phi(ctx, nir_instr_as_phi(instr));
			break;
		case nir_instr_type_ssa_undef:
			visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
			break;
		case nir_instr_type_jump:
			visit_jump(&ctx->ac, nir_instr_as_jump(instr));
			break;
		case nir_instr_type_deref:
			visit_deref(ctx, nir_instr_as_deref(instr));
			break;
		default:
			fprintf(stderr, "Unknown NIR instr type: ");
			nir_print_instr(instr, stderr);
			fprintf(stderr, "\n");
			abort();
		}
	}

	_mesa_hash_table_insert(ctx->defs, block,
				LLVMGetInsertBlock(ctx->ac.builder));
}
static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
	LLVMValueRef value = get_src(ctx, if_stmt->condition);

	nir_block *then_block =
		(nir_block *) exec_list_get_head(&if_stmt->then_list);

	ac_build_uif(&ctx->ac, value, then_block->index);

	visit_cf_list(ctx, &if_stmt->then_list);

	if (!exec_list_is_empty(&if_stmt->else_list)) {
		nir_block *else_block =
			(nir_block *) exec_list_get_head(&if_stmt->else_list);

		ac_build_else(&ctx->ac, else_block->index);
		visit_cf_list(ctx, &if_stmt->else_list);
	}

	ac_build_endif(&ctx->ac, then_block->index);
}
static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
	nir_block *first_loop_block =
		(nir_block *) exec_list_get_head(&loop->body);

	ac_build_bgnloop(&ctx->ac, first_loop_block->index);

	visit_cf_list(ctx, &loop->body);

	ac_build_endloop(&ctx->ac, first_loop_block->index);
}
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list)
	{
		switch (node->type) {
		case nir_cf_node_block:
			visit_block(ctx, nir_cf_node_as_block(node));
			break;

		case nir_cf_node_if:
			visit_if(ctx, nir_cf_node_as_if(node));
			break;

		case nir_cf_node_loop:
			visit_loop(ctx, nir_cf_node_as_loop(node));
			break;

		default:
			return;
		}
	}
}
void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
			     struct ac_shader_abi *abi,
			     struct nir_shader *nir,
			     struct nir_variable *variable,
			     gl_shader_stage stage)
{
	unsigned output_loc = variable->data.driver_location / 4;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	/* tess ctrl has its own load/store paths for outputs */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		int idx = variable->data.location + variable->data.index;
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = nir->info.clip_distance_array_size +
				     nir->info.cull_distance_array_size;

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
		}
	}

	bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
	LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
	for (unsigned i = 0; i < attrib_count; ++i) {
		for (unsigned chan = 0; chan < 4; chan++) {
			abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
				ac_build_alloca_undef(ctx, type, "");
		}
	}
}
static void
setup_locals(struct ac_nir_context *ctx,
	     struct nir_function *func)
{
	int i, j;

	ctx->num_locals = 0;
	nir_foreach_variable(variable, &func->impl->locals) {
		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
		variable->data.driver_location = ctx->num_locals * 4;
		variable->data.location_frac = 0;
		ctx->num_locals += attrib_count;
	}
	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
	if (!ctx->locals)
		return;

	for (i = 0; i < ctx->num_locals; i++) {
		for (j = 0; j < 4; j++) {
			ctx->locals[i * 4 + j] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
		}
	}
}
static void
setup_scratch(struct ac_nir_context *ctx,
	      struct nir_shader *shader)
{
	if (shader->scratch_size == 0)
		return;

	ctx->scratch = ac_build_alloca_undef(&ctx->ac,
					     LLVMArrayType(ctx->ac.i8, shader->scratch_size),
					     "scratch");
}
static void
setup_constant_data(struct ac_nir_context *ctx,
		    struct nir_shader *shader)
{
	if (!shader->constant_data)
		return;

	LLVMValueRef data =
		LLVMConstStringInContext(ctx->ac.context,
					 shader->constant_data,
					 shader->constant_data_size,
					 true);
	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

	/* We want to put the constant data in the CONST address space so that
	 * we can use scalar loads. However, LLVM versions before 10 put these
	 * variables in the same section as the code, which is unacceptable
	 * for RadeonSI as it needs to relocate all the data sections after
	 * the code sections. See https://reviews.llvm.org/D65813.
	 */
	unsigned address_space =
		LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

	LLVMValueRef global =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "const_data",
					    address_space);

	LLVMSetInitializer(global, data);
	LLVMSetGlobalConstant(global, true);
	LLVMSetVisibility(global, LLVMHiddenVisibility);
	ctx->constant_data = global;
}
static void
setup_shared(struct ac_nir_context *ctx,
	     struct nir_shader *nir)
{
	if (ctx->ac.lds)
		return;

	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
					 nir->info.cs.shared_size);

	LLVMValueRef lds =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "compute_lds",
					    AC_ADDR_SPACE_LDS);
	LLVMSetAlignment(lds, 64 * 1024);

	ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
				       LLVMPointerType(ctx->ac.i8,
						       AC_ADDR_SPACE_LDS), "");
}
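
/* ac_nir_translate() is the entry point of this file: it copies the ABI and
 * argument info into an ac_nir_context, sets up locals/scratch/constant
 * data/LDS, walks the shader's control-flow list, resolves phis in a post
 * pass and finally lets the ABI emit the shader outputs.
 */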
5285 void ac_nir_translate(struct ac_llvm_context
*ac
, struct ac_shader_abi
*abi
,
5286 const struct ac_shader_args
*args
, struct nir_shader
*nir
)
5288 struct ac_nir_context ctx
= {};
5289 struct nir_function
*func
;
5295 ctx
.stage
= nir
->info
.stage
;
5296 ctx
.info
= &nir
->info
;
5298 ctx
.main_function
= LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx
.ac
.builder
));
5300 nir_foreach_variable(variable
, &nir
->outputs
)
5301 ac_handle_shader_output_decl(&ctx
.ac
, ctx
.abi
, nir
, variable
,

    ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
    ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
    ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);

    if (ctx.abi->kill_ps_if_inf_interp)
        ctx.verified_interp = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                      _mesa_key_pointer_equal);

    func = (struct nir_function *)exec_list_get_head(&nir->functions);

    nir_index_ssa_defs(func->impl);
    ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

    setup_locals(&ctx, func);
    setup_scratch(&ctx, nir);
    setup_constant_data(&ctx, nir);

    if (gl_shader_stage_is_compute(nir->info.stage))
        setup_shared(&ctx, nir);

    if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
        ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
        /* true = don't kill. */
        LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
    }

    visit_cf_list(&ctx, &func->impl->body);
    phi_post_pass(&ctx);

    if (ctx.ac.postponed_kill)
        ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
                                                      ctx.ac.postponed_kill, ""));

    if (!gl_shader_stage_is_compute(nir->info.stage))
        ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
                              ctx.abi->outputs);

    free(ctx.locals);
    free(ctx.ssa_defs);
    ralloc_free(ctx.defs);
    ralloc_free(ctx.phis);
    ralloc_free(ctx.vars);
    if (ctx.abi->kill_ps_if_inf_interp)
        ralloc_free(ctx.verified_interp);
}
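
/* Rough driver-side usage sketch (the setup lines are hypothetical; only the
 * final call reflects the entry point above):
 *
 *    struct ac_llvm_context ac;     // initialized by the driver
 *    struct ac_shader_abi abi;      // callbacks and outputs filled in by the driver
 *    struct ac_shader_args args;    // argument layout chosen by the driver
 *    struct nir_shader *nir = ...;  // the shader to translate
 *
 *    ac_nir_translate(&ac, &abi, &args, nir);
 */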

bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
    bool progress = false;

    /* Lower large variables to scratch first so that we won't bloat the
     * shader by generating large if ladders for them. We later lower
     * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
     */
    NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
             nir_var_function_temp,
             256,
             glsl_get_natural_size_align_bytes);

    /* While it would be nice not to have this flag, we are constrained
     * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
     */
    bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

    /* TODO: Indirect indexing of GS inputs is unimplemented.
     *
     * TCS and TES load inputs directly from LDS or offchip memory, so
     * indirect indexing is trivial.
     */
    nir_variable_mode indirect_mask = 0;
    if (nir->info.stage == MESA_SHADER_GEOMETRY ||
        (nir->info.stage != MESA_SHADER_TESS_CTRL &&
         nir->info.stage != MESA_SHADER_TESS_EVAL &&
         !llvm_has_working_vgpr_indexing)) {
        indirect_mask |= nir_var_shader_in;
    }
    if (!llvm_has_working_vgpr_indexing &&
        nir->info.stage != MESA_SHADER_TESS_CTRL)
        indirect_mask |= nir_var_shader_out;

    /* TODO: We shouldn't need to do this, however LLVM isn't currently
     * smart enough to handle indirects without causing excess spilling
     * causing the gpu to hang.
     *
     * See the following thread for more details of the problem:
     * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
     */
    indirect_mask |= nir_var_function_temp;

    progress |= nir_lower_indirect_derefs(nir, indirect_mask);
    return progress;
}
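
/* For the modes left in indirect_mask, nir_lower_indirect_derefs() replaces a
 * dynamically indexed access with a compare-and-select ladder; conceptually,
 * v[i] becomes something like: i == 0 ? v[0] : (i == 1 ? v[1] : ...), which
 * is why large arrays are sent to scratch first.
 */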

static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
    if (intrin->intrinsic != nir_intrinsic_store_deref)
        return 0;

    nir_variable *var =
        nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

    if (var->data.mode != nir_var_shader_out)
        return 0;

    unsigned writemask = 0;
    const int location = var->data.location;
    unsigned first_component = var->data.location_frac;
    unsigned num_comps = intrin->dest.ssa.num_components;

    if (location == VARYING_SLOT_TESS_LEVEL_INNER)
        writemask = ((1 << (num_comps + 1)) - 1) << first_component;
    else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
        writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

    return writemask;
}
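
/* The writemask above packs the inner tess levels into the low nibble and the
 * outer tess levels into the next nibble (the extra << 4). For example, with
 * the formula as written, a single-component store to gl_TessLevelOuter at
 * location_frac 0 yields ((1 << 2) - 1) << 0 << 4 = 0x30.
 */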

static void
scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
               unsigned *cond_block_tf_writemask,
               bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
{
    switch (cf_node->type) {
    case nir_cf_node_block: {
        nir_block *block = nir_cf_node_as_block(cf_node);
        nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
                continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic == nir_intrinsic_control_barrier) {

                /* If we find a barrier in nested control flow put this in the
                 * too hard basket. In GLSL this is not possible but it is in
                 * SPIR-V.
                 */
                if (is_nested_cf) {
                    *tessfactors_are_def_in_all_invocs = false;
                    return;
                }

                /* The following case must be prevented:
                 *    gl_TessLevelInner = ...;
                 *    barrier();
                 *    if (gl_InvocationID == 1)
                 *       gl_TessLevelInner = ...;
                 *
                 * If you consider disjoint code segments separated by barriers, each
                 * such segment that writes tess factor channels should write the same
                 * channels in all codepaths within that segment.
                 */
                if (upper_block_tf_writemask || cond_block_tf_writemask) {
                    /* Accumulate the result: */
                    *tessfactors_are_def_in_all_invocs &=
                        !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

                    /* Analyze the next code segment from scratch. */
                    *upper_block_tf_writemask = 0;
                    *cond_block_tf_writemask = 0;
                }
            } else
                *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
        }

        break;
    }
    case nir_cf_node_if: {
        unsigned then_tessfactor_writemask = 0;
        unsigned else_tessfactor_writemask = 0;

        nir_if *if_stmt = nir_cf_node_as_if(cf_node);
        foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
            scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }

        foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
            scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }

        if (then_tessfactor_writemask || else_tessfactor_writemask) {
            /* If both statements write the same tess factor channels,
             * we can say that the upper block writes them too.
             */
            *upper_block_tf_writemask |= then_tessfactor_writemask &
                else_tessfactor_writemask;
            *cond_block_tf_writemask |= then_tessfactor_writemask |
                else_tessfactor_writemask;
        }

        break;
    }
    case nir_cf_node_loop: {
        nir_loop *loop = nir_cf_node_as_loop(cf_node);
        foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
            scan_tess_ctrl(nested_node, cond_block_tf_writemask,
                           cond_block_tf_writemask,
                           tessfactors_are_def_in_all_invocs, true);
        }

        break;
    }
    default:
        unreachable("unknown cf node type");
    }
}
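
/* Note that the loop case above feeds cond_block_tf_writemask for both the
 * upper and conditional masks: writes inside a loop are treated as
 * conditional, presumably because the body may execute zero times.
 */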

bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
    assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

    /* The pass works as follows:
     * If all codepaths write tess factors, we can say that all
     * invocations define tess factors.
     *
     * Each tess factor channel is tracked separately.
     */
    unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
    unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

    /* Initial value = true. Here the pass will accumulate results from
     * multiple segments surrounded by barriers. If tess factors aren't
     * written at all, it's a shader bug and we don't care if this will be
     * true.
     */
    bool tessfactors_are_def_in_all_invocs = true;

    nir_foreach_function(function, nir) {
        if (function->impl) {
            foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
                scan_tess_ctrl(node, &main_block_tf_writemask,
                               &cond_block_tf_writemask,
                               &tessfactors_are_def_in_all_invocs,
                               false);
            }
        }
    }

    /* Accumulate the result for the last code segment separated by a
     * barrier.
     */
    if (main_block_tf_writemask || cond_block_tf_writemask) {
        tessfactors_are_def_in_all_invocs &=
            !(cond_block_tf_writemask & ~main_block_tf_writemask);
    }

    return tessfactors_are_def_in_all_invocs;
}
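
/* Worked example of the final check: if the conditional blocks wrote outer
 * levels 0-1 (cond mask 0x30) while the main block wrote all outer levels
 * (main mask 0xF0), then cond & ~main == 0 and the result stays true; if the
 * main block wrote nothing, cond & ~main != 0 and the result becomes false.
 */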