/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"

struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;
   shader_info *info; /* referenced by build_tex_intrinsic() below */

   LLVMValueRef *ssa_defs;

   LLVMValueRef constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *vars;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};

static LLVMValueRef
get_sampler_desc_index(struct ac_nir_context *ctx,
                       nir_deref_instr *deref_instr,
                       const nir_instr *instr,
                       bool image);

static LLVMValueRef
get_sampler_desc(struct ac_nir_context *ctx,
                 nir_deref_instr *deref_instr,
                 enum ac_descriptor_type desc_type,
                 const nir_instr *instr,
                 LLVMValueRef index,
                 bool image, bool write);

static void
build_store_values_extended(struct ac_llvm_context *ac,
                            LLVMValueRef *values,
                            unsigned value_count,
                            unsigned value_stride,
                            LLVMValueRef vec)
{
   LLVMBuilderRef builder = ac->builder;
   unsigned i;

   for (i = 0; i < value_count; i++) {
      LLVMValueRef ptr = values[i * value_stride];
      LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
      LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");

      LLVMBuildStore(builder, value, ptr);
   }
}

static LLVMTypeRef
get_def_type(struct ac_nir_context *ctx,
             const nir_ssa_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}

static LLVMValueRef
get_src(struct ac_nir_context *nir, nir_src src)
{
   assert(src.is_ssa);
   return nir->ssa_defs[src.ssa->index];
}

static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
   int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

   return LLVMBuildBitCast(ctx->ac.builder, ptr,
                           LLVMPointerType(type, addr_space), "");
}

static LLVMBasicBlockRef
get_block(struct ac_nir_context *nir,
          const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}

static LLVMValueRef
get_alu_src(struct ac_nir_context *ctx,
            nir_alu_src src,
            unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {
         LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value,
                                         masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
                                        swizzle, "");
      }
   }
   return value;
}

static LLVMValueRef
emit_int_cmp(struct ac_llvm_context *ctx,
             LLVMIntPredicate pred, LLVMValueRef src0,
             LLVMValueRef src1)
{
   LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}

static LLVMValueRef
emit_float_cmp(struct ac_llvm_context *ctx,
               LLVMRealPredicate pred, LLVMValueRef src0,
               LLVMValueRef src1)
{
   LLVMValueRef result;
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}

static LLVMValueRef
emit_intrin_1f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

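/* Worked example (illustrative, not from the original source): with
 * intrin = "llvm.floor" and a 32-bit float operand, the snprintf above
 * produces the overloaded intrinsic name "llvm.floor.f32"; a 16-bit
 * operand would produce "llvm.floor.f16". The 2- and 3-parameter
 * helpers below mangle names the same way, keyed off the type of the
 * first (float-cast) parameter.
 */
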
static LLVMValueRef
emit_intrin_2f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0, LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef
emit_intrin_3f_param(struct ac_llvm_context *ctx,
                     const char *intrin,
                     LLVMTypeRef result_type,
                     LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef
emit_bcsel(struct ac_llvm_context *ctx,
           LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
                                  ctx->i32_0, "");
   return LLVMBuildSelect(ctx->builder, v,
                          ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}

static LLVMValueRef
emit_iabs(struct ac_llvm_context *ctx,
          LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}

static LLVMValueRef
emit_uint_carry(struct ac_llvm_context *ctx,
                const char *intrin,
                LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
   LLVMValueRef res;
   LLVMValueRef params[] = { src0, src1 };
   ret_type = LLVMStructTypeInContext(ctx->context, types,
                                      2, false);

   res = ac_build_intrinsic(ctx, intrin, ret_type,
                            params, 2, AC_FUNC_ATTR_READNONE);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}

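/* Worked example (illustrative): for nir_op_uadd_carry, intrin is
 * "llvm.uadd.with.overflow.i32". With src0 = 0xFFFFFFFF and src1 = 1 the
 * addition overflows, the extracted i1 overflow flag is true, and the
 * zero-extended result is 1; without overflow the function returns 0.
 */
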
static LLVMValueRef
emit_b2f(struct ac_llvm_context *ctx,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
                                      LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
                                      "");
   result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

   switch (bitsize) {
   case 16:
      return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef
emit_f2b(struct ac_llvm_context *ctx,
         LLVMValueRef src0)
{
   src0 = ac_to_float(ctx, src0);
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
                        ctx->i32, "");
}

static LLVMValueRef
emit_b2i(struct ac_llvm_context *ctx,
         LLVMValueRef src0,
         unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
   case 16:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}

static LLVMValueRef
emit_i2b(struct ac_llvm_context *ctx,
         LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
                        ctx->i32, "");
}

static LLVMValueRef
emit_f2f16(struct ac_llvm_context *ctx,
           LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->chip_class >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal - and flush to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->chip_class >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
       * so compare the result and flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
                           LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
                           temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
                            temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}

static LLVMValueRef
emit_umul_high(struct ac_llvm_context *ctx,
               LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

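/* Worked example (illustrative): umul_high(0x80000000, 4) zero-extends
 * both operands to i64, multiplies to 0x200000000, shifts right by 32
 * and truncates, returning 2, i.e. the high dword of the 64-bit product.
 */
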
static LLVMValueRef
emit_imul_high(struct ac_llvm_context *ctx,
               LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef
emit_bfm(struct ac_llvm_context *ctx,
         LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
   return LLVMBuildShl(ctx->builder,
                       LLVMBuildSub(ctx->builder,
                                    LLVMBuildShl(ctx->builder,
                                                 ctx->i32_1,
                                                 bits, ""),
                                    ctx->i32_1, ""),
                       offset, "");
}

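/* Worked example (illustrative): bits = 5, offset = 3 gives
 * ((1 << 5) - 1) << 3 = 0x1F << 3 = 0xF8, i.e. a 5-bit mask starting at
 * bit 3, matching the hardware bitfield-mask (V_BFM) semantics.
 */
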
static LLVMValueRef
emit_bitfield_select(struct ac_llvm_context *ctx,
                     LLVMValueRef mask, LLVMValueRef insert,
                     LLVMValueRef base)
{
   /* Calculate:
    *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(ctx->builder, base,
                       LLVMBuildAnd(ctx->builder, mask,
                                    LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}

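/* Worked example (illustrative): mask = 0xFF00, insert = 0xABCD,
 * base = 0x1234. insert ^ base = 0xB9F9; masked with 0xFF00 gives 0xB900;
 * XORed with base gives 0xAB34: the high byte comes from insert and the
 * low byte from base, exactly (mask & insert) | (~mask & base).
 */
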
static LLVMValueRef
emit_pack_2x16(struct ac_llvm_context *ctx,
               LLVMValueRef src0,
               LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                    LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}

static LLVMValueRef
emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                      LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}

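/* Worked example (illustrative): src0 = 0x3C004000 holds the half 0x4000
 * (2.0) in the low 16 bits and 0x3C00 (1.0) in the high 16 bits, so the
 * gathered result is the vec2 (2.0, 1.0).
 */
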
static LLVMValueRef
emit_ddxy(struct ac_nir_context *ctx,
          nir_op op,
          LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* For DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine ||
       op == nir_op_fddx_coarse ||
       op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}

struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop, leaving the caller
 * to implement the body.
 *
 * Params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we built the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    things through.
 */

static LLVMValueRef
enter_waterfall(struct ac_nir_context *ctx,
                struct waterfall_context *wctx,
                LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
                                       scalar_value, "uniform_active");

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return scalar_value;
}

static LLVMValueRef
exit_waterfall(struct ac_nir_context *ctx,
               struct waterfall_context *wctx,
               LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      LLVMConstInt(ctx->ac.i32, 0, false),
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}

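/* Usage sketch (illustrative only; the ssbo helpers further down are real
 * callers of this pattern):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef idx = enter_waterfall(ctx, &wctx, value, divergent);
 *    ...emit the body using the now-uniform idx...
 *    result = exit_waterfall(ctx, &wctx, result);
 *
 * enter_waterfall() opens the loop and scalarizes the value via
 * ac_build_readlane(); exit_waterfall() emits the result phi, the exit
 * test and the loop close.
 */
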
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[4], result = NULL;
   unsigned num_components = instr->dest.dest.ssa.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);
   bool saved_inexact = false;

   if (instr->exact)
      saved_inexact = ac_disable_inexact_math(ctx->ac.builder);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);

   switch (instr->op) {
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by the backend compiler with the sign
          * bit removed via XOR. This is probably an LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ineg:
      result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imod:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_umod:
      result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmod:
      /* lower_fmod only lowers 16-bit and 32-bit fmod */
      assert(instr->dest.dest.ssa.bit_size == 64);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), result);
      result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
      result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
      break;
   case nir_op_irem:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_idiv:
      result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_udiv:
      result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_frcp:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ushr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ilt32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine32:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult32:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge32:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by the backend compiler with the sign
          * bit removed via AND.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_ffloor:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fract(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsin:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fcos:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fexp2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rsq",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0],
                                  ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result,
                                ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0],
                                   instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fpow:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
      result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
      else
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (LLVMTypeOf(src[0]) == ctx->ac.f64)
         src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
      LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
      result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      break;
   case nir_op_f2f16_rtne:
   case nir_op_f2f16:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_b32csel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_f2b32:
      result = emit_f2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_i2b32:
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
      break;
   case nir_op_pack_snorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
      break;
   case nir_op_pack_unorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
      break;
   case nir_op_unpack_half_2x16:
      result = emit_unpack_half_2x16(&ctx->ac, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_cube_face_coord: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[2];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
                                           ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
      results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
      LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
      results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
      results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
      result = ac_build_gather_values(&ctx->ac, results, 2);
      break;
   }
   case nir_op_cube_face_index: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
                                  ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      break;
   }
   case nir_op_fmin3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umin3:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      result = ac_build_umin(&ctx->ac, result, src[2]);
      break;
   case nir_op_imin3:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      result = ac_build_imin(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmax3:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), result, src[2]);
      break;
   case nir_op_umax3:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      result = ac_build_umax(&ctx->ac, result, src[2]);
      break;
   case nir_op_imax3:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      result = ac_build_imax(&ctx->ac, result, src[2]);
      break;
   case nir_op_fmed3: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      src[2] = ac_to_float(&ctx->ac, src[2]);
      result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
                              instr->dest.dest.ssa.bit_size);
      break;
   }
   case nir_op_imed3: {
      LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
      result = ac_build_imax(&ctx->ac, tmp1, tmp2);
      break;
   }
   case nir_op_umed3: {
      LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
      LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
      tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
      result = ac_build_umax(&ctx->ac, tmp1, tmp2);
      break;
   }
   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }

   if (result) {
      assert(instr->dest.dest.is_ssa);
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
   }

   if (instr->exact)
      ac_restore_inexact_math(ctx->ac.builder, saved_inexact);
}

static void visit_load_const(struct ac_nir_context *ctx,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef values[4], value = NULL;
   LLVMTypeRef element_type =
      LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 8:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr,
                 "unsupported nir load_const bit_size: %d\n",
                 instr->def.bit_size);
         abort();
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
}

static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
   LLVMValueRef size =
      LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                              LLVMConstInt(ctx->ac.i32, 2, false), "");

   if (ctx->ac.chip_class == GFX8 && in_elements) {
      /* On GFX8, the descriptor contains the size in bytes,
       * but TXQ must return the size in elements.
       * The stride is always non-zero for resources using TXQ.
       */
      LLVMValueRef stride =
         LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                                 ctx->ac.i32_1, "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride,
                             LLVMConstInt(ctx->ac.i32, 16, false), "");
      stride = LLVMBuildAnd(ctx->ac.builder, stride,
                            LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

      size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
   }
   return size;
}

/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef
lower_gather4_integer(struct ac_llvm_context *ctx,
                      nir_variable *var,
                      struct ac_image_args *args,
                      const nir_tex_instr *instr)
{
   const struct glsl_type *type = glsl_without_array(var->type);
   enum glsl_base_type stype = glsl_get_sampler_result_type(type);
   LLVMValueRef wa_8888 = NULL;
   LLVMValueRef half_texel[2];
   LLVMValueRef result;

   assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef formats;
      LLVMValueRef data_format;
      LLVMValueRef wa_formats;

      formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

      data_format = LLVMBuildLShr(ctx->builder, formats,
                                  LLVMConstInt(ctx->i32, 20, false), "");
      data_format = LLVMBuildAnd(ctx->builder, data_format,
                                 LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
      wa_8888 = LLVMBuildICmp(
         ctx->builder, LLVMIntEQ, data_format,
         LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
         "");

      uint32_t wa_num_format =
         stype == GLSL_TYPE_UINT ?
            S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
            S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
      wa_formats = LLVMBuildAnd(ctx->builder, formats,
                                LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
                                "");
      wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
                               LLVMConstInt(ctx->i32, wa_num_format, false), "");

      formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
      args->resource = LLVMBuildInsertElement(
         ctx->builder, args->resource, formats, ctx->i32_1, "");
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
      half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
   } else {
      struct ac_image_args resinfo = {};
      LLVMBasicBlockRef bbs[2];

      LLVMValueRef unnorm = NULL;
      LLVMValueRef default_offset = ctx->f32_0;
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
          !instr->is_array) {
         /* In vulkan, whether the sampler uses unnormalized
          * coordinates or not is a dynamic property of the
          * sampler. Hence, to figure out whether or not we
          * need to divide by the texture size, we need to test
          * the sampler at runtime. This tests the bit set by
          * radv_init_sampler().
          */
         LLVMValueRef sampler0 =
            LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
         sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
                                  LLVMConstInt(ctx->i32, 15, false), "");
         sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
         unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
         default_offset = LLVMConstReal(ctx->f32, -0.5);
      }

      bbs[0] = LLVMGetInsertBlock(ctx->builder);
      if (wa_8888 || unnorm) {
         assert(!(wa_8888 && unnorm));
         LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
         /* Skip the texture size query entirely if we don't need it. */
         ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
         bbs[1] = LLVMGetInsertBlock(ctx->builder);
      }

      /* Query the texture size. */
      resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
      resinfo.opcode = ac_image_get_resinfo;
      resinfo.dmask = 0xf;
      resinfo.lod = ctx->i32_0;
      resinfo.resource = args->resource;
      resinfo.attributes = AC_FUNC_ATTR_READNONE;
      LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

      /* Compute -0.5 / size. */
      for (unsigned c = 0; c < 2; c++) {
         half_texel[c] =
            LLVMBuildExtractElement(ctx->builder, size,
                                    LLVMConstInt(ctx->i32, c, 0), "");
         half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
         half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
         half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
                                       LLVMConstReal(ctx->f32, -0.5), "");
      }

      if (wa_8888 || unnorm) {
         ac_build_endif(ctx, 2000);

         for (unsigned c = 0; c < 2; c++) {
            LLVMValueRef values[2] = { default_offset, half_texel[c] };
            half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
                                         values, bbs);
         }
      }
   }

   for (unsigned c = 0; c < 2; c++) {
      LLVMValueRef tmp;
      tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
      args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   result = ac_build_image_opcode(ctx, args);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef tmp, tmp2;

      /* if the cube workaround is in place, f2i the result. */
      for (unsigned c = 0; c < 4; c++) {
         tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
         if (stype == GLSL_TYPE_UINT)
            tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
         else
            tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
         tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
         tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
         result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
      }
   }
   return result;
}

static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
   nir_deref_instr *texture_deref_instr = NULL;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      default:
         break;
      }
   }
   return texture_deref_instr;
}

static LLVMValueRef
build_tex_intrinsic(struct ac_nir_context *ctx,
                    const nir_tex_instr *instr,
                    struct ac_image_args *args)
{
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

      assert(instr->dest.is_ssa);
      return ac_build_buffer_load_format(&ctx->ac,
                                         args->resource,
                                         args->coords[0],
                                         ctx->ac.i32_0,
                                         util_last_bit(mask),
                                         0, true,
                                         instr->dest.ssa.bit_size == 16);
   }

   args->opcode = ac_image_sample;

   switch (instr->op) {
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_samples_identical:
      args->opcode = args->level_zero ||
                     instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
                        ac_image_load : ac_image_load_mip;
      args->level_zero = false;
      break;
   case nir_texop_txs:
   case nir_texop_query_levels:
      args->opcode = ac_image_get_resinfo;
      if (!args->lod)
         args->lod = ctx->ac.i32_0;
      args->level_zero = false;
      break;
   case nir_texop_tex:
      if (ctx->stage != MESA_SHADER_FRAGMENT) {
         assert(!args->lod);
         args->level_zero = true;
      }
      break;
   case nir_texop_tg4:
      args->opcode = ac_image_gather4;
      if (!args->lod && !args->bias)
         args->level_zero = true;
      break;
   case nir_texop_lod:
      args->opcode = ac_image_get_lod;
      break;
   case nir_texop_fragment_fetch:
   case nir_texop_fragment_mask_fetch:
      args->opcode = ac_image_load;
      args->level_zero = false;
      break;
   default:
      break;
   }

   if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
      nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
      nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
      const struct glsl_type *type = glsl_without_array(var->type);
      enum glsl_base_type stype = glsl_get_sampler_result_type(type);
      if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
         return lower_gather4_integer(&ctx->ac, var, args, instr);
      }
   }

   /* Fixup for GFX9 which allocates 1D textures as 2D. */
   if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
      if ((args->dim == ac_image_2darray ||
           args->dim == ac_image_2d) && !args->coords[1]) {
         args->coords[1] = ctx->ac.i32_0;
      }
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
                    ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
   if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
      /* Prevent texture instructions with implicit derivatives from being
       * sunk into branches. */
      switch (instr->op) {
      case nir_texop_tex:
      case nir_texop_txb:
      case nir_texop_lod:
         args->attributes |= AC_FUNC_ATTR_CONVERGENT;
         break;
      default:
         break;
      }
   }

   return ac_build_image_opcode(&ctx->ac, args);
}

static LLVMValueRef
visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
                              nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr = get_src(ctx, instr->src[0]);
   LLVMValueRef index = get_src(ctx, instr->src[1]);

   LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
   LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
   return result;
}

static LLVMValueRef
visit_load_push_constant(struct ac_nir_context *ctx,
                         nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr, addr;
   LLVMValueRef src0 = get_src(ctx, instr->src[0]);
   unsigned index = nir_intrinsic_base(instr);

   addr = LLVMConstInt(ctx->ac.i32, index, 0);
   addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

   /* Load constant values from user SGPRs when possible, otherwise
    * fall back to the default path that loads directly from memory.
    */
   if (LLVMIsConstant(src0) &&
       instr->dest.ssa.bit_size == 32) {
      unsigned count = instr->dest.ssa.num_components;
      unsigned offset = index;

      offset += LLVMConstIntGetZExtValue(src0);
      offset -= ctx->args->base_inline_push_consts;

      unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
      if (offset + count <= num_inline_push_consts) {
         LLVMValueRef push_constants[num_inline_push_consts];
         for (unsigned i = 0; i < num_inline_push_consts; i++)
            push_constants[i] = ac_get_arg(&ctx->ac,
                                           ctx->args->inline_push_consts[i]);
         return ac_build_gather_values(&ctx->ac,
                                       push_constants + offset,
                                       count);
      }
   }

   ptr = LLVMBuildGEP(ctx->ac.builder,
                      ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");

   if (instr->dest.ssa.bit_size == 8) {
      unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

      LLVMValueRef params[3];
      if (load_dwords > 1) {
         LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.v2i32, "");
         params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
         params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
      } else {
         res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
         params[0] = ctx->ac.i32_0;
         params[1] = res;
      }
      params[2] = addr;
      res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

      res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
      if (instr->dest.ssa.num_components > 1)
         res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
      return res;
   } else if (instr->dest.ssa.bit_size == 16) {
      unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
      LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
      cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
      LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
                              LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
                              LLVMConstInt(ctx->ac.i32, 4, false)};
      LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
      LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
      LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
      LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
      res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
      return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
   }

   ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

   return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}

static LLVMValueRef
visit_get_buffer_size(struct ac_nir_context *ctx,
                      const nir_intrinsic_instr *instr)
{
   LLVMValueRef index = get_src(ctx, instr->src[0]);

   return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}

static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
   uint32_t new_mask = 0;
   for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
      if (mask & (1u << i))
         new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
   return new_mask;
}

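/* Worked example (illustrative): widen_mask(0b101, 2) expands each set
 * component bit into 'multiplier' consecutive bits, producing 0b110011:
 * components 0 and 2 become bit pairs 0-1 and 4-5. This turns a
 * per-component writemask into a per-subcomponent one when a vector is
 * split into smaller elements.
 */
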
static LLVMValueRef
extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                     unsigned start, unsigned count)
{
   LLVMValueRef mask[] = {
      ctx->i32_0, ctx->i32_1,
      LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

   unsigned src_elements = ac_get_llvm_num_components(src);

   if (count == src_elements) {
      assert(start == 0);
      return src;
   } else if (count == 1) {
      assert(start < src_elements);
      return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
   } else {
      assert(start + count <= src_elements);

      LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
      return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
   }
}

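/* Worked example (illustrative): for a v4f32 source, start = 1 and
 * count = 2 build the shuffle selector <1, 2> and return a v2f32 holding
 * elements 1 and 2; count == 1 degenerates to an extractelement, and
 * count == src_elements returns the source unchanged.
 */
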
static unsigned get_cache_policy(struct ac_nir_context *ctx,
                                 enum gl_access_qualifier access,
                                 bool may_store_unaligned,
                                 bool writeonly_memory)
{
   unsigned cache_policy = 0;

   /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
    * store opcodes not aligned to a dword are affected. The only way to
    * get unaligned stores is through shader images.
    */
   if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
        /* If this is write-only, don't keep data in L1 to prevent
         * evicting L1 cache lines that may be needed by other
         * instructions. */
        writeonly_memory ||
        access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
      cache_policy |= ac_glc;
   }

   if (access & ACCESS_STREAM_CACHE_POLICY)
      cache_policy |= ac_slc | ac_glc;

   return cache_policy;
}

static LLVMValueRef
enter_waterfall_ssbo(struct ac_nir_context *ctx,
                     struct waterfall_context *wctx,
                     const nir_intrinsic_instr *instr,
                     nir_src src)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, src),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}

static void visit_store_ssbo(struct ac_nir_context *ctx,
                             nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7000);
   }

   LLVMValueRef src_data = get_src(ctx, instr->src[0]);
   int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
   unsigned writemask = nir_intrinsic_write_mask(instr);
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   bool writeonly_memory = access & ACCESS_NON_READABLE;
   unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);

   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
   LLVMValueRef base_data = src_data;
   base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
   LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

   while (writemask) {
      int start, count;
      LLVMValueRef data, offset;
      LLVMTypeRef data_type;

      u_bit_scan_consecutive_range(&writemask, &start, &count);

      /* Due to an LLVM limitation with LLVM < 9, split 3-element
       * writes into a 2-element and a 1-element write. */
      if (count == 3 &&
          (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
         writemask |= 1 << (start + 2);
         count = 2;
      }
      int num_bytes = count * elem_size_bytes; /* count in bytes */

      /* We can only store 4 dwords at the same time.
       * This can only happen for 64-bit vectors. */
      if (num_bytes > 16) {
         writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
         count = 2;
         num_bytes = 16;
      }

      /* check alignment of 16-bit stores */
      if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = 2;
      }

      /* Due to alignment issues, split stores of 8-bit/16-bit
       * vectors. */
      if (ctx->ac.chip_class == GFX6 && count > 1 && elem_size_bytes < 4) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = elem_size_bytes;
      }

      data = extract_vector_range(&ctx->ac, base_data, start, count);

      offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
                            LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

      if (num_bytes == 1) {
         ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
                                     offset, ctx->ac.i32_0,
                                     cache_policy);
      } else if (num_bytes == 2) {
         ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
                                      offset, ctx->ac.i32_0,
                                      cache_policy);
      } else {
         int num_channels = num_bytes / 4;

         switch (num_bytes) {
         case 16: /* v4f32 */
            data_type = ctx->ac.v4f32;
            break;
         case 12: /* v3f32 */
            data_type = ctx->ac.v3f32;
            break;
         case 8: /* v2f32 */
            data_type = ctx->ac.v2f32;
            break;
         case 4: /* f32 */
            data_type = ctx->ac.f32;
            break;
         default:
            unreachable("Malformed vector store.");
         }
         data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

         ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
                                     num_channels, offset,
                                     ctx->ac.i32_0, 0,
                                     cache_policy);
      }
   }

   exit_waterfall(ctx, &wctx, NULL);

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7000);
}

static LLVMValueRef
emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                       LLVMValueRef descriptor,
                       LLVMValueRef offset,
                       LLVMValueRef compare,
                       LLVMValueRef exchange)
{
   LLVMBasicBlockRef start_block = NULL, then_block = NULL;
   if (ctx->abi->robust_buffer_access) {
      LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      start_block = LLVMGetInsertBlock(ctx->ac.builder);

      ac_build_ifcc(&ctx->ac, cond, -1);

      then_block = LLVMGetInsertBlock(ctx->ac.builder);
   }

   LLVMValueRef ptr_parts[2] = {
      ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
      LLVMBuildAnd(ctx->ac.builder,
                   ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                   LLVMConstInt(ctx->ac.i32, 65535, 0), "")
   };

   ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
   ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

   offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

   LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
   ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
   ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

   LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
   result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

   if (ctx->abi->robust_buffer_access) {
      ac_build_endif(&ctx->ac, -1);

      LLVMBasicBlockRef incoming_blocks[2] = {
         start_block,
         then_block,
      };

      LLVMValueRef incoming_values[2] = {
         LLVMConstInt(ctx->ac.i64, 0, 0),
         result,
      };
      LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
      LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
      return ret;
   } else {
      return result;
   }
}

static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7001);
   }

   LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
   const char *op;
   char name[64], type[8];
   LLVMValueRef params[6], descriptor;
   LLVMValueRef result;
   int arg_count = 0;

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   switch (instr->intrinsic) {
   case nir_intrinsic_ssbo_atomic_add:
      op = "add";
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      op = "smin";
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      op = "umin";
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      op = "smax";
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      op = "umax";
      break;
   case nir_intrinsic_ssbo_atomic_and:
      op = "and";
      break;
   case nir_intrinsic_ssbo_atomic_or:
      op = "or";
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      op = "xor";
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      op = "swap";
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      op = "cmpswap";
      break;
   default:
      abort();
   }

   descriptor = ctx->abi->load_ssbo(ctx->abi,
                                    rsrc_base,
                                    true);

   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
       return_type == ctx->ac.i64) {
      result = emit_ssbo_comp_swap_64(ctx, descriptor,
                                      get_src(ctx, instr->src[1]),
                                      get_src(ctx, instr->src[2]),
                                      get_src(ctx, instr->src[3]));
   } else {
      if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
         params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
      }
      params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
      params[arg_count++] = descriptor;

      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy with
          * LLVM 8, see r358579.
          */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i32_0; /* soffset */
         params[arg_count++] = ctx->ac.i32_0; /* slc */

         ac_build_type_name_for_intr(return_type, type, sizeof(type));
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
      } else {
         params[arg_count++] = ctx->ac.i32_0; /* vindex */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i1false; /* slc */

         assert(return_type == ctx->ac.i32);
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.buffer.atomic.%s", op);
      }

      result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
                                  arg_count, 0);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7001);
   return result;
}
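
/* SSBO loads below are split into chunks of at most 16 bytes (one dword4);
 * sub-dword element types without 4-byte alignment are additionally loaded
 * one element at a time. For example, a 32-byte load of a v8f32 value
 * (hypothetical) would be issued as two 16-byte buffer loads whose results
 * are re-gathered at the end.
 */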
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   int elem_size_bytes = instr->dest.ssa.bit_size / 8;
   int num_components = instr->num_components;
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   unsigned cache_policy = get_cache_policy(ctx, access, false, false);

   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
   LLVMValueRef vindex = ctx->ac.i32_0;

   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
   LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

   LLVMValueRef results[4];
   for (int i = 0; i < num_components;) {
      int num_elems = num_components - i;
      if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
         num_elems = 1;
      if (num_elems * elem_size_bytes > 16)
         num_elems = 16 / elem_size_bytes;
      int load_bytes = num_elems * elem_size_bytes;

      LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

      LLVMValueRef ret;

      if (load_bytes == 1) {
         ret = ac_build_tbuffer_load_byte(&ctx->ac,
                                          rsrc,
                                          offset,
                                          ctx->ac.i32_0,
                                          immoffset,
                                          cache_policy);
      } else if (load_bytes == 2) {
         ret = ac_build_tbuffer_load_short(&ctx->ac,
                                           rsrc,
                                           offset,
                                           ctx->ac.i32_0,
                                           immoffset,
                                           cache_policy);
      } else {
         int num_channels = util_next_power_of_two(load_bytes) / 4;
         bool can_speculate = access & ACCESS_CAN_REORDER;

         ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
                                    vindex, offset, immoffset, 0,
                                    cache_policy, can_speculate, false);
      }

      LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
      ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

      LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

      for (unsigned j = 0; j < num_elems; j++) {
         results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
      }
      i += num_elems;
   }

   LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
   return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef
enter_waterfall_ubo(struct ac_nir_context *ctx,
                    struct waterfall_context *wctx,
                    const nir_intrinsic_instr *instr)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
                                          nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);

   LLVMValueRef ret;
   LLVMValueRef rsrc = rsrc_base;
   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   int num_components = instr->num_components;

   if (ctx->abi->load_ubo)
      rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

   if (instr->dest.ssa.bit_size == 64)
      num_components *= 2;

   if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
      unsigned load_bytes = instr->dest.ssa.bit_size / 8;
      LLVMValueRef results[num_components];
      for (unsigned i = 0; i < num_components; ++i) {
         LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                               load_bytes * i, 0);

         if (load_bytes == 1) {
            results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                    rsrc,
                                                    offset,
                                                    ctx->ac.i32_0,
                                                    immoffset,
                                                    0);
         } else {
            assert(load_bytes == 2);
            results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                     rsrc,
                                                     offset,
                                                     ctx->ac.i32_0,
                                                     immoffset,
                                                     0);
         }
      }
      ret = ac_build_gather_values(&ctx->ac, results, num_components);
   } else {
      ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                 NULL, 0, 0, true, true);

      ret = ac_trim_vector(&ctx->ac, ret, num_components);
   }

   ret = LLVMBuildBitCast(ctx->ac.builder, ret,
                          get_def_type(ctx, &instr->dest.ssa), "");

   return exit_waterfall(ctx, &wctx, ret);
}
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += size *
                            nir_src_as_uint(path.path[idx_lvl]->arr.index);
         } else {
            LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
                                                  get_src(ctx, path.path[idx_lvl]->arr.index), "");
            if (offset)
               offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, const_offset, 0),
                            "");

   *const_out = const_offset;
   *indir_out = offset;
}
static LLVMValueRef
load_tess_varyings(struct ac_nir_context *ctx,
                   nir_intrinsic_instr *instr,
                   bool load_inputs)
{
   LLVMValueRef result;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef indir_index = NULL;
   unsigned const_index = 0;

   nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

   unsigned location = var->data.location;
   unsigned driver_location = var->data.driver_location;
   const bool is_patch = var->data.patch ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
   const bool is_compact = var->data.compact;

   get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                    false, NULL, is_patch ? NULL : &vertex_index,
                    &const_index, &indir_index);

   LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

   LLVMTypeRef src_component_type;
   if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
      src_component_type = LLVMGetElementType(dest_type);
   else
      src_component_type = dest_type;

   result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                         vertex_index, indir_index,
                                         const_index, location, driver_location,
                                         var->data.location_frac,
                                         instr->num_components,
                                         is_patch, is_compact, load_inputs);
   if (instr->dest.ssa.bit_size == 16) {
      result = ac_to_integer(&ctx->ac, result);
      result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
   }
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                                   nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef values[8];
   int idx = 0;
   int ve = instr->dest.ssa.num_components;
   unsigned comp = 0;
   LLVMValueRef indir_index;
   LLVMValueRef ret;
   unsigned const_index;
   unsigned stride = 4;
   int mode = deref->mode;

   if (var) {
      bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
                   var->data.mode == nir_var_shader_in;
      idx = var->data.driver_location;
      comp = var->data.location_frac;
      mode = var->data.mode;

      get_deref_offset(ctx, deref, vs_in, NULL, NULL,
                       &const_index, &indir_index);

      if (var->data.compact) {
         stride = 1;
         const_index += comp;
         comp = 0;
      }
   }

   if (instr->dest.ssa.bit_size == 64 &&
       (deref->mode == nir_var_shader_in ||
        deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp))
      ve *= 2;

   switch (mode) {
   case nir_var_shader_in:
      if (ctx->stage == MESA_SHADER_TESS_CTRL ||
          ctx->stage == MESA_SHADER_TESS_EVAL) {
         return load_tess_varyings(ctx, instr, true);
      }

      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
         LLVMValueRef indir_index;
         unsigned const_index, vertex_index;
         get_deref_offset(ctx, deref, false, &vertex_index, NULL,
                          &const_index, &indir_index);
         assert(indir_index == NULL);

         return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                      var->data.driver_location,
                                      var->data.location_frac,
                                      instr->num_components, vertex_index, const_index, type);
      }

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type,
                  ctx->stage == MESA_SHADER_VERTEX);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->inputs + idx + chan, count,
                  stride, false, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else
            values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < ve; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
         }
      }
      break;
   case nir_var_shader_out:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         return load_tess_varyings(ctx, instr, false);
      }

      if (ctx->stage == MESA_SHADER_FRAGMENT &&
          var->data.fb_fetch_output &&
          ctx->abi->emit_fbfetch)
         return ctx->abi->emit_fbfetch(ctx->abi);

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder,
                                         ctx->abi->outputs[idx + chan + const_index * stride],
                                         "");
         }
      }
      break;
   case nir_var_mem_global: {
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;
      int elem_size_bytes = ac_get_elem_bits(&ctx->ac, result_type) / 8;
      bool split_loads = ctx->ac.chip_class == GFX6 && elem_size_bytes < 4;

      if (stride != natural_stride || split_loads) {
         if (LLVMGetTypeKind(result_type) == LLVMVectorTypeKind)
            result_type = LLVMGetElementType(result_type);

         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
            values[i] = LLVMBuildLoad(ctx->ac.builder,
                                      ac_build_gep_ptr(&ctx->ac, address, offset), "");
         }
         return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
         return val;
      }
   }
   default:
      unreachable("unhandled variable mode");
   }
   ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7002);
   }

   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef temp_ptr, value;
   int idx = 0;
   unsigned comp = 0;
   LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
   int writemask = instr->const_index[0];
   LLVMValueRef indir_index;
   unsigned const_index;

   if (var) {
      get_deref_offset(ctx, deref, false,
                       NULL, NULL, &const_index, &indir_index);
      idx = var->data.driver_location;
      comp = var->data.location_frac;

      if (var->data.compact) {
         const_index += comp;
         comp = 0;
      }
   }

   if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
       (deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp)) {

      src = LLVMBuildBitCast(ctx->ac.builder, src,
                             LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
                             "");

      writemask = widen_mask(writemask, 2);
   }

   writemask = writemask << comp;

   switch (deref->mode) {
   case nir_var_shader_out:

      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         LLVMValueRef vertex_index = NULL;
         LLVMValueRef indir_index = NULL;
         unsigned const_index = 0;
         const bool is_patch = var->data.patch ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

         get_deref_offset(ctx, deref, false, NULL,
                          is_patch ? NULL : &vertex_index,
                          &const_index, &indir_index);

         ctx->abi->store_tcs_outputs(ctx->abi, var,
                                     vertex_index, indir_index,
                                     const_index, src, writemask);
         break;
      }

      for (unsigned chan = 0; chan < 8; chan++) {
         int stride = 4;
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

         if (var->data.compact)
            stride = 1;
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
                                        count, stride, tmp_vec);

         } else {
            temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < 8; chan++) {
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan);
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  4, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
                                        count, 4, tmp_vec);
         } else {
            temp_ptr = ctx->locals[idx + chan + const_index * 4];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;

   case nir_var_mem_global: {
      int writemask = instr->const_index[0];
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMValueRef val = get_src(ctx, instr->src[1]);

      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;
      int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(val)) / 8;
      bool split_stores = ctx->ac.chip_class == GFX6 && elem_size_bytes < 4;

      LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                             LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
      address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

      if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
          stride == natural_stride && !split_stores) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         val = LLVMBuildBitCast(ctx->ac.builder, val,
                                LLVMGetElementType(LLVMTypeOf(address)), "");
         LLVMBuildStore(ctx->ac.builder, val, address);
      } else {
         LLVMTypeRef val_type = LLVMTypeOf(val);
         if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind)
            val_type = LLVMGetElementType(val_type);

         LLVMTypeRef ptr_type = LLVMPointerType(val_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         for (unsigned chan = 0; chan < 4; chan++) {
            if (!(writemask & (1 << chan)))
               continue;

            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

            LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
            LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
                                                    chan);
            src = LLVMBuildBitCast(ctx->ac.builder, src,
                                   LLVMGetElementType(LLVMTypeOf(ptr)), "");
            LLVMBuildStore(ctx->ac.builder, src, ptr);
         }
      }
      break;
   }
   default:
      abort();
      break;
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7002);
}
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_BUF:
      return 1;
   case GLSL_SAMPLER_DIM_1D:
      return array ? 2 : 1;
   case GLSL_SAMPLER_DIM_2D:
      return array ? 3 : 2;
   case GLSL_SAMPLER_DIM_MS:
      return array ? 4 : 3;
   case GLSL_SAMPLER_DIM_3D:
   case GLSL_SAMPLER_DIM_CUBE:
      return 3;
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_SUBPASS:
      return 2;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return 2;
   default:
      break;
   }
   return 0;
}
static LLVMValueRef
adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                LLVMValueRef coord_x, LLVMValueRef coord_y,
                                LLVMValueRef coord_z,
                                LLVMValueRef sample_index,
                                LLVMValueRef fmask_desc_ptr)
{
   unsigned sample_chan = coord_z ? 3 : 2;
   LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
   addr[sample_chan] = sample_index;

   ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
   return addr[sample_chan];
}
static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
   assert(instr->src[0].is_ssa);
   return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
                                         const nir_intrinsic_instr *instr,
                                         LLVMValueRef dynamic_index,
                                         enum ac_descriptor_type desc_type,
                                         bool write)
{
   nir_deref_instr *deref_instr =
      instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
      nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

   return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
}
static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             LLVMValueRef dynamic_desc_index,
                             struct ac_image_args *args,
                             enum glsl_sampler_dim dim,
                             bool is_array)
{
   LLVMValueRef src0 = get_src(ctx, instr->src[1]);
   LLVMValueRef masks[] = {
      LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
      LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
   };
   LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

   int count;
   ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
                                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
   assert(!add_frag_pos && "Input attachments should be lowered by this point.");
   count = image_type_to_components_count(dim, is_array);

   if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
                 instr->intrinsic == nir_intrinsic_bindless_image_load)) {
      LLVMValueRef fmask_load_address[3];

      fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
      if (is_array)
         fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
      else
         fmask_load_address[2] = NULL;

      sample_index = adjust_sample_index_using_fmask(&ctx->ac,
                                                     fmask_load_address[0],
                                                     fmask_load_address[1],
                                                     fmask_load_address[2],
                                                     sample_index,
                                                     get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                                                                      AC_DESC_FMASK, &instr->instr, dynamic_desc_index, true, false));
   }
   if (count == 1 && !gfx9_1d) {
      if (instr->src[1].ssa->num_components)
         args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      else
         args->coords[0] = src0;
   } else {
      int chan;
      if (is_ms)
         count--;
      for (chan = 0; chan < count; ++chan) {
         args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
      }

      if (gfx9_1d) {
         if (is_array) {
            args->coords[2] = args->coords[1];
            args->coords[1] = ctx->ac.i32_0;
         } else
            args->coords[1] = ctx->ac.i32_0;
         count++;
      }
      if (ctx->ac.chip_class == GFX9 &&
          dim == GLSL_SAMPLER_DIM_2D &&
          !is_array) {
         /* The hw can't bind a slice of a 3D image as a 2D
          * image, because it ignores BASE_ARRAY if the target
          * is 3D. The workaround is to read BASE_ARRAY and set
          * it as the 3rd address operand for all 2D images.
          */
         LLVMValueRef first_layer, const5, mask;

         const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
         mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
         first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
         first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

         args->coords[count] = first_layer;
         count++;
      }

      if (is_ms) {
         args->coords[count] = sample_index;
         count++;
      }
   }
}
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
                                                const nir_intrinsic_instr *instr,
                                                LLVMValueRef dynamic_index,
                                                bool write, bool atomic)
{
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
   if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
      LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

      LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                    LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                    elem_count, stride, "");

      rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                    LLVMConstInt(ctx->ac.i32, 2, 0), "");
   }
   return rsrc;
}
static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
                                          struct waterfall_context *wctx,
                                          const nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref_instr = NULL;

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
      deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);

   LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
   return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, false, false);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
      unsigned num_channels = util_last_bit(mask);
      LLVMValueRef rsrc, vindex;

      rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
      vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      assert(instr->dest.is_ssa);
      bool can_speculate = access & ACCESS_CAN_REORDER;
      res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
                                        ctx->ac.i32_0, num_channels,
                                        args.cache_policy,
                                        can_speculate,
                                        instr->dest.ssa.bit_size == 16);
      res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

      res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
      res = ac_to_integer(&ctx->ac, res);
   } else {
      bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;

      args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[3]);
      args.dmask = 15;
      args.attributes = AC_FUNC_ATTR_READONLY;

      assert(instr->dest.is_ssa);
      args.d16 = instr->dest.ssa.bit_size == 16;

      res = ac_build_image_opcode(&ctx->ac, &args);
   }
   return exit_waterfall(ctx, &wctx, res);
}
static void visit_image_store(struct ac_nir_context *ctx,
                              const nir_intrinsic_instr *instr,
                              bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7003);
   }

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   bool writeonly_memory = access & ACCESS_NON_READABLE;
   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, false);
      LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      unsigned src_channels = ac_get_llvm_num_components(src);
      LLVMValueRef vindex;

      if (src_channels == 3)
         src = ac_build_expand_to_vec4(&ctx->ac, src, 3);

      vindex = LLVMBuildExtractElement(ctx->ac.builder,
                                       get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex,
                                   ctx->ac.i32_0, args.cache_policy);
   } else {
      bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;

      args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
      args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[4]);
      args.dmask = 15;
      args.d16 = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(args.data[0])) == 16;

      ac_build_image_opcode(&ctx->ac, &args);
   }

   exit_waterfall(ctx, &wctx, NULL);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7003);
}
static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
                                       const nir_intrinsic_instr *instr,
                                       bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7004);
   }

   LLVMValueRef params[7];
   int param_count = 0;

   bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
                  instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
   const char *atomic_name;
   char intrinsic_name[64];
   enum ac_atomic_op atomic_subop;
   ASSERTED int length;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      if (instr->intrinsic == nir_intrinsic_bindless_image_atomic_imin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_imax ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umax) {
         ASSERTED const GLenum format = nir_intrinsic_format(instr);
         assert(format == GL_R32UI || format == GL_R32I);
      }
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
      atomic_name = "add";
      atomic_subop = ac_atomic_add;
      break;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
      atomic_name = "smin";
      atomic_subop = ac_atomic_smin;
      break;
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
      atomic_name = "umin";
      atomic_subop = ac_atomic_umin;
      break;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
      atomic_name = "smax";
      atomic_subop = ac_atomic_smax;
      break;
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
      atomic_name = "umax";
      atomic_subop = ac_atomic_umax;
      break;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
      atomic_name = "and";
      atomic_subop = ac_atomic_and;
      break;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
      atomic_name = "or";
      atomic_subop = ac_atomic_or;
      break;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      atomic_name = "xor";
      atomic_subop = ac_atomic_xor;
      break;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
      atomic_name = "swap";
      atomic_subop = ac_atomic_swap;
      break;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      atomic_name = "cmpswap";
      atomic_subop = 0; /* not used */
      break;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_inc_wrap: {
      atomic_name = "inc";
      atomic_subop = ac_atomic_inc_wrap;
      /* ATOMIC_INC instruction does:
       *      value = (value + 1) % (data + 1)
       * but we want:
       *      value = (value + 1) % data
       * So replace 'data' by 'data - 1'.
       */
      ctx->ssa_defs[instr->src[3].ssa->index] =
         LLVMBuildSub(ctx->ac.builder,
                      ctx->ssa_defs[instr->src[3].ssa->index],
                      ctx->ac.i32_1, "");
      break;
   }
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      atomic_name = "dec";
      atomic_subop = ac_atomic_dec_wrap;
      break;
   default:
      abort();
   }

   if (cmpswap)
      params[param_count++] = get_src(ctx, instr->src[4]);
   params[param_count++] = get_src(ctx, instr->src[3]);

   LLVMValueRef result;
   if (dim == GLSL_SAMPLER_DIM_BUF) {
      params[param_count++] = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, true);
      params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                      ctx->ac.i32_0, ""); /* vindex */
      params[param_count++] = ctx->ac.i32_0; /* voffset */
      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy
          * with LLVM 8, see r358579.
          */
         params[param_count++] = ctx->ac.i32_0; /* soffset */
         params[param_count++] = ctx->ac.i32_0; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
      } else {
         params[param_count++] = ctx->ac.i1false; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.buffer.atomic.%s", atomic_name);
      }

      assert(length < sizeof(intrinsic_name));
      result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
                                  params, param_count, 0);
   } else {
      struct ac_image_args args = {};
      args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
      args.atomic = atomic_subop;
      args.data[0] = params[0];
      if (cmpswap)
         args.data[1] = params[1];
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

      result = ac_build_image_opcode(&ctx->ac, &args);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7004);
   return result;
}
static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx,
                                        nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);

   LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);

   return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      res = get_buffer_size(ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
   } else {

      struct ac_image_args args = { 0 };

      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      args.dmask = 0xf;
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      args.opcode = ac_image_get_resinfo;
      args.lod = ctx->ac.i32_0;
      args.attributes = AC_FUNC_ATTR_READNONE;

      res = ac_build_image_opcode(&ctx->ac, &args);

      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

      if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
         LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
         LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
      }

      if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
         LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
                                      ctx->ac.i32_1, "");
      }
   }
   return exit_waterfall(ctx, &wctx, res);
}
static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
   unsigned wait_flags = 0;

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
      wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_shared:
      wait_flags = AC_WAIT_LGKM;
      break;
   default:
      break;
   }

   ac_build_waitcnt(ac, wait_flags);
}
void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }
   ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_discard);
      cond = ctx->ac.i1false;
   }

   ac_build_kill_if_false(&ctx->ac, cond);
}
static void emit_demote(struct ac_nir_context *ctx,
                        const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_demote_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_demote);
      cond = ctx->ac.i1false;
   }

   /* Kill immediately while maintaining WQM. */
   ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));

   LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
   mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
   LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
   LLVMValueRef result;
   LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
   result = LLVMBuildAnd(ctx->ac.builder,
                         ac_get_arg(&ctx->ac, ctx->args->tg_size),
                         LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

   if (ctx->ac.wave_size == 32)
      result = LLVMBuildLShr(ctx->ac.builder, result,
                             LLVMConstInt(ctx->ac.i32, 1, false), "");

   return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      LLVMValueRef result;
      result = LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
      return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 0, false);
   }
}
static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      return LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 1, false);
   }
}
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
   LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
   const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

   /* The second argument is whether cttz(0) should be defined, but we do not care. */
   LLVMValueRef args[] = {active_set, ctx->ac.i1false};
   LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                            ctx->ac.iN_wavemask, args, 2,
                                            AC_FUNC_ATTR_NOUNWIND |
                                            AC_FUNC_ATTR_READNONE);

   return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
   LLVMValueRef values[4], derived_ptr, index, ret;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                     instr->dest.ssa.bit_size);

   for (int chan = 0; chan < instr->num_components; chan++) {
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
      values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
   }

   ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
   LLVMValueRef derived_ptr, data, index;
   LLVMBuilderRef builder = ctx->ac.builder;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                     instr->src[0].ssa->bit_size);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   int writemask = nir_intrinsic_write_mask(instr);
   for (int chan = 0; chan < 4; chan++) {
      if (!(writemask & (1 << chan))) {
         continue;
      }
      data = ac_llvm_extract_elem(&ctx->ac, src, chan);
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
      LLVMBuildStore(builder, data, derived_ptr);
   }
}
static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx,
                                     const nir_intrinsic_instr *instr,
                                     LLVMValueRef ptr, int src_idx)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7005);
   }

   LLVMValueRef result;
   LLVMValueRef src = get_src(ctx, instr->src[src_idx]);

   const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
      if (deref->mode == nir_var_mem_global) {
         /* use "singlethread" sync scope to implement relaxed ordering */
         sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";

         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(src), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
         ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
      }
   }

   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
       instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
      LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
      result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
      result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
   } else {
      LLVMAtomicRMWBinOp op;
      switch (instr->intrinsic) {
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_deref_atomic_add:
         op = LLVMAtomicRMWBinOpAdd;
         break;
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_deref_atomic_umin:
         op = LLVMAtomicRMWBinOpUMin;
         break;
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_deref_atomic_umax:
         op = LLVMAtomicRMWBinOpUMax;
         break;
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_deref_atomic_imin:
         op = LLVMAtomicRMWBinOpMin;
         break;
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_deref_atomic_imax:
         op = LLVMAtomicRMWBinOpMax;
         break;
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_deref_atomic_and:
         op = LLVMAtomicRMWBinOpAnd;
         break;
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_deref_atomic_or:
         op = LLVMAtomicRMWBinOpOr;
         break;
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_deref_atomic_xor:
         op = LLVMAtomicRMWBinOpXor;
         break;
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_deref_atomic_exchange:
         op = LLVMAtomicRMWBinOpXchg;
         break;
      default:
         return NULL;
      }

      result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7005);
   return result;
}
static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
   LLVMValueRef values[2];
   LLVMValueRef pos[2];

   pos[0] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
   pos[1] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

   values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
   values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
   return ac_build_gather_values(&ctx->ac, values, 2);
}
static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx,
                                        enum glsl_interp_mode interp, unsigned location)
{
   switch (interp) {
   case INTERP_MODE_FLAT:
   default:
      return NULL;
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->persp_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->persp_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->linear_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->linear_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
      break;
   }
   return NULL;
}
static LLVMValueRef barycentric_center(struct ac_nir_context *ctx,
                                       unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx,
                                       unsigned mode,
                                       LLVMValueRef offset)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   LLVMValueRef src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
   LLVMValueRef src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

   LLVMValueRef ij_out[2];
   LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

   /*
    * take the I then J parameters, and the DDX/Y for it, and
    * calculate the IJ inputs for the interpolator.
    * temp1 = ddx * offset/sample.x + I;
    * interp_param.I = ddy * offset/sample.y + temp1;
    * temp1 = ddx * offset/sample.x + J;
    * interp_param.J = ddy * offset/sample.y + temp1;
    */
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
      LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
      LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, ix_ll, "");
      LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, iy_ll, "");
      LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                       interp_param, ix_ll, "");
      LLVMValueRef temp1, temp2;

      interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                   ctx->ac.f32, "");

      temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
      temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

      ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                   temp2, ctx->ac.i32, "");
   }
   interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx,
                                         unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_at_sample(struct ac_nir_context *ctx,
                                          unsigned mode,
                                          LLVMValueRef sample_id)
{
   if (ctx->abi->interp_at_sample_force_center)
      return barycentric_center(ctx, mode);

   LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

   /* fetch sample ID */
   LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

   LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
   src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
   LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
   src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
   LLVMValueRef coords[] = { src_c0, src_c1 };
   LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

   return barycentric_offset(ctx, mode, offset);
}
static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx,
                                       unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_model(struct ac_nir_context *ctx)
{
   return LLVMBuildBitCast(ctx->ac.builder,
                           ac_get_arg(&ctx->ac, ctx->args->pull_model),
                           ctx->ac.v3i32, "");
}
static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx,
                                            LLVMValueRef interp_param,
                                            unsigned index, unsigned comp_start,
                                            unsigned num_components,
                                            unsigned bitsize)
{
   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
   LLVMValueRef interp_param_f;

   interp_param_f = LLVMBuildBitCast(ctx->ac.builder,
                                     interp_param, ctx->ac.v2f32, "");
   LLVMValueRef i = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param_f, ctx->ac.i32_0, "");
   LLVMValueRef j = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param_f, ctx->ac.i32_1, "");

   /* Workaround for issue 2647: kill threads with infinite interpolation coeffs */
   if (ctx->verified_interp &&
       !_mesa_hash_table_search(ctx->verified_interp, interp_param)) {
      LLVMValueRef args[2];
      args[0] = i;
      args[1] = LLVMConstInt(ctx->ac.i32, S_NAN | Q_NAN | N_INFINITY | P_INFINITY, false);
      LLVMValueRef cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f32", ctx->ac.i1,
                                             args, 2, AC_FUNC_ATTR_READNONE);
      ac_build_kill_if_false(&ctx->ac, LLVMBuildNot(ctx->ac.builder, cond, ""));
      _mesa_hash_table_insert(ctx->verified_interp, interp_param, interp_param);
   }

   LLVMValueRef values[4];
   assert(bitsize == 16 || bitsize == 32);
   for (unsigned comp = 0; comp < num_components; comp++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
      if (bitsize == 16) {
         values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
                                               ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      } else {
         values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
                                           ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      }
   }

   return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
static LLVMValueRef load_input(struct ac_nir_context *ctx,
                               nir_intrinsic_instr *instr)
{
    unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

    /* We only lower inputs for fragment shaders ATM */
    ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
    assert(offset);
    assert(offset[0].i32 == 0);

    unsigned component = nir_intrinsic_component(instr);
    unsigned index = nir_intrinsic_base(instr);
    unsigned vertex_id = 2; /* P0 */

    if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
        nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

        switch (src0[0].i32) {
        case 0:
            vertex_id = 2;
            break;
        case 1:
            vertex_id = 0;
            break;
        case 2:
            vertex_id = 1;
            break;
        default:
            unreachable("Invalid vertex index");
        }
    }

    LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
    LLVMValueRef values[8];

    /* Each component of a 64-bit value takes up two GL-level channels. */
    unsigned num_components = instr->dest.ssa.num_components;
    unsigned bit_size = instr->dest.ssa.bit_size;
    unsigned channels =
        bit_size == 64 ? num_components * 2 : num_components;

    for (unsigned chan = 0; chan < channels; chan++) {
        if (component + chan > 4)
            attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
        LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
        values[chan] = ac_build_fs_interp_mov(&ctx->ac,
                                              LLVMConstInt(ctx->ac.i32, vertex_id, false),
                                              llvm_chan,
                                              attr_number,
                                              ac_get_arg(&ctx->ac, ctx->args->prim_mask));
        values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
        values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
                                               bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
    }

    LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
    if (bit_size == 64) {
        LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
            LLVMVectorType(ctx->ac.i64, num_components);
        result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
    }
    return result;
}
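
/* Main NIR intrinsic dispatcher. Most cases either read a preloaded shader
 * argument (ac_get_arg), call back into the driver through ctx->abi, or
 * forward to a visit_* helper; if a value is produced it is recorded in
 * ctx->ssa_defs at the bottom of the function. */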
static void visit_intrinsic(struct ac_nir_context *ctx,
                            nir_intrinsic_instr *instr)
{
    LLVMValueRef result = NULL;

    switch (instr->intrinsic) {
    case nir_intrinsic_ballot:
        result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
        if (ctx->ac.ballot_mask_bits > ctx->ac.wave_size)
            result = LLVMBuildZExt(ctx->ac.builder, result, ctx->ac.iN_ballotmask, "");
        break;
    case nir_intrinsic_read_invocation:
        result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]),
                                   get_src(ctx, instr->src[1]));
        break;
    case nir_intrinsic_read_first_invocation:
        result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
        break;
    case nir_intrinsic_load_subgroup_invocation:
        result = ac_get_thread_id(&ctx->ac);
        break;
    case nir_intrinsic_load_work_group_id: {
        LLVMValueRef values[3];

        for (int i = 0; i < 3; i++) {
            values[i] = ctx->args->workgroup_ids[i].used ?
                ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]) : ctx->ac.i32_0;
        }

        result = ac_build_gather_values(&ctx->ac, values, 3);
        break;
    }
    case nir_intrinsic_load_base_vertex:
    case nir_intrinsic_load_first_vertex:
        result = ctx->abi->load_base_vertex(ctx->abi);
        break;
    case nir_intrinsic_load_local_group_size:
        result = ctx->abi->load_local_group_size(ctx->abi);
        break;
    case nir_intrinsic_load_vertex_id:
        result = LLVMBuildAdd(ctx->ac.builder,
                              ac_get_arg(&ctx->ac, ctx->args->vertex_id),
                              ac_get_arg(&ctx->ac, ctx->args->base_vertex), "");
        break;
    case nir_intrinsic_load_vertex_id_zero_base: {
        result = ctx->abi->vertex_id;
        break;
    }
    case nir_intrinsic_load_local_invocation_id: {
        result = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
        break;
    }
    case nir_intrinsic_load_base_instance:
        result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
        break;
    case nir_intrinsic_load_draw_id:
        result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
        break;
    case nir_intrinsic_load_view_index:
        result = ac_get_arg(&ctx->ac, ctx->args->view_index);
        break;
    case nir_intrinsic_load_invocation_id:
        if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            result = ac_unpack_param(&ctx->ac,
                                     ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids),
                                     8, 5);
        } else {
            if (ctx->ac.chip_class >= GFX10) {
                result = LLVMBuildAnd(ctx->ac.builder,
                                      ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id),
                                      LLVMConstInt(ctx->ac.i32, 127, 0), "");
            } else {
                result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
            }
        }
        break;
    case nir_intrinsic_load_primitive_id:
        if (ctx->stage == MESA_SHADER_GEOMETRY) {
            result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
        } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
        } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
            result = ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
        } else
            fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
        break;
    case nir_intrinsic_load_sample_id:
        result = ac_unpack_param(&ctx->ac,
                                 ac_get_arg(&ctx->ac, ctx->args->ancillary),
                                 8, 4);
        break;
    case nir_intrinsic_load_sample_pos:
        result = load_sample_pos(ctx);
        break;
    case nir_intrinsic_load_sample_mask_in:
        result = ctx->abi->load_sample_mask_in(ctx->abi);
        break;
    case nir_intrinsic_load_frag_coord: {
        LLVMValueRef values[4] = {
            ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]),
            ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
            ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
            ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
                          ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))
        };
        result = ac_to_integer(&ctx->ac,
                               ac_build_gather_values(&ctx->ac, values, 4));
        break;
    }
    case nir_intrinsic_load_layer_id:
        result = ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
        break;
    case nir_intrinsic_load_front_face:
        result = ac_get_arg(&ctx->ac, ctx->args->front_face);
        break;
    case nir_intrinsic_load_helper_invocation:
        result = ac_build_load_helper_invocation(&ctx->ac);
        break;
    case nir_intrinsic_is_helper_invocation:
        result = ac_build_is_helper_invocation(&ctx->ac);
        break;
    case nir_intrinsic_load_color0:
        result = ctx->abi->color0;
        break;
    case nir_intrinsic_load_color1:
        result = ctx->abi->color1;
        break;
    case nir_intrinsic_load_user_data_amd:
        assert(LLVMTypeOf(ctx->abi->user_data) == ctx->ac.v4i32);
        result = ctx->abi->user_data;
        break;
    case nir_intrinsic_load_instance_id:
        result = ctx->abi->instance_id;
        break;
    case nir_intrinsic_load_num_work_groups:
        result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
        break;
    case nir_intrinsic_load_local_invocation_index:
        result = visit_load_local_invocation_index(ctx);
        break;
    case nir_intrinsic_load_subgroup_id:
        result = visit_load_subgroup_id(ctx);
        break;
    case nir_intrinsic_load_num_subgroups:
        result = visit_load_num_subgroups(ctx);
        break;
    case nir_intrinsic_first_invocation:
        result = visit_first_invocation(ctx);
        break;
    case nir_intrinsic_load_push_constant:
        result = visit_load_push_constant(ctx, instr);
        break;
    case nir_intrinsic_vulkan_resource_index: {
        LLVMValueRef index = get_src(ctx, instr->src[0]);
        unsigned desc_set = nir_intrinsic_desc_set(instr);
        unsigned binding = nir_intrinsic_binding(instr);

        result = ctx->abi->load_resource(ctx->abi, index, desc_set,
                                         binding);
        break;
    }
    case nir_intrinsic_vulkan_resource_reindex:
        result = visit_vulkan_resource_reindex(ctx, instr);
        break;
    case nir_intrinsic_store_ssbo:
        visit_store_ssbo(ctx, instr);
        break;
    case nir_intrinsic_load_ssbo:
        result = visit_load_buffer(ctx, instr);
        break;
    case nir_intrinsic_ssbo_atomic_add:
    case nir_intrinsic_ssbo_atomic_imin:
    case nir_intrinsic_ssbo_atomic_umin:
    case nir_intrinsic_ssbo_atomic_imax:
    case nir_intrinsic_ssbo_atomic_umax:
    case nir_intrinsic_ssbo_atomic_and:
    case nir_intrinsic_ssbo_atomic_or:
    case nir_intrinsic_ssbo_atomic_xor:
    case nir_intrinsic_ssbo_atomic_exchange:
    case nir_intrinsic_ssbo_atomic_comp_swap:
        result = visit_atomic_ssbo(ctx, instr);
        break;
    case nir_intrinsic_load_ubo:
        result = visit_load_ubo_buffer(ctx, instr);
        break;
    case nir_intrinsic_get_buffer_size:
        result = visit_get_buffer_size(ctx, instr);
        break;
    case nir_intrinsic_load_deref:
        result = visit_load_var(ctx, instr);
        break;
    case nir_intrinsic_store_deref:
        visit_store_var(ctx, instr);
        break;
    case nir_intrinsic_load_shared:
        result = visit_load_shared(ctx, instr);
        break;
    case nir_intrinsic_store_shared:
        visit_store_shared(ctx, instr);
        break;
    case nir_intrinsic_bindless_image_samples:
    case nir_intrinsic_image_deref_samples:
        result = visit_image_samples(ctx, instr);
        break;
    case nir_intrinsic_bindless_image_load:
        result = visit_image_load(ctx, instr, true);
        break;
    case nir_intrinsic_image_deref_load:
        result = visit_image_load(ctx, instr, false);
        break;
    case nir_intrinsic_bindless_image_store:
        visit_image_store(ctx, instr, true);
        break;
    case nir_intrinsic_image_deref_store:
        visit_image_store(ctx, instr, false);
        break;
    case nir_intrinsic_bindless_image_atomic_add:
    case nir_intrinsic_bindless_image_atomic_imin:
    case nir_intrinsic_bindless_image_atomic_umin:
    case nir_intrinsic_bindless_image_atomic_imax:
    case nir_intrinsic_bindless_image_atomic_umax:
    case nir_intrinsic_bindless_image_atomic_and:
    case nir_intrinsic_bindless_image_atomic_or:
    case nir_intrinsic_bindless_image_atomic_xor:
    case nir_intrinsic_bindless_image_atomic_exchange:
    case nir_intrinsic_bindless_image_atomic_comp_swap:
    case nir_intrinsic_bindless_image_atomic_inc_wrap:
    case nir_intrinsic_bindless_image_atomic_dec_wrap:
        result = visit_image_atomic(ctx, instr, true);
        break;
    case nir_intrinsic_image_deref_atomic_add:
    case nir_intrinsic_image_deref_atomic_imin:
    case nir_intrinsic_image_deref_atomic_umin:
    case nir_intrinsic_image_deref_atomic_imax:
    case nir_intrinsic_image_deref_atomic_umax:
    case nir_intrinsic_image_deref_atomic_and:
    case nir_intrinsic_image_deref_atomic_or:
    case nir_intrinsic_image_deref_atomic_xor:
    case nir_intrinsic_image_deref_atomic_exchange:
    case nir_intrinsic_image_deref_atomic_comp_swap:
    case nir_intrinsic_image_deref_atomic_inc_wrap:
    case nir_intrinsic_image_deref_atomic_dec_wrap:
        result = visit_image_atomic(ctx, instr, false);
        break;
    case nir_intrinsic_bindless_image_size:
        result = visit_image_size(ctx, instr, true);
        break;
    case nir_intrinsic_image_deref_size:
        result = visit_image_size(ctx, instr, false);
        break;
    case nir_intrinsic_shader_clock:
        result = ac_build_shader_clock(&ctx->ac,
                                       nir_intrinsic_memory_scope(instr));
        break;
    case nir_intrinsic_discard:
    case nir_intrinsic_discard_if:
        emit_discard(ctx, instr);
        break;
    case nir_intrinsic_demote:
    case nir_intrinsic_demote_if:
        emit_demote(ctx, instr);
        break;
    case nir_intrinsic_memory_barrier:
    case nir_intrinsic_group_memory_barrier:
    case nir_intrinsic_memory_barrier_buffer:
    case nir_intrinsic_memory_barrier_image:
    case nir_intrinsic_memory_barrier_shared:
        emit_membar(&ctx->ac, instr);
        break;
    case nir_intrinsic_memory_barrier_tcs_patch:
        break;
    case nir_intrinsic_control_barrier:
        ac_emit_barrier(&ctx->ac, ctx->stage);
        break;
    case nir_intrinsic_shared_atomic_add:
    case nir_intrinsic_shared_atomic_imin:
    case nir_intrinsic_shared_atomic_umin:
    case nir_intrinsic_shared_atomic_imax:
    case nir_intrinsic_shared_atomic_umax:
    case nir_intrinsic_shared_atomic_and:
    case nir_intrinsic_shared_atomic_or:
    case nir_intrinsic_shared_atomic_xor:
    case nir_intrinsic_shared_atomic_exchange:
    case nir_intrinsic_shared_atomic_comp_swap: {
        LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                          instr->src[1].ssa->bit_size);
        result = visit_var_atomic(ctx, instr, ptr, 1);
        break;
    }
    case nir_intrinsic_deref_atomic_add:
    case nir_intrinsic_deref_atomic_imin:
    case nir_intrinsic_deref_atomic_umin:
    case nir_intrinsic_deref_atomic_imax:
    case nir_intrinsic_deref_atomic_umax:
    case nir_intrinsic_deref_atomic_and:
    case nir_intrinsic_deref_atomic_or:
    case nir_intrinsic_deref_atomic_xor:
    case nir_intrinsic_deref_atomic_exchange:
    case nir_intrinsic_deref_atomic_comp_swap: {
        LLVMValueRef ptr = get_src(ctx, instr->src[0]);
        result = visit_var_atomic(ctx, instr, ptr, 1);
        break;
    }
    case nir_intrinsic_load_barycentric_pixel:
        result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
        break;
    case nir_intrinsic_load_barycentric_centroid:
        result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
        break;
    case nir_intrinsic_load_barycentric_sample:
        result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
        break;
    case nir_intrinsic_load_barycentric_model:
        result = barycentric_model(ctx);
        break;
    case nir_intrinsic_load_barycentric_at_offset: {
        LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
        result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
        break;
    }
    case nir_intrinsic_load_barycentric_at_sample: {
        LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
        result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
        break;
    }
    case nir_intrinsic_load_interpolated_input: {
        /* We assume any indirect loads have been lowered away */
        ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
        assert(offset);
        assert(offset[0].i32 == 0);

        LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
        unsigned index = nir_intrinsic_base(instr);
        unsigned component = nir_intrinsic_component(instr);
        result = load_interpolated_input(ctx, interp_param, index,
                                         component,
                                         instr->dest.ssa.num_components,
                                         instr->dest.ssa.bit_size);
        break;
    }
    case nir_intrinsic_load_input:
    case nir_intrinsic_load_input_vertex:
        result = load_input(ctx, instr);
        break;
    case nir_intrinsic_emit_vertex:
        ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
        break;
    case nir_intrinsic_emit_vertex_with_counter: {
        unsigned stream = nir_intrinsic_stream_id(instr);
        LLVMValueRef next_vertex = get_src(ctx, instr->src[0]);
        ctx->abi->emit_vertex_with_counter(ctx->abi, stream,
                                           next_vertex,
                                           ctx->abi->outputs);
        break;
    }
    case nir_intrinsic_end_primitive:
    case nir_intrinsic_end_primitive_with_counter:
        ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
        break;
    case nir_intrinsic_load_tess_coord:
        result = ctx->abi->load_tess_coord(ctx->abi);
        break;
    case nir_intrinsic_load_tess_level_outer:
        result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, false);
        break;
    case nir_intrinsic_load_tess_level_inner:
        result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, false);
        break;
    case nir_intrinsic_load_tess_level_outer_default:
        result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, true);
        break;
    case nir_intrinsic_load_tess_level_inner_default:
        result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, true);
        break;
    case nir_intrinsic_load_patch_vertices_in:
        result = ctx->abi->load_patch_vertices_in(ctx->abi);
        break;
    case nir_intrinsic_vote_all: {
        LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
        result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
        break;
    }
    case nir_intrinsic_vote_any: {
        LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
        result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
        break;
    }
    case nir_intrinsic_shuffle:
        if (ctx->ac.chip_class == GFX8 ||
            ctx->ac.chip_class == GFX9 ||
            (ctx->ac.chip_class == GFX10 && ctx->ac.wave_size == 32)) {
            result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
                                      get_src(ctx, instr->src[1]));
        } else {
            LLVMValueRef src = get_src(ctx, instr->src[0]);
            LLVMValueRef index = get_src(ctx, instr->src[1]);
            LLVMTypeRef type = LLVMTypeOf(src);
            struct waterfall_context wctx;
            LLVMValueRef index_val;

            index_val = enter_waterfall(ctx, &wctx, index, true);

            src = LLVMBuildZExt(ctx->ac.builder, src,
                                ctx->ac.i32, "");

            result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane",
                                        ctx->ac.i32,
                                        (LLVMValueRef []) { src, index_val }, 2,
                                        AC_FUNC_ATTR_READNONE |
                                        AC_FUNC_ATTR_CONVERGENT);

            result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");

            result = exit_waterfall(ctx, &wctx, result);
        }
        break;
    case nir_intrinsic_reduce:
        result = ac_build_reduce(&ctx->ac,
                                 get_src(ctx, instr->src[0]),
                                 instr->const_index[0],
                                 instr->const_index[1]);
        break;
    case nir_intrinsic_inclusive_scan:
        result = ac_build_inclusive_scan(&ctx->ac,
                                         get_src(ctx, instr->src[0]),
                                         instr->const_index[0]);
        break;
    case nir_intrinsic_exclusive_scan:
        result = ac_build_exclusive_scan(&ctx->ac,
                                         get_src(ctx, instr->src[0]),
                                         instr->const_index[0]);
        break;
    case nir_intrinsic_quad_broadcast: {
        unsigned lane = nir_src_as_uint(instr->src[1]);
        result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                       lane, lane, lane, lane);
        break;
    }
    case nir_intrinsic_quad_swap_horizontal:
        result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
        break;
    case nir_intrinsic_quad_swap_vertical:
        result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
        break;
    case nir_intrinsic_quad_swap_diagonal:
        result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
        break;
    case nir_intrinsic_quad_swizzle_amd: {
        uint32_t mask = nir_intrinsic_swizzle_mask(instr);
        result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                       mask & 0x3, (mask >> 2) & 0x3,
                                       (mask >> 4) & 0x3, (mask >> 6) & 0x3);
        break;
    }
    case nir_intrinsic_masked_swizzle_amd: {
        uint32_t mask = nir_intrinsic_swizzle_mask(instr);
        result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
        break;
    }
    case nir_intrinsic_write_invocation_amd:
        result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
                                    get_src(ctx, instr->src[1]),
                                    get_src(ctx, instr->src[2]));
        break;
    case nir_intrinsic_mbcnt_amd:
        result = ac_build_mbcnt(&ctx->ac, get_src(ctx, instr->src[0]));
        break;
    case nir_intrinsic_load_scratch: {
        LLVMValueRef offset = get_src(ctx, instr->src[0]);
        LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                         offset);
        LLVMTypeRef comp_type =
            LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
        LLVMTypeRef vec_type =
            instr->dest.ssa.num_components == 1 ? comp_type :
            LLVMVectorType(comp_type, instr->dest.ssa.num_components);
        unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
        ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                               LLVMPointerType(vec_type, addr_space), "");
        result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
        break;
    }
    case nir_intrinsic_store_scratch: {
        LLVMValueRef offset = get_src(ctx, instr->src[1]);
        LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                         offset);
        LLVMTypeRef comp_type =
            LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
        unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
        ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                               LLVMPointerType(comp_type, addr_space), "");
        LLVMValueRef src = get_src(ctx, instr->src[0]);
        unsigned wrmask = nir_intrinsic_write_mask(instr);
        while (wrmask) {
            int start, count;
            u_bit_scan_consecutive_range(&wrmask, &start, &count);

            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
            LLVMValueRef offset_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &offset, 1, "");
            LLVMTypeRef vec_type =
                count == 1 ? comp_type : LLVMVectorType(comp_type, count);
            offset_ptr = LLVMBuildBitCast(ctx->ac.builder,
                                          offset_ptr,
                                          LLVMPointerType(vec_type, addr_space),
                                          "");
            LLVMValueRef offset_src =
                ac_extract_components(&ctx->ac, src, start, count);
            LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
        }
        break;
    }
    case nir_intrinsic_load_constant: {
        unsigned base = nir_intrinsic_base(instr);
        unsigned range = nir_intrinsic_range(instr);

        LLVMValueRef offset = get_src(ctx, instr->src[0]);
        offset = LLVMBuildAdd(ctx->ac.builder, offset,
                              LLVMConstInt(ctx->ac.i32, base, false), "");

        /* Clamp the offset to avoid out-of-bound access because global
         * instructions can't handle them.
         */
        LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
        LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
                                          offset, size, "");
        offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");

        LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data,
                                         offset);
        LLVMTypeRef comp_type =
            LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
        LLVMTypeRef vec_type =
            instr->dest.ssa.num_components == 1 ? comp_type :
            LLVMVectorType(comp_type, instr->dest.ssa.num_components);
        unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
        ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                               LLVMPointerType(vec_type, addr_space), "");
        result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
        break;
    }
    default:
        fprintf(stderr, "Unknown intrinsic: ");
        nir_print_instr(&instr->instr, stderr);
        fprintf(stderr, "\n");
        break;
    }
    if (result) {
        ctx->ssa_defs[instr->dest.ssa.index] = result;
    }
}
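
/* Bindless uniform handles live in a driver-managed buffer accessed as
 * UBO 0; each handle is 64 bits (8 bytes), which is why the index is
 * multiplied by 8 below. The exact buffer layout is part of the driver
 * ABI rather than anything mandated by NIR. */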
static LLVMValueRef
get_bindless_index_from_uniform(struct ac_nir_context *ctx,
                                unsigned base_index,
                                unsigned constant_index,
                                LLVMValueRef dynamic_index)
{
    LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
    LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
                                      LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

    /* Bindless uniforms are 64 bit, so multiply the index by 8 */
    index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
    offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

    LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

    LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
                                            NULL, 0, 0, true, true);

    return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
struct sampler_desc_address {
    unsigned descriptor_set;
    unsigned base_index; /* binding in vulkan */
    unsigned constant_index;
    LLVMValueRef dynamic_index;
    bool image;
    bool bindless;
};
static struct sampler_desc_address
get_sampler_desc_internal(struct ac_nir_context *ctx,
                          nir_deref_instr *deref_instr,
                          const nir_instr *instr,
                          bool image)
{
    LLVMValueRef index = NULL;
    unsigned constant_index = 0;
    unsigned descriptor_set;
    unsigned base_index;
    bool bindless = false;

    if (!deref_instr) {
        descriptor_set = 0;
        if (image) {
            nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
            base_index = 0;
            bindless = true;
            index = get_src(ctx, img_instr->src[0]);
        } else {
            nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
            int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
                                                     nir_tex_src_sampler_handle);
            if (sampSrcIdx != -1) {
                base_index = 0;
                bindless = true;
                index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
            } else {
                assert(tex_instr && !image);
                base_index = tex_instr->sampler_index;
            }
        }
    } else {
        while(deref_instr->deref_type != nir_deref_type_var) {
            if (deref_instr->deref_type == nir_deref_type_array) {
                unsigned array_size = glsl_get_aoa_size(deref_instr->type);
                if (!array_size)
                    array_size = 1;

                if (nir_src_is_const(deref_instr->arr.index)) {
                    constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
                } else {
                    LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

                    indirect = LLVMBuildMul(ctx->ac.builder, indirect,
                                            LLVMConstInt(ctx->ac.i32, array_size, false), "");

                    if (!index)
                        index = indirect;
                    else
                        index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
                }

                deref_instr = nir_src_as_deref(deref_instr->parent);
            } else if (deref_instr->deref_type == nir_deref_type_struct) {
                unsigned sidx = deref_instr->strct.index;
                deref_instr = nir_src_as_deref(deref_instr->parent);
                constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
            } else {
                unreachable("Unsupported deref type");
            }
        }
        descriptor_set = deref_instr->var->data.descriptor_set;

        if (deref_instr->var->data.bindless) {
            /* For now just assert on unhandled variable types */
            assert(deref_instr->var->data.mode == nir_var_uniform);

            base_index = deref_instr->var->data.driver_location;
            bindless = true;

            index = index ? index : ctx->ac.i32_0;
            index = get_bindless_index_from_uniform(ctx, base_index,
                                                    constant_index, index);
        } else
            base_index = deref_instr->var->data.binding;
    }
    return (struct sampler_desc_address) {
        .descriptor_set = descriptor_set,
        .base_index = base_index,
        .constant_index = constant_index,
        .dynamic_index = index,
        .image = image,
        .bindless = bindless,
    };
}
/* Extract any possibly divergent index into a separate value that can be fed
 * into get_sampler_desc with the same arguments. */
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image)
{
    struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
    return addr.dynamic_index;
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     LLVMValueRef index,
                                     bool image, bool write)
{
    struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
    return ctx->abi->load_sampler_desc(ctx->abi,
                                       addr.descriptor_set,
                                       addr.base_index,
                                       addr.constant_index, index,
                                       desc_type, addr.image, write, addr.bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
                                           LLVMValueRef res, LLVMValueRef samp)
{
    LLVMBuilderRef builder = ctx->ac.builder;
    LLVMValueRef img7, samp0;

    if (ctx->ac.chip_class >= GFX8)
        return samp;

    img7 = LLVMBuildExtractElement(builder, res,
                                   LLVMConstInt(ctx->ac.i32, 7, 0), "");
    samp0 = LLVMBuildExtractElement(builder, samp,
                                    LLVMConstInt(ctx->ac.i32, 0, 0), "");
    samp0 = LLVMBuildAnd(builder, samp0, img7, "");
    return LLVMBuildInsertElement(builder, samp, samp0,
                                  LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
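
/* Descriptor loads must be scalar (subgroup-uniform). When NIR marks a
 * texture or sampler index as non-uniform, tex_fetch_ptrs wraps it in a
 * waterfall loop (enter_waterfall/exit_waterfall): each iteration handles
 * the subset of lanes that share one index, so the descriptor read inside
 * the loop is uniform. */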
static void tex_fetch_ptrs(struct ac_nir_context *ctx,
                           nir_tex_instr *instr,
                           struct waterfall_context *wctx,
                           LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
                           LLVMValueRef *fmask_ptr)
{
    nir_deref_instr *texture_deref_instr = NULL;
    nir_deref_instr *sampler_deref_instr = NULL;
    int plane = -1;

    for (unsigned i = 0; i < instr->num_srcs; i++) {
        switch (instr->src[i].src_type) {
        case nir_tex_src_texture_deref:
            texture_deref_instr = nir_src_as_deref(instr->src[i].src);
            break;
        case nir_tex_src_sampler_deref:
            sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
            break;
        case nir_tex_src_plane:
            plane = nir_src_as_int(instr->src[i].src);
            break;
        default:
            break;
        }
    }

    LLVMValueRef texture_dynamic_index = get_sampler_desc_index(ctx, texture_deref_instr,
                                                                &instr->instr, false);
    if (!sampler_deref_instr)
        sampler_deref_instr = texture_deref_instr;

    LLVMValueRef sampler_dynamic_index = get_sampler_desc_index(ctx, sampler_deref_instr,
                                                                &instr->instr, false);
    if (instr->texture_non_uniform)
        texture_dynamic_index = enter_waterfall(ctx, wctx + 0, texture_dynamic_index, true);

    if (instr->sampler_non_uniform)
        sampler_dynamic_index = enter_waterfall(ctx, wctx + 1, sampler_dynamic_index, true);

    enum ac_descriptor_type main_descriptor = instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

    if (plane >= 0) {
        assert(instr->op != nir_texop_txf_ms &&
               instr->op != nir_texop_samples_identical);
        assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

        main_descriptor = AC_DESC_PLANE_0 + plane;
    }

    if (instr->op == nir_texop_fragment_mask_fetch) {
        /* The fragment mask is fetched from the compressed
         * multisampled surface.
         */
        main_descriptor = AC_DESC_FMASK;
    }

    *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr,
                                texture_dynamic_index, false, false);

    if (samp_ptr) {
        *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr,
                                     sampler_dynamic_index, false, false);
        if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
            *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
    }
    if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
                      instr->op == nir_texop_samples_identical))
        *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK,
                                      &instr->instr, texture_dynamic_index, false, false);
}
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
                                      LLVMValueRef coord)
{
    coord = ac_to_float(ctx, coord);
    coord = ac_build_round(ctx, coord);
    coord = ac_to_integer(ctx, coord);
    return coord;
}
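
/* visit_tex translates one NIR texture instruction in stages: collect the
 * sources, apply the coordinate/derivative/offset fixups the hardware
 * needs, emit the image intrinsic, then post-process the result (e.g. for
 * query_levels or the txs cube/1D-array adjustments). */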
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
    LLVMValueRef result = NULL;
    struct ac_image_args args = { 0 };
    LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
    LLVMValueRef ddx = NULL, ddy = NULL;
    unsigned offset_src = 0;
    struct waterfall_context wctx[2] = {{{0}}};

    tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);
    for (unsigned i = 0; i < instr->num_srcs; i++) {
        switch (instr->src[i].src_type) {
        case nir_tex_src_coord: {
            LLVMValueRef coord = get_src(ctx, instr->src[i].src);
            for (unsigned chan = 0; chan < instr->coord_components; ++chan)
                args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
            break;
        }
        case nir_tex_src_projector:
            break;
        case nir_tex_src_comparator:
            if (instr->is_shadow) {
                args.compare = get_src(ctx, instr->src[i].src);
                args.compare = ac_to_float(&ctx->ac, args.compare);
            }
            break;
        case nir_tex_src_offset:
            args.offset = get_src(ctx, instr->src[i].src);
            offset_src = i;
            break;
        case nir_tex_src_bias:
            args.bias = get_src(ctx, instr->src[i].src);
            break;
        case nir_tex_src_lod: {
            if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
                args.level_zero = true;
            else
                args.lod = get_src(ctx, instr->src[i].src);
            break;
        }
        case nir_tex_src_ms_index:
            sample_index = get_src(ctx, instr->src[i].src);
            break;
        case nir_tex_src_ms_mcs:
            break;
        case nir_tex_src_ddx:
            ddx = get_src(ctx, instr->src[i].src);
            break;
        case nir_tex_src_ddy:
            ddy = get_src(ctx, instr->src[i].src);
            break;
        case nir_tex_src_min_lod:
            args.min_lod = get_src(ctx, instr->src[i].src);
            break;
        case nir_tex_src_texture_offset:
        case nir_tex_src_sampler_offset:
        case nir_tex_src_plane:
        default:
            break;
        }
    }
    if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
        result = get_buffer_size(ctx, args.resource, true);
        goto write_result;
    }
    if (instr->op == nir_texop_texture_samples) {
        LLVMValueRef res, samples, is_msaa;
        LLVMValueRef default_sample;

        res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
        samples = LLVMBuildExtractElement(ctx->ac.builder, res,
                                          LLVMConstInt(ctx->ac.i32, 3, false), "");
        is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
                                LLVMConstInt(ctx->ac.i32, 28, false), "");
        is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa,
                               LLVMConstInt(ctx->ac.i32, 0xe, false), "");
        is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
                                LLVMConstInt(ctx->ac.i32, 0xe, false), "");

        samples = LLVMBuildLShr(ctx->ac.builder, samples,
                                LLVMConstInt(ctx->ac.i32, 16, false), "");
        samples = LLVMBuildAnd(ctx->ac.builder, samples,
                               LLVMConstInt(ctx->ac.i32, 0xf, false), "");
        samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1,
                               samples, "");

        if (ctx->abi->robust_buffer_access) {
            LLVMValueRef dword1, is_null_descriptor;

            /* Extract the second dword of the descriptor, if it's
             * all zero, then it's a null descriptor.
             */
            dword1 = LLVMBuildExtractElement(ctx->ac.builder, res,
                                             LLVMConstInt(ctx->ac.i32, 1, false), "");
            is_null_descriptor =
                LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, dword1,
                              LLVMConstInt(ctx->ac.i32, 0, false), "");
            default_sample =
                LLVMBuildSelect(ctx->ac.builder, is_null_descriptor,
                                ctx->ac.i32_0, ctx->ac.i32_1, "");
        } else {
            default_sample = ctx->ac.i32_1;
        }

        samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples,
                                  default_sample, "");
        result = samples;
        goto write_result;
    }
    if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
        LLVMValueRef offset[3], pack;
        for (unsigned chan = 0; chan < 3; ++chan)
            offset[chan] = ctx->ac.i32_0;

        unsigned num_components = ac_get_llvm_num_components(args.offset);
        for (unsigned chan = 0; chan < num_components; chan++) {
            offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
            offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
                                        LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
            if (chan)
                offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
                                            LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
        }
        pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
        pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
        args.offset = pack;
    }
    /* Section 8.23.1 (Depth Texture Comparison Mode) of the
     * OpenGL 4.5 spec says:
     *
     *    "If the texture’s internal format indicates a fixed-point
     *     depth texture, then D_t and D_ref are clamped to the
     *     range [0, 1]; otherwise no clamping is performed."
     *
     * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
     * so the depth comparison value isn't clamped for Z16 and
     * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
     * an explicitly clamped 32-bit float format.
     */
    if (args.compare &&
        ctx->ac.chip_class >= GFX8 &&
        ctx->ac.chip_class <= GFX9 &&
        ctx->abi->clamp_shadow_reference) {
        LLVMValueRef upgraded, clamped;

        upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
                                           LLVMConstInt(ctx->ac.i32, 3, false), "");
        upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
                                 LLVMConstInt(ctx->ac.i32, 29, false), "");
        upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
        clamped = ac_build_clamp(&ctx->ac, args.compare);
        args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped,
                                       args.compare, "");
    }
    /* pack derivatives */
    if (ddx || ddy) {
        int num_src_deriv_channels, num_dest_deriv_channels;
        switch (instr->sampler_dim) {
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
            num_src_deriv_channels = 3;
            num_dest_deriv_channels = 3;
            break;
        case GLSL_SAMPLER_DIM_2D:
        default:
            num_src_deriv_channels = 2;
            num_dest_deriv_channels = 2;
            break;
        case GLSL_SAMPLER_DIM_1D:
            num_src_deriv_channels = 1;
            if (ctx->ac.chip_class == GFX9) {
                num_dest_deriv_channels = 2;
            } else {
                num_dest_deriv_channels = 1;
            }
            break;
        }

        for (unsigned i = 0; i < num_src_deriv_channels; i++) {
            args.derivs[i] = ac_to_float(&ctx->ac,
                                         ac_llvm_extract_elem(&ctx->ac, ddx, i));
            args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
                                                                   ac_llvm_extract_elem(&ctx->ac, ddy, i));
        }
        for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
            args.derivs[i] = ctx->ac.f32_0;
            args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
        }
    }
    if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
        for (unsigned chan = 0; chan < instr->coord_components; chan++)
            args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
        if (instr->coord_components == 3)
            args.coords[3] = LLVMGetUndef(ctx->ac.f32);
        ac_prepare_cube_coords(&ctx->ac,
                               instr->op == nir_texop_txd, instr->is_array,
                               instr->op == nir_texop_lod, args.coords, args.derivs);
    }
    /* Texture coordinates fixups */
    if (instr->coord_components > 1 &&
        instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
        instr->is_array &&
        instr->op != nir_texop_txf) {
        args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
    }

    if (instr->coord_components > 2 &&
        (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
         instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
         instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
         instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
        instr->is_array &&
        instr->op != nir_texop_txf &&
        instr->op != nir_texop_txf_ms &&
        instr->op != nir_texop_fragment_fetch &&
        instr->op != nir_texop_fragment_mask_fetch) {
        args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
    }

    if (ctx->ac.chip_class == GFX9 &&
        instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
        instr->op != nir_texop_lod) {
        LLVMValueRef filler;
        if (instr->op == nir_texop_txf)
            filler = ctx->ac.i32_0;
        else
            filler = LLVMConstReal(ctx->ac.f32, 0.5);

        if (instr->is_array)
            args.coords[2] = args.coords[1];
        args.coords[1] = filler;
    }
    /* Pack sample index */
    if (sample_index && (instr->op == nir_texop_txf_ms ||
                         instr->op == nir_texop_fragment_fetch))
        args.coords[instr->coord_components] = sample_index;

    if (instr->op == nir_texop_samples_identical) {
        struct ac_image_args txf_args = { 0 };
        memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));

        txf_args.dmask = 0xf;
        txf_args.resource = fmask_ptr;
        txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
        result = build_tex_intrinsic(ctx, instr, &txf_args);

        result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
        result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
        goto write_result;
    }
    if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
         instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
        instr->op != nir_texop_txs &&
        instr->op != nir_texop_fragment_fetch &&
        instr->op != nir_texop_fragment_mask_fetch) {
        unsigned sample_chan = instr->is_array ? 3 : 2;
        args.coords[sample_chan] = adjust_sample_index_using_fmask(
            &ctx->ac, args.coords[0], args.coords[1],
            instr->is_array ? args.coords[2] : NULL,
            args.coords[sample_chan], fmask_ptr);
    }

    if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
        int num_offsets = instr->src[offset_src].src.ssa->num_components;
        num_offsets = MIN2(num_offsets, instr->coord_components);
        for (unsigned i = 0; i < num_offsets; ++i) {
            args.coords[i] = LLVMBuildAdd(
                ctx->ac.builder, args.coords[i],
                LLVMConstInt(ctx->ac.i32, nir_src_comp_as_uint(instr->src[offset_src].src, i), false), "");
        }
        args.offset = NULL;
    }
    /* DMASK was repurposed for GATHER4. 4 components are always
     * returned and DMASK works like a swizzle - it selects
     * the component to fetch. The only valid DMASK values are
     * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
     * (red,red,red,red) etc.) The ISA document doesn't mention
     * this.
     */
    args.dmask = 0xf;
    if (instr->op == nir_texop_tg4) {
        if (instr->is_shadow)
            args.dmask = 1;
        else
            args.dmask = 1 << instr->component;
    }

    if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
        args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
        args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
    }
    /* Adjust the number of coordinates because we only need (x,y) for 2D
     * multisampled images and (x,y,layer) for 2D multisampled layered
     * images or for multisampled input attachments.
     */
    if (instr->op == nir_texop_fragment_mask_fetch) {
        if (args.dim == ac_image_2dmsaa) {
            args.dim = ac_image_2d;
        } else {
            assert(args.dim == ac_image_2darraymsaa);
            args.dim = ac_image_2darray;
        }
    }

    assert(instr->dest.is_ssa);
    args.d16 = instr->dest.ssa.bit_size == 16;
    result = build_tex_intrinsic(ctx, instr, &args);

    if (instr->op == nir_texop_query_levels)
        result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
    else if (instr->is_shadow && instr->is_new_style_shadow &&
             instr->op != nir_texop_txs && instr->op != nir_texop_lod &&
             instr->op != nir_texop_tg4)
        result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
    else if (instr->op == nir_texop_txs &&
             instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
             instr->is_array) {
        LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
        LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
        LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
        z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
        result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
    } else if (ctx->ac.chip_class == GFX9 &&
               instr->op == nir_texop_txs &&
               instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
               instr->is_array) {
        LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
        LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
        result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
                                        ctx->ac.i32_1, "");
    } else if (instr->dest.ssa.num_components != 4)
        result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);

write_result:
    if (result) {
        assert(instr->dest.is_ssa);
        result = ac_to_integer(&ctx->ac, result);

        for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
            result = exit_waterfall(ctx, wctx + i, result);
        }

        ctx->ssa_defs[instr->dest.ssa.index] = result;
    }
}
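
/* Phi nodes are translated in two passes: visit_phi creates an empty LLVM
 * phi while blocks are still being emitted, and visit_post_phi (driven by
 * phi_post_pass after the whole body has been visited) fills in the
 * incoming (value, block) pairs once every source SSA def and predecessor
 * block is known. */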
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
    LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
    LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

    ctx->ssa_defs[instr->dest.ssa.index] = result;
    _mesa_hash_table_insert(ctx->phis, instr, result);
}

static void visit_post_phi(struct ac_nir_context *ctx,
                           nir_phi_instr *instr,
                           LLVMValueRef llvm_phi)
{
    nir_foreach_phi_src(src, instr) {
        LLVMBasicBlockRef block = get_block(ctx, src->pred);
        LLVMValueRef llvm_src = get_src(ctx, src->src);

        LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
    }
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
    hash_table_foreach(ctx->phis, entry) {
        visit_post_phi(ctx, (nir_phi_instr *)entry->key,
                       (LLVMValueRef)entry->data);
    }
}

static bool is_def_used_in_an_export(const nir_ssa_def *def) {
    nir_foreach_use(use_src, def) {
        if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *instr = nir_instr_as_intrinsic(use_src->parent_instr);
            if (instr->intrinsic == nir_intrinsic_store_deref)
                return true;
        } else if (use_src->parent_instr->type == nir_instr_type_alu) {
            nir_alu_instr *instr = nir_instr_as_alu(use_src->parent_instr);
            if (instr->op == nir_op_vec4 &&
                is_def_used_in_an_export(&instr->dest.dest.ssa)) {
                return true;
            }
        }
    }
    return false;
}

static void visit_ssa_undef(struct ac_nir_context *ctx,
                            const nir_ssa_undef_instr *instr)
{
    unsigned num_components = instr->def.num_components;
    LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

    if (!ctx->abi->convert_undef_to_zero || is_def_used_in_an_export(&instr->def)) {
        LLVMValueRef undef;

        if (num_components == 1)
            undef = LLVMGetUndef(type);
        else {
            undef = LLVMGetUndef(LLVMVectorType(type, num_components));
        }
        ctx->ssa_defs[instr->def.index] = undef;
    } else {
        LLVMValueRef zero = LLVMConstInt(type, 0, false);
        if (num_components > 1) {
            zero = ac_build_gather_values_extended(
                &ctx->ac, &zero, 4, 0, false, false);
        }
        ctx->ssa_defs[instr->def.index] = zero;
    }
}

static void visit_jump(struct ac_llvm_context *ctx,
                       const nir_jump_instr *instr)
{
    switch (instr->type) {
    case nir_jump_break:
        ac_build_break(ctx);
        break;
    case nir_jump_continue:
        ac_build_continue(ctx);
        break;
    default:
        fprintf(stderr, "Unknown NIR jump instr: ");
        nir_print_instr(&instr->instr, stderr);
        fprintf(stderr, "\n");
        abort();
    }
}

static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
                       enum glsl_base_type type)
{
    switch (type) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_SUBROUTINE:
        return ac->i32;
    case GLSL_TYPE_INT8:
    case GLSL_TYPE_UINT8:
        return ac->i8;
    case GLSL_TYPE_INT16:
    case GLSL_TYPE_UINT16:
        return ac->i16;
    case GLSL_TYPE_FLOAT:
        return ac->f32;
    case GLSL_TYPE_FLOAT16:
        return ac->f16;
    case GLSL_TYPE_INT64:
    case GLSL_TYPE_UINT64:
        return ac->i64;
    case GLSL_TYPE_DOUBLE:
        return ac->f64;
    default:
        unreachable("unknown GLSL type");
    }
}

static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
                  const struct glsl_type *type)
{
    if (glsl_type_is_scalar(type)) {
        return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
    }

    if (glsl_type_is_vector(type)) {
        return LLVMVectorType(
            glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
            glsl_get_vector_elements(type));
    }

    if (glsl_type_is_matrix(type)) {
        return LLVMArrayType(
            glsl_to_llvm_type(ac, glsl_get_column_type(type)),
            glsl_get_matrix_columns(type));
    }

    if (glsl_type_is_array(type)) {
        return LLVMArrayType(
            glsl_to_llvm_type(ac, glsl_get_array_element(type)),
            glsl_get_length(type));
    }

    assert(glsl_type_is_struct_or_ifc(type));

    LLVMTypeRef member_types[glsl_get_length(type)];

    for (unsigned i = 0; i < glsl_get_length(type); i++) {
        member_types[i] =
            glsl_to_llvm_type(ac,
                              glsl_get_struct_field(type, i));
    }

    return LLVMStructTypeInContext(ac->context, member_types,
                                   glsl_get_length(type), false);
}

/* visit_deref only materializes pointers for shared (LDS) and global
 * memory; derefs in other modes are handled by the load/store visitors.
 * Global derefs are lowered to byte offsets using explicit strides, while
 * shared-memory derefs use typed GEPs. */
static void visit_deref(struct ac_nir_context *ctx,
                        nir_deref_instr *instr)
{
    if (instr->mode != nir_var_mem_shared &&
        instr->mode != nir_var_mem_global)
        return;

    LLVMValueRef result = NULL;
    switch(instr->deref_type) {
    case nir_deref_type_var: {
        struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
        result = entry->data;
        break;
    }
    case nir_deref_type_struct:
        if (instr->mode == nir_var_mem_global) {
            nir_deref_instr *parent = nir_deref_instr_parent(instr);
            uint64_t offset = glsl_get_struct_field_offset(parent->type,
                                                           instr->strct.index);
            result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                      LLVMConstInt(ctx->ac.i32, offset, 0));
        } else {
            result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                   LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
        }
        break;
    case nir_deref_type_array:
        if (instr->mode == nir_var_mem_global) {
            nir_deref_instr *parent = nir_deref_instr_parent(instr);
            unsigned stride = glsl_get_explicit_stride(parent->type);

            if ((glsl_type_is_matrix(parent->type) &&
                 glsl_matrix_type_is_row_major(parent->type)) ||
                (glsl_type_is_vector(parent->type) && stride == 0))
                stride = type_scalar_size_bytes(parent->type);

            LLVMValueRef index = get_src(ctx, instr->arr.index);
            if (LLVMTypeOf(index) != ctx->ac.i64)
                index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

            LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

            result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
        } else {
            result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                   get_src(ctx, instr->arr.index));
        }
        break;
    case nir_deref_type_ptr_as_array:
        if (instr->mode == nir_var_mem_global) {
            unsigned stride = nir_deref_instr_ptr_as_array_stride(instr);

            LLVMValueRef index = get_src(ctx, instr->arr.index);
            if (LLVMTypeOf(index) != ctx->ac.i64)
                index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

            LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

            result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
        } else {
            result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                      get_src(ctx, instr->arr.index));
        }
        break;
    case nir_deref_type_cast: {
        result = get_src(ctx, instr->parent);

        /* We can't use the structs from LLVM because the shader
         * specifies its own offsets. */
        LLVMTypeRef pointee_type = ctx->ac.i8;
        if (instr->mode == nir_var_mem_shared)
            pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);

        unsigned address_space;

        switch(instr->mode) {
        case nir_var_mem_shared:
            address_space = AC_ADDR_SPACE_LDS;
            break;
        case nir_var_mem_global:
            address_space = AC_ADDR_SPACE_GLOBAL;
            break;
        default:
            unreachable("Unhandled address space");
        }

        LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);

        if (LLVMTypeOf(result) != type) {
            if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
                result = LLVMBuildBitCast(ctx->ac.builder, result,
                                          type, "");
            } else {
                result = LLVMBuildIntToPtr(ctx->ac.builder, result,
                                           type, "");
            }
        }
        break;
    }
    default:
        unreachable("Unhandled deref_instr deref type");
    }

    ctx->ssa_defs[instr->dest.ssa.index] = result;
}
static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list);
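
/* Control flow is emitted structurally: visit_block lowers the straight-line
 * instructions, while visit_if/visit_loop bracket their bodies with the
 * ac_build_* control-flow helpers, mirroring NIR's structured CFG. */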
static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
    nir_foreach_instr(instr, block)
    {
        switch (instr->type) {
        case nir_instr_type_alu:
            visit_alu(ctx, nir_instr_as_alu(instr));
            break;
        case nir_instr_type_load_const:
            visit_load_const(ctx, nir_instr_as_load_const(instr));
            break;
        case nir_instr_type_intrinsic:
            visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
            break;
        case nir_instr_type_tex:
            visit_tex(ctx, nir_instr_as_tex(instr));
            break;
        case nir_instr_type_phi:
            visit_phi(ctx, nir_instr_as_phi(instr));
            break;
        case nir_instr_type_ssa_undef:
            visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
            break;
        case nir_instr_type_jump:
            visit_jump(&ctx->ac, nir_instr_as_jump(instr));
            break;
        case nir_instr_type_deref:
            visit_deref(ctx, nir_instr_as_deref(instr));
            break;
        default:
            fprintf(stderr, "Unknown NIR instr type: ");
            nir_print_instr(instr, stderr);
            fprintf(stderr, "\n");
            abort();
        }
    }

    _mesa_hash_table_insert(ctx->defs, block,
                            LLVMGetInsertBlock(ctx->ac.builder));
}
static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
    LLVMValueRef value = get_src(ctx, if_stmt->condition);

    nir_block *then_block =
        (nir_block *) exec_list_get_head(&if_stmt->then_list);

    ac_build_uif(&ctx->ac, value, then_block->index);

    visit_cf_list(ctx, &if_stmt->then_list);

    if (!exec_list_is_empty(&if_stmt->else_list)) {
        nir_block *else_block =
            (nir_block *) exec_list_get_head(&if_stmt->else_list);

        ac_build_else(&ctx->ac, else_block->index);
        visit_cf_list(ctx, &if_stmt->else_list);
    }

    ac_build_endif(&ctx->ac, then_block->index);
}
static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
    nir_block *first_loop_block =
        (nir_block *) exec_list_get_head(&loop->body);

    ac_build_bgnloop(&ctx->ac, first_loop_block->index);

    visit_cf_list(ctx, &loop->body);

    ac_build_endloop(&ctx->ac, first_loop_block->index);
}
static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list)
{
    foreach_list_typed(nir_cf_node, node, node, list)
    {
        switch (node->type) {
        case nir_cf_node_block:
            visit_block(ctx, nir_cf_node_as_block(node));
            break;

        case nir_cf_node_if:
            visit_if(ctx, nir_cf_node_as_if(node));
            break;

        case nir_cf_node_loop:
            visit_loop(ctx, nir_cf_node_as_loop(node));
            break;

        default:
            assert(0);
        }
    }
}
void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
                             struct ac_shader_abi *abi,
                             struct nir_shader *nir,
                             struct nir_variable *variable,
                             gl_shader_stage stage)
{
    unsigned output_loc = variable->data.driver_location / 4;
    unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

    /* tess ctrl has its own load/store paths for outputs */
    if (stage == MESA_SHADER_TESS_CTRL)
        return;

    if (stage == MESA_SHADER_VERTEX ||
        stage == MESA_SHADER_TESS_EVAL ||
        stage == MESA_SHADER_GEOMETRY) {
        int idx = variable->data.location + variable->data.index;
        if (idx == VARYING_SLOT_CLIP_DIST0) {
            int length = nir->info.clip_distance_array_size +
                         nir->info.cull_distance_array_size;

            if (length > 4)
                attrib_count = 2;
            else
                attrib_count = 1;
        }
    }

    bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
    LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
    for (unsigned i = 0; i < attrib_count; ++i) {
        for (unsigned chan = 0; chan < 4; chan++) {
            abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
                ac_build_alloca_undef(ctx, type, "");
        }
    }
}
static void
setup_locals(struct ac_nir_context *ctx,
             struct nir_function *func)
{
    int i, j;
    ctx->num_locals = 0;
    nir_foreach_variable(variable, &func->impl->locals) {
        unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
        variable->data.driver_location = ctx->num_locals * 4;
        variable->data.location_frac = 0;
        ctx->num_locals += attrib_count;
    }
    ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
    if (!ctx->locals)
        return;

    for (i = 0; i < ctx->num_locals; i++) {
        for (j = 0; j < 4; j++) {
            ctx->locals[i * 4 + j] =
                ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
        }
    }
}
static void
setup_scratch(struct ac_nir_context *ctx,
              struct nir_shader *shader)
{
    if (shader->scratch_size == 0)
        return;

    ctx->scratch = ac_build_alloca_undef(&ctx->ac,
                                         LLVMArrayType(ctx->ac.i8, shader->scratch_size),
                                         "scratch");
}
static void
setup_constant_data(struct ac_nir_context *ctx,
                    struct nir_shader *shader)
{
    if (!shader->constant_data)
        return;

    LLVMValueRef data =
        LLVMConstStringInContext(ctx->ac.context,
                                 shader->constant_data,
                                 shader->constant_data_size,
                                 true);
    LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

    /* We want to put the constant data in the CONST address space so that
     * we can use scalar loads. However, LLVM versions before 10 put these
     * variables in the same section as the code, which is unacceptable
     * for RadeonSI as it needs to relocate all the data sections after
     * the code sections. See https://reviews.llvm.org/D65813.
     */
    unsigned address_space =
        LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

    LLVMValueRef global =
        LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                    "const_data",
                                    address_space);

    LLVMSetInitializer(global, data);
    LLVMSetGlobalConstant(global, true);
    LLVMSetVisibility(global, LLVMHiddenVisibility);
    ctx->constant_data = global;
}
static void
setup_shared(struct ac_nir_context *ctx,
             struct nir_shader *nir)
{
    if (ctx->ac.lds)
        return;

    LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
                                     nir->info.cs.shared_size);

    LLVMValueRef lds =
        LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                    "compute_lds",
                                    AC_ADDR_SPACE_LDS);
    LLVMSetAlignment(lds, 64 * 1024);

    ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
                                   LLVMPointerType(ctx->ac.i8,
                                                   AC_ADDR_SPACE_LDS), "");
}
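
/* Entry point of this file: sets up the per-shader tables (SSA defs,
 * blocks, phis, variables), walks the single NIR function's control-flow
 * list, resolves phis, and finally emits outputs for non-compute stages. */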
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
   struct ac_nir_context ctx = {};
   struct nir_function *func;

   ctx.ac = *ac;
   ctx.abi = abi;
   ctx.args = args;

   ctx.stage = nir->info.stage;
   ctx.info = &nir->info;

   ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

   nir_foreach_variable(variable, &nir->outputs)
      ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
                                   ctx.stage);

   ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);

   if (ctx.abi->kill_ps_if_inf_interp)
      ctx.verified_interp = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_index_ssa_defs(func->impl);
   ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

   setup_locals(&ctx, func);
   setup_scratch(&ctx, nir);
   setup_constant_data(&ctx, nir);

   if (gl_shader_stage_is_compute(nir->info.stage))
      setup_shared(&ctx, nir);

   if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
      ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
      /* true = don't kill. */
      LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
   }

   visit_cf_list(&ctx, &func->impl->body);
   phi_post_pass(&ctx);

   if (ctx.ac.postponed_kill)
      ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
                                                    ctx.ac.postponed_kill, ""));

   if (!gl_shader_stage_is_compute(nir->info.stage))
      ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
                            ctx.abi->outputs);

   free(ctx.locals);
   free(ctx.ssa_defs);
   ralloc_free(ctx.defs);
   ralloc_free(ctx.phis);
   ralloc_free(ctx.vars);
   if (ctx.abi->kill_ps_if_inf_interp)
      ralloc_free(ctx.verified_interp);
}

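/* Decide which variable modes need their indirect derefs lowered away
 * (to if-ladders or scratch), mostly to work around LLVM's limited or
 * buggy VGPR indexing on some chips.
 */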
bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
   bool progress = false;

   /* Lower large variables to scratch first so that we won't bloat the
    * shader by generating large if ladders for them. We later lower
    * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
    */
   NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
            nir_var_function_temp,
            256,
            glsl_get_natural_size_align_bytes);

   /* While it would be nice not to have this flag, we are constrained
    * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
    */
   bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

   /* TODO: Indirect indexing of GS inputs is unimplemented.
    *
    * TCS and TES load inputs directly from LDS or offchip memory, so
    * indirect indexing is trivial.
    */
   nir_variable_mode indirect_mask = 0;
   if (nir->info.stage == MESA_SHADER_GEOMETRY ||
       (nir->info.stage != MESA_SHADER_TESS_CTRL &&
        nir->info.stage != MESA_SHADER_TESS_EVAL &&
        !llvm_has_working_vgpr_indexing)) {
      indirect_mask |= nir_var_shader_in;
   }
   if (!llvm_has_working_vgpr_indexing &&
       nir->info.stage != MESA_SHADER_TESS_CTRL)
      indirect_mask |= nir_var_shader_out;

   /* TODO: We shouldn't need to do this, however LLVM isn't currently
    * smart enough to handle indirects without causing excess spilling
    * causing the gpu to hang.
    *
    * See the following thread for more details of the problem:
    * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
    */
   indirect_mask |= nir_var_function_temp;

   progress |= nir_lower_indirect_derefs(nir, indirect_mask);
   return progress;
}

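/* Return a bitmask of the tess factor channels written by an intrinsic:
 * inner tess levels start at bit 0, outer tess levels at bit 4.
 * Non-tess-factor stores return 0.
 */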
static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
   if (intrin->intrinsic != nir_intrinsic_store_deref)
      return 0;

   nir_variable *var =
      nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

   if (var->data.mode != nir_var_shader_out)
      return 0;

   unsigned writemask = 0;
   const int location = var->data.location;
   unsigned first_component = var->data.location_frac;
   unsigned num_comps = intrin->dest.ssa.num_components;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER)
      writemask = ((1 << (num_comps + 1)) - 1) << first_component;
   else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
      writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

   return writemask;
}

static void
scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
               unsigned *cond_block_tf_writemask,
               bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
{
   switch (cf_node->type) {
   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_control_barrier) {

            /* If we find a barrier in nested control flow put this in the
             * too hard basket. In GLSL this is not possible but it is in
             * SPIR-V.
             */
            if (is_nested_cf) {
               *tessfactors_are_def_in_all_invocs = false;
               return;
            }

            /* The following case must be prevented:
             *    gl_TessLevelInner = ...;
             *    barrier();
             *    if (gl_InvocationID == 1)
             *       gl_TessLevelInner = ...;
             *
             * If you consider disjoint code segments separated by barriers, each
             * such segment that writes tess factor channels should write the same
             * channels in all codepaths within that segment.
             */
            if (upper_block_tf_writemask || cond_block_tf_writemask) {
               /* Accumulate the result: */
               *tessfactors_are_def_in_all_invocs &=
                  !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

               /* Analyze the next code segment from scratch. */
               *upper_block_tf_writemask = 0;
               *cond_block_tf_writemask = 0;
            }
         } else
            *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
      }
      break;
   }
   case nir_cf_node_if: {
      unsigned then_tessfactor_writemask = 0;
      unsigned else_tessfactor_writemask = 0;

      nir_if *if_stmt = nir_cf_node_as_if(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
         scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }

      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
         scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }

      if (then_tessfactor_writemask || else_tessfactor_writemask) {
         /* If both statements write the same tess factor channels,
          * we can say that the upper block writes them too.
          */
         *upper_block_tf_writemask |= then_tessfactor_writemask &
                                      else_tessfactor_writemask;
         *cond_block_tf_writemask |= then_tessfactor_writemask |
                                     else_tessfactor_writemask;
      }
      break;
   }
   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         scan_tess_ctrl(nested_node, cond_block_tf_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }
      break;
   }
   default:
      unreachable("unknown cf node type");
   }
}

bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

   /* The pass works as follows:
    * If all codepaths write tess factors, we can say that all
    * invocations define tess factors.
    *
    * Each tess factor channel is tracked separately.
    */
   unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
   unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

   /* Initial value = true. Here the pass will accumulate results from
    * multiple segments surrounded by barriers. If tess factors aren't
    * written at all, it's a shader bug and we don't care if this will be
    * true.
    */
   bool tessfactors_are_def_in_all_invocs = true;

   nir_foreach_function(function, nir) {
      if (function->impl) {
         foreach_list_typed(nir_cf_node, node, node,
                            &function->impl->body) {
            scan_tess_ctrl(node, &main_block_tf_writemask,
                           &cond_block_tf_writemask,
                           &tessfactors_are_def_in_all_invocs,
                           false);
         }
      }
   }

   /* Accumulate the result for the last code segment separated by a
    * barrier.
    */
   if (main_block_tf_writemask || cond_block_tf_writemask) {
      tessfactors_are_def_in_all_invocs &=
         !(cond_block_tf_writemask & ~main_block_tf_writemask);
   }

   return tessfactors_are_def_in_all_invocs;
}