/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "sid.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
struct ac_nir_context {
   struct ac_llvm_context ac;
   struct ac_shader_abi *abi;
   const struct ac_shader_args *args;

   gl_shader_stage stage;
   shader_info *info;

   LLVMValueRef *ssa_defs;

   LLVMValueRef constant_data;

   struct hash_table *defs;
   struct hash_table *phis;
   struct hash_table *vars;
   struct hash_table *verified_interp;

   LLVMValueRef main_function;
   LLVMBasicBlockRef continue_block;
   LLVMBasicBlockRef break_block;
};
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image);

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     LLVMValueRef index,
                                     bool image, bool write);
static void
build_store_values_extended(struct ac_llvm_context *ac,
                            LLVMValueRef *values,
                            unsigned value_count,
                            unsigned value_stride,
                            LLVMValueRef vec)
{
   LLVMBuilderRef builder = ac->builder;
   unsigned i;

   for (i = 0; i < value_count; i++) {
      LLVMValueRef ptr = values[i * value_stride];
      LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
      LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
      LLVMBuildStore(builder, value, ptr);
   }
}
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
                                const nir_ssa_def *def)
{
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
   if (def->num_components > 1) {
      type = LLVMVectorType(type, def->num_components);
   }
   return type;
}
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
   assert(src.is_ssa);
   return nir->ssa_defs[src.ssa->index];
}
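
/* Build a typed pointer into shared memory (LDS): index into ctx->ac.lds
 * and cast the result to a pointer to an integer of the requested bit size
 * in the same address space.
 */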
static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
   LLVMValueRef ptr = get_src(ctx, src);
   ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
   int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

   return LLVMBuildBitCast(ctx->ac.builder, ptr,
                           LLVMPointerType(type, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
                                   const struct nir_block *b)
{
   struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
   return (LLVMBasicBlockRef)entry->data;
}
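
/* Fetch an ALU source and apply its swizzle, returning a value with exactly
 * num_components components. The extract (vecN -> scalar), splat
 * (scalar -> vecN) and general swizzle cases are handled separately because
 * LLVM has a distinct instruction for each.
 */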
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
                                nir_alu_src src,
                                unsigned num_components)
{
   LLVMValueRef value = get_src(ctx, src.src);
   bool need_swizzle = false;

   assert(value);
   unsigned src_components = ac_get_llvm_num_components(value);
   for (unsigned i = 0; i < num_components; ++i) {
      assert(src.swizzle[i] < src_components);
      if (src.swizzle[i] != i)
         need_swizzle = true;
   }

   if (need_swizzle || num_components != src_components) {
      LLVMValueRef masks[] = {
         LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
         LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

      if (src_components > 1 && num_components == 1) {
         value = LLVMBuildExtractElement(ctx->ac.builder, value,
                                         masks[0], "");
      } else if (src_components == 1 && num_components > 1) {
         LLVMValueRef values[] = {value, value, value, value};
         value = ac_build_gather_values(&ctx->ac, values, num_components);
      } else {
         LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
         value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
                                        swizzle, "");
      }
   }
   assert(!src.negate);
   assert(!src.abs);
   return value;
}
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
                                 LLVMIntPredicate pred, LLVMValueRef src0,
                                 LLVMValueRef src1)
{
   LLVMTypeRef src0_type = LLVMTypeOf(src0);
   LLVMTypeRef src1_type = LLVMTypeOf(src1);

   if (LLVMGetTypeKind(src0_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src0_type, "");
   } else if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src0_type) != LLVMPointerTypeKind) {
      src0 = LLVMBuildIntToPtr(ctx->builder, src0, src1_type, "");
   }

   LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}
static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
                                   LLVMRealPredicate pred, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
   LLVMValueRef result;
   src0 = ac_to_float(ctx, src0);
   src1 = ac_to_float(ctx, src1);
   result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
   return LLVMBuildSelect(ctx->builder, result,
                          LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
                          ctx->i32_0, "");
}
static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}

static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   char name[64], type[64];
   LLVMValueRef params[] = {
      ac_to_float(ctx, src0),
      ac_to_float(ctx, src1),
      ac_to_float(ctx, src2),
   };

   ac_build_type_name_for_intr(LLVMTypeOf(params[0]), type, sizeof(type));
   ASSERTED const int length = snprintf(name, sizeof(name), "%s.%s", intrin, type);
   assert(length < sizeof(name));
   return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
                               LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
   LLVMTypeRef src1_type = LLVMTypeOf(src1);
   LLVMTypeRef src2_type = LLVMTypeOf(src2);

   assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

   if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
       LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
      src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
   } else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
              LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
      src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
   }

   LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
                                  ctx->i32_0, "");
   return LLVMBuildSelect(ctx->builder, v,
                          ac_to_integer_or_pointer(ctx, src1),
                          ac_to_integer_or_pointer(ctx, src2), "");
}
static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx,
                              LLVMValueRef src0)
{
   return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}
static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
                                    const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMTypeRef ret_type;
   LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
   LLVMValueRef res;
   LLVMValueRef params[] = { src0, src1 };
   ret_type = LLVMStructTypeInContext(ctx->context, types,
                                      2, true);

   res = ac_build_intrinsic(ctx, intrin, ret_type,
                            params, 2, AC_FUNC_ATTR_READNONE);

   res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
   res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
   return res;
}
static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
                                      LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
                                      "");
   result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

   switch (bitsize) {
   case 16:
      return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
   src0 = ac_to_float(ctx, src0);
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
                        ctx->i32, "");
}
static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
   LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

   switch (bitsize) {
   case 8:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
   case 16:
      return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
   case 32:
      return result;
   case 64:
      return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
   default:
      unreachable("Unsupported bit size.");
   }
}
static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
   LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
   return LLVMBuildSExt(ctx->builder,
                        LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
                        ctx->i32, "");
}
static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx,
                               LLVMValueRef src0)
{
   LLVMValueRef result;
   LLVMValueRef cond = NULL;

   src0 = ac_to_float(ctx, src0);
   result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

   if (ctx->chip_class >= GFX8) {
      LLVMValueRef args[2];
      /* Check if the result is a denormal - and flush to 0 if so. */
      args[0] = result;
      args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
      cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
   }

   /* need to convert back up to f32 */
   result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

   if (ctx->chip_class >= GFX8)
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   else {
      /* 0x38800000 is the smallest half float value (2^-14) in 32-bit float,
       * so compare the result and flush to 0 if it's smaller.
       */
      LLVMValueRef temp, cond2;
      temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
      cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
                           LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
                           temp, "");
      cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
                            temp, ctx->f32_0, "");
      cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
      result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
   }
   return result;
}
static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}

static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
   LLVMValueRef dst64, result;
   src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
   src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

   dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
   dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
   result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
   return result;
}
static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx,
                             LLVMValueRef bits, LLVMValueRef offset)
{
   /* mask = ((1 << bits) - 1) << offset */
   return LLVMBuildShl(ctx->builder,
                       LLVMBuildSub(ctx->builder,
                                    LLVMBuildShl(ctx->builder,
                                                 ctx->i32_1,
                                                 bits, ""),
                                    ctx->i32_1, ""),
                       offset, "");
}
static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx,
                                         LLVMValueRef mask, LLVMValueRef insert,
                                         LLVMValueRef base)
{
   /* Calculate:
    *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
    * Use the right-hand side, which the LLVM backend can convert to V_BFI.
    */
   return LLVMBuildXor(ctx->builder, base,
                       LLVMBuildAnd(ctx->builder, mask,
                                    LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}
static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0,
                                   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
                                                        LLVMValueRef args[2]))
{
   LLVMValueRef comp[2];

   src0 = ac_to_float(ctx, src0);
   comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
   comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

   return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}
static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                                          LLVMValueRef src0)
{
   LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
   LLVMValueRef temps[2], val;
   int i;

   for (i = 0; i < 2; i++) {
      val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
      val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
      val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
      temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
   }
   return ac_build_gather_values(ctx, temps, 2);
}
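
/* Lower the nir_op_fddx/fddy family to ac_build_ddxy: 'mask' selects which
 * lanes of the pixel quad participate (fine vs. coarse derivatives) and
 * 'idx' selects the neighbour in the X (1) or Y (2) direction.
 */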
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
                              nir_op op,
                              LLVMValueRef src0)
{
   unsigned mask;
   int idx;
   LLVMValueRef result;

   if (op == nir_op_fddx_fine)
      mask = AC_TID_MASK_LEFT;
   else if (op == nir_op_fddy_fine)
      mask = AC_TID_MASK_TOP;
   else
      mask = AC_TID_MASK_TOP_LEFT;

   /* for DDX we want the next X pixel, for DDY the next Y pixel. */
   if (op == nir_op_fddx_fine ||
       op == nir_op_fddx_coarse ||
       op == nir_op_fddx)
      idx = 1;
   else
      idx = 2;

   result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
   return result;
}
struct waterfall_context {
   LLVMBasicBlockRef phi_bb[2];
   bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop, leaving the caller
 * to implement the body.
 *
 * params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we built the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    things through.
 */
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx,
                                    struct waterfall_context *wctx,
                                    LLVMValueRef value, bool divergent)
{
   /* If the app claims the value is divergent but it is constant we can
    * end up with a dynamic index of NULL. */
   if (!value)
      divergent = false;

   wctx->use_waterfall = divergent;
   if (!divergent)
      return value;

   ac_build_bgnloop(&ctx->ac, 6000);

   LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
                                       scalar_value, "uniform_active");

   wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
   ac_build_ifcc(&ctx->ac, active, 6001);

   return scalar_value;
}
static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx,
                                   struct waterfall_context *wctx,
                                   LLVMValueRef value)
{
   LLVMValueRef ret = NULL;
   LLVMValueRef phi_src[2];
   LLVMValueRef cc_phi_src[2] = {
      LLVMConstInt(ctx->ac.i32, 0, false),
      LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
   };

   if (!wctx->use_waterfall)
      return value;

   wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

   ac_build_endif(&ctx->ac, 6001);

   if (value) {
      phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
      phi_src[1] = value;

      ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
   }

   /*
    * By using the optimization barrier on the exit decision, we decouple
    * the operations from the break, and hence avoid LLVM hoisting the
    * operation into the break block.
    */
   LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
   ac_build_optimization_barrier(&ctx->ac, &cc);

   LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
   ac_build_ifcc(&ctx->ac, active, 6002);
   ac_build_break(&ctx->ac);
   ac_build_endif(&ctx->ac, 6002);

   ac_build_endloop(&ctx->ac, 6000);
   return ret;
}
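
/* A typical use of the waterfall helpers looks like this (see e.g.
 * visit_store_ssbo/visit_atomic_ssbo below):
 *
 *    struct waterfall_context wctx;
 *    LLVMValueRef rsrc = enter_waterfall(ctx, &wctx, index, divergent);
 *    ... emit the body using rsrc, which is uniform within one iteration ...
 *    result = exit_waterfall(ctx, &wctx, result);
 */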
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
   LLVMValueRef src[4], result = NULL;
   unsigned num_components = instr->dest.dest.ssa.num_components;
   unsigned src_components;
   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);
   bool saved_inexact = false;

   if (instr->exact)
      saved_inexact = ac_disable_inexact_math(ctx->ac.builder);

   assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
   switch (instr->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      src_components = 1;
      break;
   case nir_op_pack_half_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      src_components = 2;
      break;
   case nir_op_unpack_half_2x16:
      src_components = 1;
      break;
   case nir_op_cube_face_coord:
   case nir_op_cube_face_index:
      src_components = 3;
      break;
   default:
      src_components = num_components;
      break;
   }
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      src[i] = get_alu_src(ctx, instr->src[i], src_components);
   switch (instr->op) {
   case nir_op_mov:
      result = src[0];
      break;
   case nir_op_fneg:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fneg will be optimized by backend compiler with sign
          * bit removed via XOR. This is probably a LLVM bug.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ineg:
      result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
      break;
   case nir_op_inot:
      result = LLVMBuildNot(ctx->ac.builder, src[0], "");
      break;
   case nir_op_iadd:
      result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fadd:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fsub:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_isub:
      result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imul:
      result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_imod:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_umod:
      result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmod:
      /* lower_fmod only lowers 16-bit and 32-bit fmod */
      assert(instr->dest.dest.ssa.bit_size == 64);
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), result);
      result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
      result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
      break;
   case nir_op_irem:
      result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_idiv:
      result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_udiv:
      result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_fmul:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      src[1] = ac_to_float(&ctx->ac, src[1]);
      result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_frcp:
      /* For doubles, we need precise division to pass GLCTS. */
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL &&
          ac_get_type_size(def_type) == 8) {
         result = LLVMBuildFDiv(ctx->ac.builder, ctx->ac.f64_1,
                                ac_to_float(&ctx->ac, src[0]), "");
      } else {
         result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
                                       ac_to_float_type(&ctx->ac, def_type), src[0]);
      }
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_iand:
      result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ior:
      result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ixor:
      result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishl:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ishr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ushr:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
                                LLVMTypeOf(src[0]), "");
      else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
         src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
                                 LLVMTypeOf(src[0]), "");
      result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
      break;
   case nir_op_ilt32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
      break;
   case nir_op_ine32:
      result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
      break;
   case nir_op_ieq32:
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
      break;
   case nir_op_ige32:
      result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
      break;
   case nir_op_ult32:
      result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
      break;
   case nir_op_uge32:
      result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
      break;
   case nir_op_feq32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
      break;
   case nir_op_fneu32:
      result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
      break;
   case nir_op_flt32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
      break;
   case nir_op_fge32:
      result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
      break;
   case nir_op_fabs:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
         /* fabs will be optimized by backend compiler with sign
          * bit removed via AND.
          */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_iabs:
      result = emit_iabs(&ctx->ac, src[0]);
      break;
   case nir_op_imax:
      result = ac_build_imax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imin:
      result = ac_build_imin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umax:
      result = ac_build_umax(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_umin:
      result = ac_build_umin(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_isign:
      result = ac_build_isign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsign:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fsign(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_ffloor:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ftrunc:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fceil:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fround_even:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_ffract:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_fract(&ctx->ac, src[0],
                              instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fsin:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fcos:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fsqrt:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_fexp2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_flog2:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      break;
   case nir_op_frsq:
      result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rsq",
                                    ac_to_float_type(&ctx->ac, def_type), src[0]);
      if (ctx->abi->clamp_div_by_zero)
         result = ac_build_fmin(&ctx->ac, result,
                                LLVMConstReal(ac_to_float_type(&ctx->ac, def_type), FLT_MAX));
      break;
   case nir_op_frexp_exp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_exp(&ctx->ac, src[0],
                                  ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
         result = LLVMBuildSExt(ctx->ac.builder, result,
                                ctx->ac.i32, "");
      break;
   case nir_op_frexp_sig:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = ac_build_frexp_mant(&ctx->ac, src[0],
                                   instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_fpow:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      break;
   case nir_op_fmax:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_fmin:
      result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
      if (ctx->ac.chip_class < GFX9 &&
          instr->dest.dest.ssa.bit_size == 32) {
         /* Only pre-GFX9 chips do not flush denorms. */
         result = ac_build_canonicalize(&ctx->ac, result,
                                        instr->dest.dest.ssa.bit_size);
      }
      break;
   case nir_op_ffma:
      /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
      result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
                                    ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
      break;
   case nir_op_ldexp:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
      else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
      else
         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
      break;
   case nir_op_bfm:
      result = emit_bfm(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_bitfield_select:
      result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_ubfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
      break;
   case nir_op_ibfe:
      result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
      break;
   case nir_op_bitfield_reverse:
      result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
      break;
   case nir_op_bit_count:
      result = ac_build_bit_count(&ctx->ac, src[0]);
      break;
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
         src[i] = ac_to_integer(&ctx->ac, src[i]);
      result = ac_build_gather_values(&ctx->ac, src, num_components);
      break;
   case nir_op_f2i8:
   case nir_op_f2i16:
   case nir_op_f2i32:
   case nir_op_f2i64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_f2u8:
   case nir_op_f2u16:
   case nir_op_f2u32:
   case nir_op_f2u64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2f16:
   case nir_op_i2f32:
   case nir_op_i2f64:
      result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2f16:
   case nir_op_u2f32:
   case nir_op_u2f64:
      result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_f2f16_rtz:
   case nir_op_f2f16:
   case nir_op_f2fmp:
      src[0] = ac_to_float(&ctx->ac, src[0]);

      /* For OpenGL, we want fast packing with v_cvt_pkrtz_f16, but if we use it,
       * all f32->f16 conversions have to round towards zero, because both scalar
       * and vec2 down-conversions have to round equally.
       */
      if (ctx->ac.float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL ||
          instr->op == nir_op_f2f16_rtz) {
         src[0] = ac_to_float(&ctx->ac, src[0]);

         if (LLVMTypeOf(src[0]) == ctx->ac.f64)
            src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");

         /* Fast path conversion. This only works if NIR is vectorized
          * to vec2 16.
          */
         if (LLVMTypeOf(src[0]) == ctx->ac.v2f32) {
            LLVMValueRef args[] = {
               ac_llvm_extract_elem(&ctx->ac, src[0], 0),
               ac_llvm_extract_elem(&ctx->ac, src[0], 1),
            };
            result = ac_build_cvt_pkrtz_f16(&ctx->ac, args);
            break;
         }

         assert(ac_get_llvm_num_components(src[0]) == 1);
         LLVMValueRef param[2] = { src[0], LLVMGetUndef(ctx->ac.f32) };
         result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
         result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      } else {
         if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
            result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
         else
            result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      }
      break;
   case nir_op_f2f16_rtne:
   case nir_op_f2f32:
   case nir_op_f2f64:
      src[0] = ac_to_float(&ctx->ac, src[0]);
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      else
         result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
      break;
   case nir_op_u2u8:
   case nir_op_u2u16:
   case nir_op_u2u32:
   case nir_op_u2u64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_i2i8:
   case nir_op_i2i16:
   case nir_op_i2i32:
   case nir_op_i2i64:
      if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
         result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
      else
         result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
      break;
   case nir_op_b32csel:
      result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
      break;
   case nir_op_find_lsb:
      result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
      break;
   case nir_op_ufind_msb:
      result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_ifind_msb:
      result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
      break;
   case nir_op_uadd_carry:
      result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_usub_borrow:
      result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
      break;
   case nir_op_b2f16:
   case nir_op_b2f32:
   case nir_op_b2f64:
      result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_f2b32:
      result = emit_f2b(&ctx->ac, src[0]);
      break;
   case nir_op_b2i8:
   case nir_op_b2i16:
   case nir_op_b2i32:
   case nir_op_b2i64:
      result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
      break;
   case nir_op_i2b32:
      result = emit_i2b(&ctx->ac, src[0]);
      break;
   case nir_op_fquantize2f16:
      result = emit_f2f16(&ctx->ac, src[0]);
      break;
   case nir_op_umul_high:
      result = emit_umul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_imul_high:
      result = emit_imul_high(&ctx->ac, src[0], src[1]);
      break;
   case nir_op_pack_half_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
      break;
   case nir_op_pack_snorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
      break;
   case nir_op_pack_unorm_2x16:
      result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
      break;
   case nir_op_unpack_half_2x16:
      result = emit_unpack_half_2x16(&ctx->ac, src[0]);
      break;
   case nir_op_fddx:
   case nir_op_fddy:
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      result = emit_ddxy(ctx, instr->op, src[0]);
      break;
   case nir_op_unpack_64_2x32_split_x: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_64_2x32_split_y: {
      assert(ac_get_llvm_num_components(src[0]) == 1);
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i32, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_pack_64_2x32_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
      break;
   }
   case nir_op_pack_32_2x16_split: {
      LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
      result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_x: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_0, "");
      break;
   }
   case nir_op_unpack_32_2x16_split_y: {
      LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
                                          ctx->ac.v2i16, "");
      result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
                                       ctx->ac.i32_1, "");
      break;
   }
   case nir_op_cube_face_coord: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef results[2];
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
                                      ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
                                           ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
      results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
      LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
      results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
      results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
      result = ac_build_gather_values(&ctx->ac, results, 2);
      break;
   }
   case nir_op_cube_face_index: {
      src[0] = ac_to_float(&ctx->ac, src[0]);
      LLVMValueRef in[3];
      for (unsigned chan = 0; chan < 3; chan++)
         in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
                                  ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
      break;
   }
   default:
      fprintf(stderr, "Unknown NIR alu instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }

   if (result) {
      assert(instr->dest.dest.is_ssa);
      result = ac_to_integer_or_pointer(&ctx->ac, result);
      ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
   }

   if (instr->exact)
      ac_restore_inexact_math(ctx->ac.builder, saved_inexact);
}
static void visit_load_const(struct ac_nir_context *ctx,
                             const nir_load_const_instr *instr)
{
   LLVMValueRef values[4], value = NULL;
   LLVMTypeRef element_type =
      LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   for (unsigned i = 0; i < instr->def.num_components; ++i) {
      switch (instr->def.bit_size) {
      case 8:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u8, false);
         break;
      case 16:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u16, false);
         break;
      case 32:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u32, false);
         break;
      case 64:
         values[i] = LLVMConstInt(element_type,
                                  instr->value[i].u64, false);
         break;
      default:
         fprintf(stderr,
                 "unsupported nir load_const bit_size: %d\n",
                 instr->def.bit_size);
         abort();
      }
   }
   if (instr->def.num_components > 1) {
      value = LLVMConstVector(values, instr->def.num_components);
   } else
      value = values[0];

   ctx->ssa_defs[instr->def.index] = value;
}
static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
   LLVMValueRef size =
      LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                              LLVMConstInt(ctx->ac.i32, 2, false), "");

   if (ctx->ac.chip_class == GFX8 && in_elements) {
      /* On GFX8, the descriptor contains the size in bytes,
       * but TXQ must return the size in elements.
       * The stride is always non-zero for resources using TXQ.
       */
      LLVMValueRef stride =
         LLVMBuildExtractElement(ctx->ac.builder, descriptor,
                                 ctx->ac.i32_1, "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride,
                             LLVMConstInt(ctx->ac.i32, 16, false), "");
      stride = LLVMBuildAnd(ctx->ac.builder, stride,
                            LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

      size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
   }
   return size;
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
                                          nir_variable *var,
                                          struct ac_image_args *args,
                                          const nir_tex_instr *instr)
{
   const struct glsl_type *type = glsl_without_array(var->type);
   enum glsl_base_type stype = glsl_get_sampler_result_type(type);
   LLVMValueRef wa_8888 = NULL;
   LLVMValueRef half_texel[2];
   LLVMValueRef result;

   assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef formats;
      LLVMValueRef data_format;
      LLVMValueRef wa_formats;

      formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

      data_format = LLVMBuildLShr(ctx->builder, formats,
                                  LLVMConstInt(ctx->i32, 20, false), "");
      data_format = LLVMBuildAnd(ctx->builder, data_format,
                                 LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
      wa_8888 = LLVMBuildICmp(
         ctx->builder, LLVMIntEQ, data_format,
         LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
         "");

      uint32_t wa_num_format =
         stype == GLSL_TYPE_UINT ?
         S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
         S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
      wa_formats = LLVMBuildAnd(ctx->builder, formats,
                                LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
                                "");
      wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
                               LLVMConstInt(ctx->i32, wa_num_format, false), "");

      formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
      args->resource = LLVMBuildInsertElement(
         ctx->builder, args->resource, formats, ctx->i32_1, "");
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
      assert(!wa_8888);
      half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
   } else {
      struct ac_image_args resinfo = {};
      LLVMBasicBlockRef bbs[2];

      LLVMValueRef unnorm = NULL;
      LLVMValueRef default_offset = ctx->f32_0;
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
          !instr->is_array) {
         /* In vulkan, whether the sampler uses unnormalized
          * coordinates or not is a dynamic property of the
          * sampler. Hence, to figure out whether or not we
          * need to divide by the texture size, we need to test
          * the sampler at runtime. This tests the bit set by
          * radv_init_sampler().
          */
         LLVMValueRef sampler0 =
            LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
         sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
                                  LLVMConstInt(ctx->i32, 15, false), "");
         sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
         unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
         default_offset = LLVMConstReal(ctx->f32, -0.5);
      }

      bbs[0] = LLVMGetInsertBlock(ctx->builder);
      if (wa_8888 || unnorm) {
         assert(!(wa_8888 && unnorm));
         LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
         /* Skip the texture size query entirely if we don't need it. */
         ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
         bbs[1] = LLVMGetInsertBlock(ctx->builder);
      }

      /* Query the texture size. */
      resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
      resinfo.opcode = ac_image_get_resinfo;
      resinfo.dmask = 0xf;
      resinfo.lod = ctx->i32_0;
      resinfo.resource = args->resource;
      resinfo.attributes = AC_FUNC_ATTR_READNONE;
      LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

      /* Compute -0.5 / size. */
      for (unsigned c = 0; c < 2; c++) {
         half_texel[c] =
            LLVMBuildExtractElement(ctx->builder, size,
                                    LLVMConstInt(ctx->i32, c, 0), "");
         half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
         half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
         half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
                                       LLVMConstReal(ctx->f32, -0.5), "");
      }

      if (wa_8888 || unnorm) {
         ac_build_endif(ctx, 2000);

         for (unsigned c = 0; c < 2; c++) {
            LLVMValueRef values[2] = { default_offset, half_texel[c] };
            half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
                                         values, bbs);
         }
      }
   }

   for (unsigned c = 0; c < 2; c++) {
      LLVMValueRef tmp;
      tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
      args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
   }

   args->attributes = AC_FUNC_ATTR_READNONE;
   result = ac_build_image_opcode(ctx, args);

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
      LLVMValueRef tmp, tmp2;

      /* if the cube workaround is in place, f2i the result. */
      for (unsigned c = 0; c < 4; c++) {
         tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
         if (stype == GLSL_TYPE_UINT)
            tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
         else
            tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
         tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
         tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
         tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
         result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
      }
   }
   return result;
}
*get_tex_texture_deref(const nir_tex_instr
*instr
)
1423 nir_deref_instr
*texture_deref_instr
= NULL
;
1425 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
1426 switch (instr
->src
[i
].src_type
) {
1427 case nir_tex_src_texture_deref
:
1428 texture_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
1434 return texture_deref_instr
;
1437 static LLVMValueRef
build_tex_intrinsic(struct ac_nir_context
*ctx
,
1438 const nir_tex_instr
*instr
,
1439 struct ac_image_args
*args
)
1441 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
1442 unsigned mask
= nir_ssa_def_components_read(&instr
->dest
.ssa
);
1444 assert(instr
->dest
.is_ssa
);
1445 return ac_build_buffer_load_format(&ctx
->ac
,
1449 util_last_bit(mask
),
1451 instr
->dest
.ssa
.bit_size
== 16);
1454 args
->opcode
= ac_image_sample
;
1456 switch (instr
->op
) {
1458 case nir_texop_txf_ms
:
1459 case nir_texop_samples_identical
:
1460 args
->opcode
= args
->level_zero
||
1461 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
?
1462 ac_image_load
: ac_image_load_mip
;
1463 args
->level_zero
= false;
1466 case nir_texop_query_levels
:
1467 args
->opcode
= ac_image_get_resinfo
;
1469 args
->lod
= ctx
->ac
.i32_0
;
1470 args
->level_zero
= false;
1473 if (ctx
->stage
!= MESA_SHADER_FRAGMENT
) {
1475 args
->level_zero
= true;
1479 args
->opcode
= ac_image_gather4
;
1480 if (!args
->lod
&& !args
->bias
)
1481 args
->level_zero
= true;
1484 args
->opcode
= ac_image_get_lod
;
1486 case nir_texop_fragment_fetch
:
1487 case nir_texop_fragment_mask_fetch
:
1488 args
->opcode
= ac_image_load
;
1489 args
->level_zero
= false;
1495 if (instr
->op
== nir_texop_tg4
&& ctx
->ac
.chip_class
<= GFX8
) {
1496 nir_deref_instr
*texture_deref_instr
= get_tex_texture_deref(instr
);
1497 nir_variable
*var
= nir_deref_instr_get_variable(texture_deref_instr
);
1498 const struct glsl_type
*type
= glsl_without_array(var
->type
);
1499 enum glsl_base_type stype
= glsl_get_sampler_result_type(type
);
1500 if (stype
== GLSL_TYPE_UINT
|| stype
== GLSL_TYPE_INT
) {
1501 return lower_gather4_integer(&ctx
->ac
, var
, args
, instr
);
1505 /* Fixup for GFX9 which allocates 1D textures as 2D. */
1506 if (instr
->op
== nir_texop_lod
&& ctx
->ac
.chip_class
== GFX9
) {
1507 if ((args
->dim
== ac_image_2darray
||
1508 args
->dim
== ac_image_2d
) && !args
->coords
[1]) {
1509 args
->coords
[1] = ctx
->ac
.i32_0
;
1513 args
->attributes
= AC_FUNC_ATTR_READNONE
;
1514 bool cs_derivs
= ctx
->stage
== MESA_SHADER_COMPUTE
&&
1515 ctx
->info
->cs
.derivative_group
!= DERIVATIVE_GROUP_NONE
;
1516 if (ctx
->stage
== MESA_SHADER_FRAGMENT
|| cs_derivs
) {
1517 /* Prevent texture instructions with implicit derivatives from being
1518 * sinked into branches. */
1519 switch (instr
->op
) {
1523 args
->attributes
|= AC_FUNC_ATTR_CONVERGENT
;
1530 return ac_build_image_opcode(&ctx
->ac
, args
);
static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
                                                  nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr = get_src(ctx, instr->src[0]);
   LLVMValueRef index = get_src(ctx, instr->src[1]);

   LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
   LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
   return result;
}
static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
                                             nir_intrinsic_instr *instr)
{
   LLVMValueRef ptr, addr;
   LLVMValueRef src0 = get_src(ctx, instr->src[0]);
   unsigned index = nir_intrinsic_base(instr);

   addr = LLVMConstInt(ctx->ac.i32, index, 0);
   addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

   /* Load constant values from user SGPRS when possible, otherwise
    * fallback to the default path that loads directly from memory.
    */
   if (LLVMIsConstant(src0) &&
       instr->dest.ssa.bit_size == 32) {
      unsigned count = instr->dest.ssa.num_components;
      unsigned offset = index;

      offset += LLVMConstIntGetZExtValue(src0);
      offset /= 4;

      offset -= ctx->args->base_inline_push_consts;

      unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
      if (offset + count <= num_inline_push_consts) {
         LLVMValueRef push_constants[num_inline_push_consts];
         for (unsigned i = 0; i < num_inline_push_consts; i++)
            push_constants[i] = ac_get_arg(&ctx->ac,
                                           ctx->args->inline_push_consts[i]);
         return ac_build_gather_values(&ctx->ac,
                                       push_constants + offset,
                                       count);
      }
   }

   ptr = LLVMBuildGEP(ctx->ac.builder,
                      ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");

   if (instr->dest.ssa.bit_size == 8) {
      unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

      LLVMValueRef params[3];
      if (load_dwords > 1) {
         LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.v2i32, "");
         params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
         params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
      } else {
         res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
         params[0] = ctx->ac.i32_0;
         params[1] = res;
      }
      params[2] = addr;
      res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

      res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
      if (instr->dest.ssa.num_components > 1)
         res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
      return res;
   } else if (instr->dest.ssa.bit_size == 16) {
      unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
      LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
      ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
      LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
      LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
      cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
      LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
                              LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
                              LLVMConstInt(ctx->ac.i32, 4, false)};
      LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
      LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
      LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
      LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
      res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
      return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
   }

   ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

   return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
   LLVMValueRef index = get_src(ctx, instr->src[0]);

   return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}
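
/* Widen a component writemask so that each bit covers 'multiplier'
 * consecutive components, e.g. widen_mask(0b101, 2) == 0b110011.
 * Useful when a write of wide components is re-expressed in narrower
 * channels.
 */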
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
   uint32_t new_mask = 0;
   for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
      if (mask & (1u << i))
         new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
   return new_mask;
}
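
/* Return components [start, start + count) of a vector (or the value itself
 * for a full-range request), as either a scalar extract or a shuffle.
 */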
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                                         unsigned start, unsigned count)
{
   LLVMValueRef mask[] = {
      ctx->i32_0, ctx->i32_1,
      LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

   unsigned src_elements = ac_get_llvm_num_components(src);

   if (count == src_elements) {
      assert(start == 0);
      return src;
   } else if (count == 1) {
      assert(start < src_elements);
      return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
   } else {
      assert(start + count <= src_elements);
      assert(count <= 4);
      LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
      return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
   }
}
static unsigned get_cache_policy(struct ac_nir_context *ctx,
                                 enum gl_access_qualifier access,
                                 bool may_store_unaligned,
                                 bool writeonly_memory)
{
   unsigned cache_policy = 0;

   /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
    * store opcodes not aligned to a dword are affected. The only way to
    * get unaligned stores is through shader images.
    */
   if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
        /* If this is write-only, don't keep data in L1 to prevent
         * evicting L1 cache lines that may be needed by other
         * instructions.
         */
        writeonly_memory ||
        access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
      cache_policy |= ac_glc;
   }

   if (access & ACCESS_STREAM_CACHE_POLICY)
      cache_policy |= ac_slc | ac_glc;

   return cache_policy;
}
static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx,
                                         struct waterfall_context *wctx,
                                         const nir_intrinsic_instr *instr,
                                         nir_src src)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, src),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static void visit_store_ssbo(struct ac_nir_context *ctx,
                             nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7000);
   }

   LLVMValueRef src_data = get_src(ctx, instr->src[0]);
   int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
   unsigned writemask = nir_intrinsic_write_mask(instr);
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   bool writeonly_memory = access & ACCESS_NON_READABLE;
   unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);

   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
   LLVMValueRef base_data = src_data;
   base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
   LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

   while (writemask) {
      int start, count;
      LLVMValueRef data, offset;
      LLVMTypeRef data_type;

      u_bit_scan_consecutive_range(&writemask, &start, &count);

      /* Due to an LLVM limitation with LLVM < 9, split 3-element
       * writes into a 2-element and a 1-element write. */
      if (count == 3 &&
          (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
         writemask |= 1 << (start + 2);
         count = 2;
      }
      int num_bytes = count * elem_size_bytes; /* count in bytes */

      /* we can only store 4 DWords at the same time.
       * can only happen for 64 Bit vectors. */
      if (num_bytes > 16) {
         writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
         count = 2;
         num_bytes = 16;
      }

      /* check alignment of 16 Bit stores */
      if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = 2;
      }

      /* Due to alignment issues, split stores of 8-bit/16-bit
       * vectors.
       */
      if (ctx->ac.chip_class == GFX6 && count > 1 && elem_size_bytes < 4) {
         writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
         count = 1;
         num_bytes = elem_size_bytes;
      }

      data = extract_vector_range(&ctx->ac, base_data, start, count);

      offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
                            LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

      if (num_bytes == 1) {
         ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
                                     offset, ctx->ac.i32_0,
                                     cache_policy);
      } else if (num_bytes == 2) {
         ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
                                      offset, ctx->ac.i32_0,
                                      cache_policy);
      } else {
         int num_channels = num_bytes / 4;

         switch (num_bytes) {
         case 16: /* v4f32 */
            data_type = ctx->ac.v4f32;
            break;
         case 12: /* v3f32 */
            data_type = ctx->ac.v3f32;
            break;
         case 8: /* v2f32 */
            data_type = ctx->ac.v2f32;
            break;
         case 4: /* f32 */
            data_type = ctx->ac.f32;
            break;
         default:
            unreachable("Malformed vector store.");
         }
         data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

         ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
                                     num_channels, offset,
                                     ctx->ac.i32_0, 0,
                                     cache_policy);
      }
   }

   exit_waterfall(ctx, &wctx, NULL);

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7000);
}
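
/* 64-bit SSBO atomic compare & swap, implemented as a global-memory cmpxchg
 * on a pointer reconstructed from the buffer descriptor's 48-bit base
 * address plus the byte offset. With robust buffer access the operation is
 * predicated on the offset being in bounds and a phi returns 0 otherwise.
 */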
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                                           LLVMValueRef descriptor,
                                           LLVMValueRef offset,
                                           LLVMValueRef compare,
                                           LLVMValueRef exchange)
{
    LLVMBasicBlockRef start_block = NULL, then_block = NULL;
    if (ctx->abi->robust_buffer_access) {
        LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

        LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
        start_block = LLVMGetInsertBlock(ctx->ac.builder);

        ac_build_ifcc(&ctx->ac, cond, -1);

        then_block = LLVMGetInsertBlock(ctx->ac.builder);
    }

    LLVMValueRef ptr_parts[2] = {
        ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
        LLVMBuildAnd(ctx->ac.builder,
                     ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                     LLVMConstInt(ctx->ac.i32, 65535, 0), "")
    };

    ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
    ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

    offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

    LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
    ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
    ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
    ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

    LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
    result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

    if (ctx->abi->robust_buffer_access) {
        ac_build_endif(&ctx->ac, -1);

        LLVMBasicBlockRef incoming_blocks[2] = {
            start_block,
            then_block,
        };

        LLVMValueRef incoming_values[2] = {
            LLVMConstInt(ctx->ac.i64, 0, 0),
            result,
        };

        LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
        LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);

        return ret;
    } else {
        return result;
    }
}
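
/* SSBO atomics. The NIR intrinsic is mapped onto
 * llvm.amdgcn.raw.buffer.atomic.<op>.<type> on LLVM >= 9, or the older
 * llvm.amdgcn.buffer.atomic.<op> otherwise; only the operand layout
 * differs between the two forms. 64-bit comp_swap takes the separate
 * global-memory path above. */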
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
    if (ctx->ac.postponed_kill) {
        LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                          ctx->ac.postponed_kill, "");
        ac_build_ifcc(&ctx->ac, cond, 7001);
    }

    LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
    const char *op;
    char name[64], type[8];
    LLVMValueRef params[6], descriptor;
    LLVMValueRef result;
    int arg_count = 0;

    struct waterfall_context wctx;
    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

    switch (instr->intrinsic) {
    case nir_intrinsic_ssbo_atomic_add:
        op = "add";
        break;
    case nir_intrinsic_ssbo_atomic_imin:
        op = "smin";
        break;
    case nir_intrinsic_ssbo_atomic_umin:
        op = "umin";
        break;
    case nir_intrinsic_ssbo_atomic_imax:
        op = "smax";
        break;
    case nir_intrinsic_ssbo_atomic_umax:
        op = "umax";
        break;
    case nir_intrinsic_ssbo_atomic_and:
        op = "and";
        break;
    case nir_intrinsic_ssbo_atomic_or:
        op = "or";
        break;
    case nir_intrinsic_ssbo_atomic_xor:
        op = "xor";
        break;
    case nir_intrinsic_ssbo_atomic_exchange:
        op = "swap";
        break;
    case nir_intrinsic_ssbo_atomic_comp_swap:
        op = "cmpswap";
        break;
    default:
        abort();
    }

    descriptor = ctx->abi->load_ssbo(ctx->abi,
                                     rsrc_base,
                                     true);

    if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
        return_type == ctx->ac.i64) {
        result = emit_ssbo_comp_swap_64(ctx, descriptor,
                                        get_src(ctx, instr->src[1]),
                                        get_src(ctx, instr->src[2]),
                                        get_src(ctx, instr->src[3]));
    } else {
        if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
            params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
        }
        params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
        params[arg_count++] = descriptor;

        if (LLVM_VERSION_MAJOR >= 9) {
            /* XXX: The new raw/struct atomic intrinsics are buggy with
             * LLVM 8, see r358579.
             */
            params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
            params[arg_count++] = ctx->ac.i32_0; /* soffset */
            params[arg_count++] = ctx->ac.i32_0; /* slc */

            ac_build_type_name_for_intr(return_type, type, sizeof(type));
            snprintf(name, sizeof(name),
                     "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
        } else {
            params[arg_count++] = ctx->ac.i32_0; /* vindex */
            params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
            params[arg_count++] = ctx->ac.i1false; /* slc */

            assert(return_type == ctx->ac.i32);
            snprintf(name, sizeof(name),
                     "llvm.amdgcn.buffer.atomic.%s", op);
        }

        result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
                                    arg_count, 0);
    }

    result = exit_waterfall(ctx, &wctx, result);
    if (ctx->ac.postponed_kill)
        ac_build_endif(&ctx->ac, 7001);
    return result;
}
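
/* SSBO load. Components are fetched in chunks of at most 16 bytes;
 * sub-dword elements whose alignment is not a multiple of 4 go through
 * the tbuffer byte/short helpers one element at a time. Each chunk is
 * bitcast through a byte vector so it can be trimmed to exactly
 * load_bytes before being reassembled into the destination vector. */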
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
    struct waterfall_context wctx;
    LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

    int elem_size_bytes = instr->dest.ssa.bit_size / 8;
    int num_components = instr->num_components;
    enum gl_access_qualifier access = nir_intrinsic_access(instr);
    unsigned cache_policy = get_cache_policy(ctx, access, false, false);

    LLVMValueRef offset = get_src(ctx, instr->src[1]);
    LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
    LLVMValueRef vindex = ctx->ac.i32_0;

    LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
    LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

    LLVMValueRef results[4];
    for (int i = 0; i < num_components;) {
        int num_elems = num_components - i;
        if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
            num_elems = 1;
        if (num_elems * elem_size_bytes > 16)
            num_elems = 16 / elem_size_bytes;
        int load_bytes = num_elems * elem_size_bytes;

        LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

        LLVMValueRef ret;

        if (load_bytes == 1) {
            ret = ac_build_tbuffer_load_byte(&ctx->ac,
                                             rsrc,
                                             offset,
                                             ctx->ac.i32_0,
                                             immoffset,
                                             cache_policy);
        } else if (load_bytes == 2) {
            ret = ac_build_tbuffer_load_short(&ctx->ac,
                                              rsrc,
                                              offset,
                                              ctx->ac.i32_0,
                                              immoffset,
                                              cache_policy);
        } else {
            int num_channels = util_next_power_of_two(load_bytes) / 4;
            bool can_speculate = access & ACCESS_CAN_REORDER;

            ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
                                       vindex, offset, immoffset, 0,
                                       cache_policy, can_speculate, false);
        }

        LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
        ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
        ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

        LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
        ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

        for (unsigned j = 0; j < num_elems; j++) {
            results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
        }
        i += num_elems;
    }

    LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
    return exit_waterfall(ctx, &wctx, ret);
}
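
/* UBO descriptor indices can be divergent as well (ACCESS_NON_UNIFORM),
 * so UBO loads reuse the same waterfall mechanism as SSBOs. */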
static LLVMValueRef enter_waterfall_ubo(struct ac_nir_context *ctx,
                                        struct waterfall_context *wctx,
                                        const nir_intrinsic_instr *instr)
{
    return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                           nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
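
/* UBO load. 64-bit results are loaded as twice as many dwords, while
 * 8/16-bit results are loaded one component at a time with the tbuffer
 * helpers; everything is bitcast to the NIR destination type at the end. */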
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
                                          nir_intrinsic_instr *instr)
{
    struct waterfall_context wctx;
    LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);

    LLVMValueRef ret;
    LLVMValueRef rsrc = rsrc_base;
    LLVMValueRef offset = get_src(ctx, instr->src[1]);
    int num_components = instr->num_components;

    if (ctx->abi->load_ubo)
        rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

    if (instr->dest.ssa.bit_size == 64)
        num_components *= 2;

    if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
        unsigned load_bytes = instr->dest.ssa.bit_size / 8;
        LLVMValueRef results[num_components];
        for (unsigned i = 0; i < num_components; ++i) {
            LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                                  load_bytes * i, 0);

            if (load_bytes == 1) {
                results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                        rsrc,
                                                        offset,
                                                        ctx->ac.i32_0,
                                                        immoffset,
                                                        0);
            } else {
                assert(load_bytes == 2);
                results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                         rsrc,
                                                         offset,
                                                         ctx->ac.i32_0,
                                                         immoffset,
                                                         0);
            }
        }
        ret = ac_build_gather_values(&ctx->ac, results, num_components);
    } else {
        ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                   NULL, 0, 0, true, true);

        ret = ac_trim_vector(&ctx->ac, ret, num_components);
    }

    ret = LLVMBuildBitCast(ctx->ac.builder, ret,
                           get_def_type(ctx, &instr->dest.ssa), "");

    return exit_waterfall(ctx, &wctx, ret);
}
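
/* Walk a deref chain and split it into a constant slot offset plus an
 * optional LLVM value for the indirect part. For per-vertex derefs
 * (TCS/TES/GS style) the leading array index is returned separately as the
 * vertex index. Compact variables (e.g. clip distances) use the array
 * index directly as the constant offset. */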
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
    nir_variable *var = nir_deref_instr_get_variable(instr);
    nir_deref_path path;
    unsigned idx_lvl = 1;

    nir_deref_path_init(&path, instr, NULL);

    if (vertex_index_out != NULL || vertex_index_ref != NULL) {
        if (vertex_index_ref) {
            *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
            if (vertex_index_out)
                *vertex_index_out = 0;
        } else {
            *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
        }
        ++idx_lvl;
    }

    uint32_t const_offset = 0;
    LLVMValueRef offset = NULL;

    if (var->data.compact) {
        assert(instr->deref_type == nir_deref_type_array);
        const_offset = nir_src_as_uint(instr->arr.index);
        goto out;
    }

    for (; path.path[idx_lvl]; ++idx_lvl) {
        const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
        if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
            unsigned index = path.path[idx_lvl]->strct.index;

            for (unsigned i = 0; i < index; i++) {
                const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
                const_offset += glsl_count_attribute_slots(ft, vs_in);
            }
        } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
            unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
            if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
                const_offset += size *
                                nir_src_as_uint(path.path[idx_lvl]->arr.index);
            } else {
                LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
                                                      get_src(ctx, path.path[idx_lvl]->arr.index), "");
                if (offset)
                    offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
                else
                    offset = array_off;
            }
        } else
            unreachable("Unhandled deref type in get_deref_offset");
    }

out:
    nir_deref_path_finish(&path);

    if (const_offset && offset)
        offset = LLVMBuildAdd(ctx->ac.builder, offset,
                              LLVMConstInt(ctx->ac.i32, const_offset, 0),
                              "");

    *const_out = const_offset;
    *indir_out = offset;
}
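
/* Load TCS/TES inputs or TCS outputs through the driver ABI callback.
 * 16-bit destinations are truncated from the 32-bit values the callback
 * returns. */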
static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
                                       nir_intrinsic_instr *instr,
                                       bool load_inputs)
{
    LLVMValueRef result;
    LLVMValueRef vertex_index = NULL;
    LLVMValueRef indir_index = NULL;
    unsigned const_index = 0;

    nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

    unsigned location = var->data.location;
    unsigned driver_location = var->data.driver_location;
    const bool is_patch = var->data.patch ||
                          var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                          var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
    const bool is_compact = var->data.compact;

    get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                     false, NULL, is_patch ? NULL : &vertex_index,
                     &const_index, &indir_index);

    LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

    LLVMTypeRef src_component_type;
    if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
        src_component_type = LLVMGetElementType(dest_type);
    else
        src_component_type = dest_type;

    result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                          vertex_index, indir_index,
                                          const_index, location, driver_location,
                                          var->data.location_frac,
                                          instr->num_components,
                                          is_patch, is_compact, load_inputs);
    if (instr->dest.ssa.bit_size == 16) {
        result = ac_to_integer(&ctx->ac, result);
        result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
    }
    return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}

static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
    assert(glsl_type_is_vector_or_scalar(type) ||
           glsl_type_is_matrix(type));
    return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
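
/* load_deref: dispatch on the variable mode. Shader inputs/outputs and
 * function temporaries live in pre-created channel arrays
 * (ctx->abi->inputs/outputs, ctx->locals) with a stride of 4 channels per
 * slot, so indirect indexing gathers the candidate channels into a vector
 * and extracts the selected one. Global memory is accessed through real
 * pointers instead. */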
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                                   nir_intrinsic_instr *instr)
{
    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
    nir_variable *var = nir_deref_instr_get_variable(deref);

    LLVMValueRef values[8];
    int idx = 0;
    int ve = instr->dest.ssa.num_components;
    unsigned comp = 0;
    LLVMValueRef indir_index;
    LLVMValueRef ret;
    unsigned const_index;
    unsigned stride = 4;
    int mode = deref->mode;

    if (var) {
        bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
                     var->data.mode == nir_var_shader_in;
        idx = var->data.driver_location;
        comp = var->data.location_frac;
        mode = var->data.mode;

        get_deref_offset(ctx, deref, vs_in, NULL, NULL,
                         &const_index, &indir_index);

        if (var->data.compact) {
            stride = 1;
            const_index += comp;
            comp = 0;
        }
    }

    if (instr->dest.ssa.bit_size == 64 &&
        (deref->mode == nir_var_shader_in ||
         deref->mode == nir_var_shader_out ||
         deref->mode == nir_var_function_temp))
        ve *= 2;

    switch (mode) {
    case nir_var_shader_in:
        if (ctx->stage == MESA_SHADER_TESS_CTRL ||
            ctx->stage == MESA_SHADER_TESS_EVAL) {
            return load_tess_varyings(ctx, instr, true);
        }

        if (ctx->stage == MESA_SHADER_GEOMETRY) {
            LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
            LLVMValueRef indir_index;
            unsigned const_index, vertex_index;
            get_deref_offset(ctx, deref, false, &vertex_index, NULL,
                             &const_index, &indir_index);
            assert(indir_index == NULL);

            return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                         var->data.driver_location,
                                         var->data.location_frac,
                                         instr->num_components, vertex_index, const_index, type);
        }

        for (unsigned chan = comp; chan < ve + comp; chan++) {
            if (indir_index) {
                unsigned count = glsl_count_attribute_slots(
                        var->type,
                        ctx->stage == MESA_SHADER_VERTEX);
                count -= chan / 4;
                LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                        &ctx->ac, ctx->abi->inputs + idx + chan, count,
                        stride, false, true);

                values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                       tmp_vec,
                                                       indir_index, "");
            } else
                values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
        }
        break;
    case nir_var_function_temp:
        for (unsigned chan = 0; chan < ve; chan++) {
            if (indir_index) {
                unsigned count = glsl_count_attribute_slots(
                        var->type, false);
                count -= chan / 4;
                LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                        &ctx->ac, ctx->locals + idx + chan, count,
                        stride, true, true);

                values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                       tmp_vec,
                                                       indir_index, "");
            } else {
                values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
            }
        }
        break;
    case nir_var_shader_out:
        if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            return load_tess_varyings(ctx, instr, false);
        }

        if (ctx->stage == MESA_SHADER_FRAGMENT &&
            var->data.fb_fetch_output &&
            ctx->abi->emit_fbfetch)
            return ctx->abi->emit_fbfetch(ctx->abi);

        for (unsigned chan = comp; chan < ve + comp; chan++) {
            if (indir_index) {
                unsigned count = glsl_count_attribute_slots(
                        var->type, false);
                count -= chan / 4;
                LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                        &ctx->ac, ctx->abi->outputs + idx + chan, count,
                        stride, true, true);

                values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                       tmp_vec,
                                                       indir_index, "");
            } else {
                values[chan] = LLVMBuildLoad(ctx->ac.builder,
                                             ctx->abi->outputs[idx + chan + const_index * stride],
                                             "");
            }
        }
        break;
    case nir_var_mem_global: {
        LLVMValueRef address = get_src(ctx, instr->src[0]);
        LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
        unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
        unsigned natural_stride = type_scalar_size_bytes(deref->type);
        unsigned stride = explicit_stride ? explicit_stride : natural_stride;
        int elem_size_bytes = ac_get_elem_bits(&ctx->ac, result_type) / 8;
        bool split_loads = ctx->ac.chip_class == GFX6 && elem_size_bytes < 4;

        if (stride != natural_stride || split_loads) {
            if (LLVMGetTypeKind(result_type) == LLVMVectorTypeKind)
                result_type = LLVMGetElementType(result_type);

            LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                   LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
            address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

            for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
                LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
                values[i] = LLVMBuildLoad(ctx->ac.builder,
                                          ac_build_gep_ptr(&ctx->ac, address, offset), "");

                if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE))
                    LLVMSetOrdering(values[i], LLVMAtomicOrderingMonotonic);
            }
            return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
        } else {
            LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                   LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
            address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
            LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");

            if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE))
                LLVMSetOrdering(val, LLVMAtomicOrderingMonotonic);
            return val;
        }
    }
    default:
        unreachable("unhandled variable mode");
    }
    ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
    return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
{
    if (ctx->ac.postponed_kill) {
        LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                          ctx->ac.postponed_kill, "");
        ac_build_ifcc(&ctx->ac, cond, 7002);
    }

    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
    nir_variable *var = nir_deref_instr_get_variable(deref);

    LLVMValueRef temp_ptr, value;
    int idx = 0;
    unsigned comp = 0;
    LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
    int writemask = instr->const_index[0];
    LLVMValueRef indir_index;
    unsigned const_index;

    if (var) {
        get_deref_offset(ctx, deref, false,
                         NULL, NULL, &const_index, &indir_index);
        idx = var->data.driver_location;
        comp = var->data.location_frac;

        if (var->data.compact) {
            const_index += comp;
            comp = 0;
        }
    }

    if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
        (deref->mode == nir_var_shader_out ||
         deref->mode == nir_var_function_temp)) {

        src = LLVMBuildBitCast(ctx->ac.builder, src,
                               LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
                               "");

        writemask = widen_mask(writemask, 2);
    }

    writemask = writemask << comp;

    switch (deref->mode) {
    case nir_var_shader_out:
        if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            LLVMValueRef vertex_index = NULL;
            LLVMValueRef indir_index = NULL;
            unsigned const_index = 0;
            const bool is_patch = var->data.patch ||
                                  var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                                  var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

            get_deref_offset(ctx, deref, false, NULL,
                             is_patch ? NULL : &vertex_index,
                             &const_index, &indir_index);

            ctx->abi->store_tcs_outputs(ctx->abi, var,
                                        vertex_index, indir_index,
                                        const_index, src, writemask);
            break;
        }

        for (unsigned chan = 0; chan < 8; chan++) {
            int stride = 4;
            if (!(writemask & (1 << chan)))
                continue;

            value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

            if (var->data.compact)
                stride = 1;
            if (indir_index) {
                unsigned count = glsl_count_attribute_slots(
                        var->type, false);
                count -= chan / 4;
                LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                        &ctx->ac, ctx->abi->outputs + idx + chan, count,
                        stride, true, true);

                tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                                 value, indir_index, "");
                build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
                                            count, stride, tmp_vec);

            } else {
                temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

                LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
            }
        }
        break;
    case nir_var_function_temp:
        for (unsigned chan = 0; chan < 8; chan++) {
            if (!(writemask & (1 << chan)))
                continue;

            value = ac_llvm_extract_elem(&ctx->ac, src, chan);
            if (indir_index) {
                unsigned count = glsl_count_attribute_slots(
                        var->type, false);
                count -= chan / 4;
                LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                        &ctx->ac, ctx->locals + idx + chan, count,
                        4, true, true);

                tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                                 value, indir_index, "");
                build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
                                            count, 4, tmp_vec);
            } else {
                temp_ptr = ctx->locals[idx + chan + const_index * 4];

                LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
            }
        }
        break;
    case nir_var_mem_global: {
        int writemask = instr->const_index[0];
        LLVMValueRef address = get_src(ctx, instr->src[0]);
        LLVMValueRef val = get_src(ctx, instr->src[1]);

        unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
        unsigned natural_stride = type_scalar_size_bytes(deref->type);
        unsigned stride = explicit_stride ? explicit_stride : natural_stride;
        int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(val)) / 8;
        bool split_stores = ctx->ac.chip_class == GFX6 && elem_size_bytes < 4;

        LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                               LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
        address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

        if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
            stride == natural_stride && !split_stores) {
            LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                                   LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
            address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

            val = LLVMBuildBitCast(ctx->ac.builder, val,
                                   LLVMGetElementType(LLVMTypeOf(address)), "");
            LLVMValueRef store = LLVMBuildStore(ctx->ac.builder, val, address);

            if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE))
                LLVMSetOrdering(store, LLVMAtomicOrderingMonotonic);
        } else {
            LLVMTypeRef val_type = LLVMTypeOf(val);
            if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind)
                val_type = LLVMGetElementType(val_type);

            LLVMTypeRef ptr_type = LLVMPointerType(val_type,
                                                   LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
            address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
            for (unsigned chan = 0; chan < 4; chan++) {
                if (!(writemask & (1 << chan)))
                    continue;

                LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

                LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
                LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
                                                        chan);
                src = LLVMBuildBitCast(ctx->ac.builder, src,
                                       LLVMGetElementType(LLVMTypeOf(ptr)), "");
                LLVMValueRef store = LLVMBuildStore(ctx->ac.builder, src, ptr);

                if (nir_intrinsic_access(instr) & (ACCESS_COHERENT | ACCESS_VOLATILE))
                    LLVMSetOrdering(store, LLVMAtomicOrderingMonotonic);
            }
        }
        break;
    }
    default:
        abort();
        break;
    }

    if (ctx->ac.postponed_kill)
        ac_build_endif(&ctx->ac, 7002);
}
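
/* Number of coordinate components (including any layer index) that the
 * image intrinsics consume for a given sampler dimensionality. */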
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
    switch (dim) {
    case GLSL_SAMPLER_DIM_BUF:
        return 1;
    case GLSL_SAMPLER_DIM_1D:
        return array ? 2 : 1;
    case GLSL_SAMPLER_DIM_2D:
        return array ? 3 : 2;
    case GLSL_SAMPLER_DIM_MS:
        return array ? 4 : 3;
    case GLSL_SAMPLER_DIM_3D:
    case GLSL_SAMPLER_DIM_CUBE:
        return 3;
    case GLSL_SAMPLER_DIM_RECT:
    case GLSL_SAMPLER_DIM_SUBPASS:
        return 2;
    case GLSL_SAMPLER_DIM_SUBPASS_MS:
        return 3;
    default:
        break;
    }
    return 0;
}
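
/* For compressed MSAA surfaces, FMASK maps a logical sample index to the
 * physical slot the data actually lives in; this helper rewrites the sample
 * index (channel 2, or 3 for arrays) before the image load is emitted. */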
static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                                    LLVMValueRef coord_x, LLVMValueRef coord_y,
                                                    LLVMValueRef coord_z,
                                                    LLVMValueRef sample_index,
                                                    LLVMValueRef fmask_desc_ptr)
{
    unsigned sample_chan = coord_z ? 3 : 2;
    LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
    addr[sample_chan] = sample_index;

    ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
    return addr[sample_chan];
}

static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
    assert(instr->src[0].is_ssa);
    return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}

static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
                                         const nir_intrinsic_instr *instr,
                                         LLVMValueRef dynamic_index,
                                         enum ac_descriptor_type desc_type,
                                         bool write)
{
    nir_deref_instr *deref_instr =
        instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
        nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

    return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
}
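
/* Build args->coords (and the sample index) for an image intrinsic. This
 * includes two GFX9 quirks: 1D images are addressed as 2D with a zero
 * second coordinate, and 2D non-array images need BASE_ARRAY read from the
 * descriptor as a third coordinate, since the hardware ignores BASE_ARRAY
 * when a slice of a 3D image is bound as 2D. */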
static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             LLVMValueRef dynamic_desc_index,
                             struct ac_image_args *args,
                             enum glsl_sampler_dim dim,
                             bool is_array)
{
    LLVMValueRef src0 = get_src(ctx, instr->src[1]);
    LLVMValueRef masks[] = {
        LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
        LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
    };
    LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

    int count;
    ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
                                  dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
    bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
                  dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
    bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
    assert(!add_frag_pos && "Input attachments should be lowered by this point.");
    count = image_type_to_components_count(dim, is_array);

    if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
                  instr->intrinsic == nir_intrinsic_bindless_image_load)) {
        LLVMValueRef fmask_load_address[3];

        fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
        fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
        if (is_array)
            fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
        else
            fmask_load_address[2] = NULL;

        sample_index = adjust_sample_index_using_fmask(&ctx->ac,
                                                       fmask_load_address[0],
                                                       fmask_load_address[1],
                                                       fmask_load_address[2],
                                                       sample_index,
                                                       get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                                                                        AC_DESC_FMASK, &instr->instr, dynamic_desc_index, true, false));
    }
    if (count == 1 && !gfx9_1d) {
        if (instr->src[1].ssa->num_components)
            args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
        else
            args->coords[0] = src0;
    } else {
        int chan;
        if (is_ms)
            count--;
        for (chan = 0; chan < count; ++chan) {
            args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
        }

        if (gfx9_1d) {
            if (is_array) {
                args->coords[2] = args->coords[1];
                args->coords[1] = ctx->ac.i32_0;
            } else
                args->coords[1] = ctx->ac.i32_0;
            count++;
        }
        if (ctx->ac.chip_class == GFX9 &&
            dim == GLSL_SAMPLER_DIM_2D &&
            !is_array) {
            /* The hw can't bind a slice of a 3D image as a 2D
             * image, because it ignores BASE_ARRAY if the target
             * is 3D. The workaround is to read BASE_ARRAY and set
             * it as the 3rd address operand for all 2D images.
             */
            LLVMValueRef first_layer, const5, mask;

            const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
            mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
            first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
            first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

            args->coords[count] = first_layer;
            count++;
        }

        if (is_ms) {
            args->coords[count] = sample_index;
            count++;
        }
    }
}
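
/* Buffer-image descriptor. On GFX9 with LLVM < 9, atomics get a fixed-up
 * descriptor: NUM_RECORDS (dword 2) is replaced with max(element count,
 * stride extracted from dword 1), apparently to compensate for how older
 * LLVM addresses GFX9 buffer atomics. */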
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
                                                const nir_intrinsic_instr *instr,
                                                LLVMValueRef dynamic_index,
                                                bool write, bool atomic)
{
    LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
    if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
        LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
        LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
        stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

        LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                      LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                      elem_count, stride, "");

        rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                      LLVMConstInt(ctx->ac.i32, 2, 0), "");
    }
    return rsrc;
}
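
/* Image descriptors can be indexed non-uniformly as well; resolve the
 * descriptor index and open a waterfall loop around the image operation
 * when ACCESS_NON_UNIFORM is set. */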
static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
                                          struct waterfall_context *wctx,
                                          const nir_intrinsic_instr *instr)
{
    nir_deref_instr *deref_instr = NULL;

    if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
        deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);

    LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
    return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}

static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,