/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
struct ac_nir_context {
	struct ac_llvm_context ac;
	struct ac_shader_abi *abi;
	const struct ac_shader_args *args;

	gl_shader_stage stage;
	const shader_info *info;

	LLVMValueRef *ssa_defs;

	LLVMValueRef constant_data;

	struct hash_table *defs;
	struct hash_table *phis;
	struct hash_table *vars;

	LLVMValueRef main_function;
	LLVMBasicBlockRef continue_block;
	LLVMBasicBlockRef break_block;
};
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
					   nir_deref_instr *deref_instr,
					   const nir_instr *instr,
					   bool image);

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     LLVMValueRef index,
				     bool image, bool write);
static void
build_store_values_extended(struct ac_llvm_context *ac,
			    LLVMValueRef *values,
			    unsigned value_count,
			    unsigned value_stride,
			    LLVMValueRef vec)
{
	LLVMBuilderRef builder = ac->builder;
	unsigned i;

	for (i = 0; i < value_count; i++) {
		LLVMValueRef ptr = values[i * value_stride];
		LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
		LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
		LLVMBuildStore(builder, value, ptr);
	}
}
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
				const nir_ssa_def *def)
{
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
	if (def->num_components > 1) {
		type = LLVMVectorType(type, def->num_components);
	}
	return type;
}
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
	return nir->ssa_defs[src.ssa->index];
}
static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src, unsigned bit_size)
{
	LLVMValueRef ptr = get_src(ctx, src);
	ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
	int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, bit_size);

	return LLVMBuildBitCast(ctx->ac.builder, ptr,
				LLVMPointerType(type, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
				   const struct nir_block *b)
{
	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
	return (LLVMBasicBlockRef)entry->data;
}
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
				nir_alu_src src,
				unsigned num_components)
{
	LLVMValueRef value = get_src(ctx, src.src);
	bool need_swizzle = false;

	unsigned src_components = ac_get_llvm_num_components(value);
	for (unsigned i = 0; i < num_components; ++i) {
		assert(src.swizzle[i] < src_components);
		if (src.swizzle[i] != i)
			need_swizzle = true;
	}

	if (need_swizzle || num_components != src_components) {
		LLVMValueRef masks[] = {
			LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
			LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

		if (src_components > 1 && num_components == 1) {
			value = LLVMBuildExtractElement(ctx->ac.builder, value,
							masks[0], "");
		} else if (src_components == 1 && num_components > 1) {
			LLVMValueRef values[] = {value, value, value, value};
			value = ac_build_gather_values(&ctx->ac, values, num_components);
		} else {
			LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
			value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
						       swizzle, "");
		}
	}
	return value;
}
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
				 LLVMIntPredicate pred, LLVMValueRef src0,
				 LLVMValueRef src1)
{
	LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}
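/* Note: comparisons deliberately produce an all-ones/all-zeros i32 mask
 * (0xFFFFFFFF or 0) rather than an i1, presumably because booleans are
 * represented as 32-bit masks throughout this translation (see emit_f2b,
 * emit_i2b and emit_bcsel, which all use the same convention). */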
static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
				   LLVMRealPredicate pred, LLVMValueRef src0,
				   LLVMValueRef src1)
{
	LLVMValueRef result;
	src0 = ac_to_float(ctx, src0);
	src1 = ac_to_float(ctx, src1);
	result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
			       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
			       ctx->i32_0, "");
}
static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
					 const char *intrin,
					 LLVMTypeRef result_type,
					 LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
		ac_to_float(ctx, src2),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
					     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
			       LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	LLVMTypeRef src1_type = LLVMTypeOf(src1);
	LLVMTypeRef src2_type = LLVMTypeOf(src2);

	assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

	if (LLVMGetTypeKind(src1_type) == LLVMPointerTypeKind &&
	    LLVMGetTypeKind(src2_type) != LLVMPointerTypeKind) {
		src2 = LLVMBuildIntToPtr(ctx->builder, src2, src1_type, "");
	} else if (LLVMGetTypeKind(src2_type) == LLVMPointerTypeKind &&
		   LLVMGetTypeKind(src1_type) != LLVMPointerTypeKind) {
		src1 = LLVMBuildIntToPtr(ctx->builder, src1, src2_type, "");
	}

	LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
				       ctx->i32_0, "");
	return LLVMBuildSelect(ctx->builder, v,
			       ac_to_integer_or_pointer(ctx, src1),
			       ac_to_integer_or_pointer(ctx, src2), "");
}
static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx,
			      LLVMValueRef src0)
{
	return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}
static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
				    const char *intrin,
				    LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMTypeRef ret_type;
	LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
	LLVMValueRef res;
	LLVMValueRef params[] = { src0, src1 };
	ret_type = LLVMStructTypeInContext(ctx->context, types,
					   2, false);

	res = ac_build_intrinsic(ctx, intrin, ret_type,
				 params, 2, AC_FUNC_ATTR_READNONE);

	res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
	res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
	return res;
}
static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
			     LLVMValueRef src0,
			     unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
					   LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
					   "");
	result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

	switch (bitsize) {
	case 16:
		return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
			     LLVMValueRef src0)
{
	src0 = ac_to_float(ctx, src0);
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
			     LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
			     ctx->i32, "");
}
static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx,
			     LLVMValueRef src0,
			     unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

	switch (bitsize) {
	case 8:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
	case 16:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}
static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx,
			     LLVMValueRef src0)
{
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
			     LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
			     ctx->i32, "");
}
static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx,
			       LLVMValueRef src0)
{
	LLVMValueRef result;
	LLVMValueRef cond = NULL;

	src0 = ac_to_float(ctx, src0);
	result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

	if (ctx->chip_class >= GFX8) {
		LLVMValueRef args[2];
		/* Check if the result is a denormal - and flush to 0 if so. */
		args[0] = result;
		args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
		cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
	}

	/* need to convert back up to f32 */
	result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

	if (ctx->chip_class >= GFX8)
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	else {
		/* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
		 * so compare the result and flush to 0 if it's smaller.
		 */
		LLVMValueRef temp, cond2;
		temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
		cond = LLVMBuildFCmp(ctx->builder, LLVMRealOGT,
				     LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
				     temp, "");
		cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealONE,
				      temp, ctx->f32_0, "");
		cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	}
	return result;
}
static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
				   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}
static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx,
				   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}
static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx,
			     LLVMValueRef bits, LLVMValueRef offset)
{
	/* mask = ((1 << bits) - 1) << offset */
	return LLVMBuildShl(ctx->builder,
			    LLVMBuildSub(ctx->builder,
					 LLVMBuildShl(ctx->builder,
						      ctx->i32_1,
						      bits, ""),
					 ctx->i32_1, ""),
			    offset, "");
}
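/* Worked example: bits = 5, offset = 8 gives ((1 << 5) - 1) << 8 = 0x1f00,
 * i.e. a run of 5 set bits starting at bit 8. */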
static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx,
					 LLVMValueRef mask, LLVMValueRef insert,
					 LLVMValueRef base)
{
	/* Calculate:
	 *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
	 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
	 */
	return LLVMBuildXor(ctx->builder, base,
			    LLVMBuildAnd(ctx->builder, mask,
					 LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}
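/* Sanity check of the identity above: for a bit where mask = 1 the result is
 * base ^ (insert ^ base) = insert, and for a bit where mask = 0 it is
 * base ^ 0 = base, which is exactly (mask & insert) | (~mask & base). */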
static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx,
				   LLVMValueRef src0,
				   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
							LLVMValueRef args[2]))
{
	LLVMValueRef comp[2];

	src0 = ac_to_float(ctx, src0);
	comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
	comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

	return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}
static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
					  LLVMValueRef src0)
{
	LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
	LLVMValueRef temps[2], val;
	int i;

	for (i = 0; i < 2; i++) {
		val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
		val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
		val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
		temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
	}
	return ac_build_gather_values(ctx, temps, 2);
}
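/* Lane 0 of the result comes from the low 16 bits of src0 and lane 1 from
 * the high 16 bits; each half is bitcast to f16 and widened to f32. */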
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
			      nir_op op,
			      LLVMValueRef src0)
{
	unsigned mask;
	int idx;
	LLVMValueRef result;

	if (op == nir_op_fddx_fine)
		mask = AC_TID_MASK_LEFT;
	else if (op == nir_op_fddy_fine)
		mask = AC_TID_MASK_TOP;
	else
		mask = AC_TID_MASK_TOP_LEFT;

	/* For DDX we want the next X pixel, for DDY the next Y pixel. */
	if (op == nir_op_fddx_fine ||
	    op == nir_op_fddx_coarse ||
	    op == nir_op_fddx)
		idx = 1;
	else
		idx = 2;

	result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
	return result;
}
struct waterfall_context {
	LLVMBasicBlockRef phi_bb[2];
	bool use_waterfall;
};

/* To deal with divergent descriptors we can create a loop that handles all
 * lanes with the same descriptor on a given iteration (henceforth a
 * waterfall loop).
 *
 * These helpers create the begin and end of the loop, leaving the caller
 * to implement the body.
 *
 * Params:
 *  - ctx is the usual nir context
 *  - wctx is a temporary struct containing some loop info. Can be left uninitialized.
 *  - value is the possibly divergent value for which we built the loop
 *  - divergent is whether value is actually divergent. If false we just pass
 *    things through.
 */
static LLVMValueRef enter_waterfall(struct ac_nir_context *ctx,
				    struct waterfall_context *wctx,
				    LLVMValueRef value, bool divergent)
{
	/* If the app claims the value is divergent but it is constant we can
	 * end up with a dynamic index of NULL. */
	if (!value)
		divergent = false;

	wctx->use_waterfall = divergent;
	if (!divergent)
		return value;

	ac_build_bgnloop(&ctx->ac, 6000);

	LLVMValueRef scalar_value = ac_build_readlane(&ctx->ac, value, NULL);

	LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, value,
					    scalar_value, "uniform_active");

	wctx->phi_bb[0] = LLVMGetInsertBlock(ctx->ac.builder);
	ac_build_ifcc(&ctx->ac, active, 6001);

	return scalar_value;
}
static LLVMValueRef exit_waterfall(struct ac_nir_context *ctx,
				   struct waterfall_context *wctx,
				   LLVMValueRef value)
{
	LLVMValueRef ret = NULL;
	LLVMValueRef phi_src[2];
	LLVMValueRef cc_phi_src[2] = {
		LLVMConstInt(ctx->ac.i32, 0, false),
		LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
	};

	if (!wctx->use_waterfall)
		return value;

	wctx->phi_bb[1] = LLVMGetInsertBlock(ctx->ac.builder);

	ac_build_endif(&ctx->ac, 6001);

	if (value) {
		phi_src[0] = LLVMGetUndef(LLVMTypeOf(value));
		phi_src[1] = value;

		ret = ac_build_phi(&ctx->ac, LLVMTypeOf(value), 2, phi_src, wctx->phi_bb);
	}

	/*
	 * By using the optimization barrier on the exit decision, we decouple
	 * the operations from the break, and hence avoid LLVM hoisting the
	 * operation into the break block.
	 */
	LLVMValueRef cc = ac_build_phi(&ctx->ac, ctx->ac.i32, 2, cc_phi_src, wctx->phi_bb);
	ac_build_optimization_barrier(&ctx->ac, &cc);

	LLVMValueRef active = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, cc, ctx->ac.i32_0, "uniform_active2");
	ac_build_ifcc(&ctx->ac, active, 6002);
	ac_build_break(&ctx->ac);
	ac_build_endif(&ctx->ac, 6002);

	ac_build_endloop(&ctx->ac, 6000);
	return ret;
}
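/* Typical usage of the waterfall helpers (a sketch, mirroring the SSBO
 * paths further below):
 *
 *   struct waterfall_context wctx;
 *   LLVMValueRef rsrc = enter_waterfall(ctx, &wctx, index, divergent);
 *   ...emit the operation using the now-uniform rsrc...
 *   result = exit_waterfall(ctx, &wctx, result);
 */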
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
	LLVMValueRef src[4], result = NULL;
	unsigned num_components = instr->dest.dest.ssa.num_components;
	unsigned src_components;
	LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);

	assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
	switch (instr->op) {
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		src_components = 1;
		break;
	case nir_op_pack_half_2x16:
	case nir_op_pack_snorm_2x16:
	case nir_op_pack_unorm_2x16:
		src_components = 2;
		break;
	case nir_op_unpack_half_2x16:
		src_components = 1;
		break;
	case nir_op_cube_face_coord:
	case nir_op_cube_face_index:
		src_components = 3;
		break;
	default:
		src_components = num_components;
		break;
	}
	for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
		src[i] = get_alu_src(ctx, instr->src[i], src_components);

	switch (instr->op) {
	case nir_op_fneg:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
		if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
			/* fneg will be optimized by the backend compiler with the sign
			 * bit removed via XOR. This is probably an LLVM bug.
			 */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_ineg:
		result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
		break;
	case nir_op_inot:
		result = LLVMBuildNot(ctx->ac.builder, src[0], "");
		break;
	case nir_op_iadd:
		result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fadd:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fsub:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_isub:
		result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imul:
		result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imod:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_umod:
		result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fmod:
		/* lower_fmod only lowers 16-bit and 32-bit fmod */
		assert(instr->dest.dest.ssa.bit_size == 64);
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
					      ac_to_float_type(&ctx->ac, def_type), result);
		result = LLVMBuildFMul(ctx->ac.builder, src[1], result, "");
		result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
		break;
	case nir_op_irem:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_idiv:
		result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_udiv:
		result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fmul:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_frcp:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.amdgcn.rcp",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_iand:
		result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ior:
		result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ixor:
		result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishl:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ushr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ilt32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
		break;
	case nir_op_ine32:
		result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
		break;
	case nir_op_ieq32:
		result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
		break;
	case nir_op_ige32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
		break;
	case nir_op_ult32:
		result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
		break;
	case nir_op_uge32:
		result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
		break;
	case nir_op_feq32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
		break;
	case nir_op_fne32:
		result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
		break;
	case nir_op_flt32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
		break;
	case nir_op_fge32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
		break;
	case nir_op_fabs:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		if (ctx->ac.float_mode == AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO) {
			/* fabs will be optimized by the backend compiler with the sign
			 * bit removed via AND.
			 */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_iabs:
		result = emit_iabs(&ctx->ac, src[0]);
		break;
	case nir_op_imax:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imin:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umax:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umin:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_isign:
		result = ac_build_isign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsign:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fsign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_ffloor:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ftrunc:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fceil:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fround_even:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ffract:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fract(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsin:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fcos:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fsqrt:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fexp2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_flog2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_frsq:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
					      ac_to_float_type(&ctx->ac, def_type), src[0]);
		result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(result), 1.0), result);
		break;
	case nir_op_frexp_exp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_exp(&ctx->ac, src[0],
					    ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
			result = LLVMBuildSExt(ctx->ac.builder, result,
					       ctx->ac.i32, "");
		break;
	case nir_op_frexp_sig:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_mant(&ctx->ac, src[0],
					     instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fpow:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		break;
	case nir_op_fmax:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_fmin:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = ac_build_canonicalize(&ctx->ac, result,
						       instr->dest.dest.ssa.bit_size);
		}
		break;
	case nir_op_ffma:
		/* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
		result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
		break;
	case nir_op_ldexp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
		else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
		else
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
		break;
	case nir_op_bfm:
		result = emit_bfm(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_bitfield_select:
		result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_ubfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
		break;
	case nir_op_ibfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
		break;
	case nir_op_bitfield_reverse:
		result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
		break;
	case nir_op_bit_count:
		result = ac_build_bit_count(&ctx->ac, src[0]);
		break;
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
			src[i] = ac_to_integer(&ctx->ac, src[i]);
		result = ac_build_gather_values(&ctx->ac, src, num_components);
		break;
	case nir_op_f2i8:
	case nir_op_f2i16:
	case nir_op_f2i32:
	case nir_op_f2i64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_f2u8:
	case nir_op_f2u16:
	case nir_op_f2u32:
	case nir_op_f2u64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2f16:
	case nir_op_i2f32:
	case nir_op_i2f64:
		result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2f16:
	case nir_op_u2f32:
	case nir_op_u2f64:
		result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_f2f16_rtz: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (LLVMTypeOf(src[0]) == ctx->ac.f64)
			src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
		LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
		result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
		break;
	}
	case nir_op_f2f16_rtne:
	case nir_op_f2f16:
	case nir_op_f2f32:
	case nir_op_f2f64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		else
			result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2u8:
	case nir_op_u2u16:
	case nir_op_u2u32:
	case nir_op_u2u64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2i8:
	case nir_op_i2i16:
	case nir_op_i2i32:
	case nir_op_i2i64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_b32csel:
		result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_find_lsb:
		result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
		break;
	case nir_op_ufind_msb:
		result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_ifind_msb:
		result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_uadd_carry:
		result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_usub_borrow:
		result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_b2f16:
	case nir_op_b2f32:
	case nir_op_b2f64:
		result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_f2b32:
		result = emit_f2b(&ctx->ac, src[0]);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
	case nir_op_b2i64:
		result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_i2b32:
		result = emit_i2b(&ctx->ac, src[0]);
		break;
	case nir_op_fquantize2f16:
		result = emit_f2f16(&ctx->ac, src[0]);
		break;
	case nir_op_umul_high:
		result = emit_umul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imul_high:
		result = emit_imul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_pack_half_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
		break;
	case nir_op_pack_snorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
		break;
	case nir_op_pack_unorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
		break;
	case nir_op_unpack_half_2x16:
		result = emit_unpack_half_2x16(&ctx->ac, src[0]);
		break;
	case nir_op_fddx:
	case nir_op_fddy:
	case nir_op_fddx_fine:
	case nir_op_fddy_fine:
	case nir_op_fddx_coarse:
	case nir_op_fddy_coarse:
		result = emit_ddxy(ctx, instr->op, src[0]);
		break;
	case nir_op_unpack_64_2x32_split_x: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32, "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}
	case nir_op_unpack_64_2x32_split_y: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32, "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}
	case nir_op_pack_64_2x32_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
		break;
	}
	case nir_op_pack_32_2x16_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}
	case nir_op_unpack_32_2x16_split_x: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16, "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}
	case nir_op_unpack_32_2x16_split_y: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16, "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}
	case nir_op_cube_face_coord: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef results[2];
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
						     ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
		results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
		LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
		results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
		results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
		result = ac_build_gather_values(&ctx->ac, results, 2);
		break;
	}
	case nir_op_cube_face_index: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
					    ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		break;
	}
	case nir_op_fmin3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
					      ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umin3:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		result = ac_build_umin(&ctx->ac, result, src[2]);
		break;
	case nir_op_imin3:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		result = ac_build_imin(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmax3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
					      ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umax3:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		result = ac_build_umax(&ctx->ac, result, src[2]);
		break;
	case nir_op_imax3:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		result = ac_build_imax(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmed3: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		src[2] = ac_to_float(&ctx->ac, src[2]);
		result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
					instr->dest.dest.ssa.bit_size);
		break;
	}
	case nir_op_imed3: {
		LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
		result = ac_build_imax(&ctx->ac, tmp1, tmp2);
		break;
	}
	case nir_op_umed3: {
		LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
		result = ac_build_umax(&ctx->ac, tmp1, tmp2);
		break;
	}
	default:
		fprintf(stderr, "Unknown NIR alu instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}

	if (result) {
		assert(instr->dest.dest.is_ssa);
		result = ac_to_integer_or_pointer(&ctx->ac, result);
		ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
	}
}
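/* Note: ALU results are stored in ssa_defs in integer (or pointer) form via
 * ac_to_integer_or_pointer above; consumers re-cast to float as needed with
 * ac_to_float, so the float/int distinction lives at the use sites. */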
static void visit_load_const(struct ac_nir_context *ctx,
			     const nir_load_const_instr *instr)
{
	LLVMValueRef values[4], value = NULL;
	LLVMTypeRef element_type =
		LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	for (unsigned i = 0; i < instr->def.num_components; ++i) {
		switch (instr->def.bit_size) {
		case 8:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u8, false);
			break;
		case 16:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u16, false);
			break;
		case 32:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u32, false);
			break;
		case 64:
			values[i] = LLVMConstInt(element_type,
						 instr->value[i].u64, false);
			break;
		default:
			fprintf(stderr,
				"unsupported nir load_const bit_size: %d\n",
				instr->def.bit_size);
			abort();
		}
	}
	if (instr->def.num_components > 1) {
		value = LLVMConstVector(values, instr->def.num_components);
	} else
		value = values[0];

	ctx->ssa_defs[instr->def.index] = value;
}
static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
	LLVMValueRef size =
		LLVMBuildExtractElement(ctx->ac.builder, descriptor,
					LLVMConstInt(ctx->ac.i32, 2, false), "");

	if (ctx->ac.chip_class == GFX8 && in_elements) {
		/* On GFX8, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(ctx->ac.builder, descriptor,
						ctx->ac.i32_1, "");
		stride = LLVMBuildLShr(ctx->ac.builder, stride,
				       LLVMConstInt(ctx->ac.i32, 16, false), "");
		stride = LLVMBuildAnd(ctx->ac.builder, stride,
				      LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

		size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
	}
	return size;
}
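/* Example: on GFX8, a descriptor whose size dword reads 64 (bytes) and whose
 * stride field reads 16 yields 64 / 16 = 4 elements for TXQ. */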
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
					  nir_variable *var,
					  struct ac_image_args *args,
					  const nir_tex_instr *instr)
{
	const struct glsl_type *type = glsl_without_array(var->type);
	enum glsl_base_type stype = glsl_get_sampler_result_type(type);
	LLVMValueRef wa_8888 = NULL;
	LLVMValueRef half_texel[2];
	LLVMValueRef result;

	assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef formats;
		LLVMValueRef data_format;
		LLVMValueRef wa_formats;

		formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

		data_format = LLVMBuildLShr(ctx->builder, formats,
					    LLVMConstInt(ctx->i32, 20, false), "");
		data_format = LLVMBuildAnd(ctx->builder, data_format,
					   LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
		wa_8888 = LLVMBuildICmp(
			ctx->builder, LLVMIntEQ, data_format,
			LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
			"");

		uint32_t wa_num_format =
			stype == GLSL_TYPE_UINT ?
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
		wa_formats = LLVMBuildAnd(ctx->builder, formats,
					  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
					  "");
		wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
					 LLVMConstInt(ctx->i32, wa_num_format, false), "");

		formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
		args->resource = LLVMBuildInsertElement(
			ctx->builder, args->resource, formats, ctx->i32_1, "");
	}

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct ac_image_args resinfo = {};
		LLVMBasicBlockRef bbs[2];

		LLVMValueRef unnorm = NULL;
		LLVMValueRef default_offset = ctx->f32_0;
		if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
		    !instr->is_array) {
			/* In vulkan, whether the sampler uses unnormalized
			 * coordinates or not is a dynamic property of the
			 * sampler. Hence, to figure out whether or not we
			 * need to divide by the texture size, we need to test
			 * the sampler at runtime. This tests the bit set by
			 * radv_init_sampler().
			 */
			LLVMValueRef sampler0 =
				LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
			sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
						 LLVMConstInt(ctx->i32, 15, false), "");
			sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
			unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
			default_offset = LLVMConstReal(ctx->f32, -0.5);
		}

		bbs[0] = LLVMGetInsertBlock(ctx->builder);
		if (wa_8888 || unnorm) {
			assert(!(wa_8888 && unnorm));
			LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
			/* Skip the texture size query entirely if we don't need it. */
			ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
			bbs[1] = LLVMGetInsertBlock(ctx->builder);
		}

		/* Query the texture size. */
		resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
		resinfo.opcode = ac_image_get_resinfo;
		resinfo.dmask = 0xf;
		resinfo.lod = ctx->i32_0;
		resinfo.resource = args->resource;
		resinfo.attributes = AC_FUNC_ATTR_READNONE;
		LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

		/* Compute -0.5 / size. */
		for (unsigned c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(ctx->builder, size,
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
			half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}

		if (wa_8888 || unnorm) {
			ac_build_endif(ctx, 2000);

			for (unsigned c = 0; c < 2; c++) {
				LLVMValueRef values[2] = { default_offset, half_texel[c] };
				half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
							     values, bbs);
			}
		}
	}

	for (unsigned c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
		args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	result = ac_build_image_opcode(ctx, args);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef tmp, tmp2;

		/* if the cube workaround is in place, f2i the result. */
		for (unsigned c = 0; c < 4; c++) {
			tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
			if (stype == GLSL_TYPE_UINT)
				tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
			else
				tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
			tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
			tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
			result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
		}
	}
	return result;
}
static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
	nir_deref_instr *texture_deref_instr = NULL;

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		default:
			break;
		}
	}
	return texture_deref_instr;
}
static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
					const nir_tex_instr *instr,
					struct ac_image_args *args)
{
	if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

		return ac_build_buffer_load_format(&ctx->ac,
						   args->resource,
						   args->coords[0],
						   ctx->ac.i32_0,
						   util_last_bit(mask),
						   0, true);
	}

	args->opcode = ac_image_sample;

	switch (instr->op) {
	case nir_texop_txf:
	case nir_texop_txf_ms:
	case nir_texop_samples_identical:
		args->opcode = args->level_zero ||
			       instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
					ac_image_load : ac_image_load_mip;
		args->level_zero = false;
		break;
	case nir_texop_txs:
	case nir_texop_query_levels:
		args->opcode = ac_image_get_resinfo;
		if (!args->lod)
			args->lod = ctx->ac.i32_0;
		args->level_zero = false;
		break;
	case nir_texop_tex:
		if (ctx->stage != MESA_SHADER_FRAGMENT) {
			args->level_zero = true;
		}
		break;
	case nir_texop_tg4:
		args->opcode = ac_image_gather4;
		args->level_zero = true;
		break;
	case nir_texop_lod:
		args->opcode = ac_image_get_lod;
		break;
	case nir_texop_fragment_fetch:
	case nir_texop_fragment_mask_fetch:
		args->opcode = ac_image_load;
		args->level_zero = false;
		break;
	default:
		break;
	}

	if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
		nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
		nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
		const struct glsl_type *type = glsl_without_array(var->type);
		enum glsl_base_type stype = glsl_get_sampler_result_type(type);
		if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
			return lower_gather4_integer(&ctx->ac, var, args, instr);
		}
	}

	/* Fixup for GFX9 which allocates 1D textures as 2D. */
	if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
		if ((args->dim == ac_image_2darray ||
		     args->dim == ac_image_2d) && !args->coords[1]) {
			args->coords[1] = ctx->ac.i32_0;
		}
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
			 ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
	if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
		/* Prevent texture instructions with implicit derivatives from being
		 * sunk into branches. */
		switch (instr->op) {
		case nir_texop_tex:
		case nir_texop_txb:
		case nir_texop_lod:
			args->attributes |= AC_FUNC_ATTR_CONVERGENT;
			break;
		default:
			break;
		}
	}

	return ac_build_image_opcode(&ctx->ac, args);
}
static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
						  nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr = get_src(ctx, instr->src[0]);
	LLVMValueRef index = get_src(ctx, instr->src[1]);

	LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
	LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
	return result;
}
static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
					     nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr, addr;
	LLVMValueRef src0 = get_src(ctx, instr->src[0]);
	unsigned index = nir_intrinsic_base(instr);

	addr = LLVMConstInt(ctx->ac.i32, index, 0);
	addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

	/* Load constant values from user SGPRS when possible, otherwise
	 * fallback to the default path that loads directly from memory.
	 */
	if (LLVMIsConstant(src0) &&
	    instr->dest.ssa.bit_size == 32) {
		unsigned count = instr->dest.ssa.num_components;
		unsigned offset = index;

		offset += LLVMConstIntGetZExtValue(src0);

		offset -= ctx->args->base_inline_push_consts;

		unsigned num_inline_push_consts = ctx->args->num_inline_push_consts;
		if (offset + count <= num_inline_push_consts) {
			LLVMValueRef push_constants[num_inline_push_consts];
			for (unsigned i = 0; i < num_inline_push_consts; i++)
				push_constants[i] = ac_get_arg(&ctx->ac,
							       ctx->args->inline_push_consts[i]);
			return ac_build_gather_values(&ctx->ac,
						      push_constants + offset,
						      count);
		}
	}

	ptr = LLVMBuildGEP(ctx->ac.builder,
			   ac_get_arg(&ctx->ac, ctx->args->push_constants), &addr, 1, "");

	if (instr->dest.ssa.bit_size == 8) {
		unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
		LLVMTypeRef vec_type = LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), 4 * load_dwords);
		ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
		LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

		LLVMValueRef params[3];
		if (load_dwords > 1) {
			LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i32, 2), "");
			params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
			params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
		} else {
			res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
			params[0] = ctx->ac.i32_0;
			params[1] = res;
		}
		params[2] = addr;
		res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

		res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
		if (instr->dest.ssa.num_components > 1)
			res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), instr->dest.ssa.num_components), "");
		return res;
	} else if (instr->dest.ssa.bit_size == 16) {
		unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
		LLVMTypeRef vec_type = LLVMVectorType(LLVMInt16TypeInContext(ctx->ac.context), 2 * load_dwords);
		ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
		LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
		LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
		cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
		LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
					LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
					LLVMConstInt(ctx->ac.i32, 4, false)};
		LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
		LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
		LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
		LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
		res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
		return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
	}

	ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

	return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
					  const nir_intrinsic_instr *instr)
{
	LLVMValueRef index = get_src(ctx, instr->src[0]);

	return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
	uint32_t new_mask = 0;
	for (unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
		if (mask & (1u << i))
			new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
	return new_mask;
}
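/* Worked example: widen_mask(0b101, 2) = 0b110011 - every set bit i expands
 * to the pair of bits (2*i, 2*i + 1). */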
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
					 unsigned start, unsigned count)
{
	LLVMValueRef mask[] = {
		ctx->i32_0, ctx->i32_1,
		LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

	unsigned src_elements = ac_get_llvm_num_components(src);

	if (count == src_elements) {
		return src;
	} else if (count == 1) {
		assert(start < src_elements);
		return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
	} else {
		assert(start + count <= src_elements);
		LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
		return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
	}
}
static unsigned get_cache_policy(struct ac_nir_context *ctx,
				 enum gl_access_qualifier access,
				 bool may_store_unaligned,
				 bool writeonly_memory)
{
	unsigned cache_policy = 0;

	/* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
	 * store opcodes not aligned to a dword are affected. The only way to
	 * get unaligned stores is through shader images.
	 */
	if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
	     /* If this is write-only, don't keep data in L1 to prevent
	      * evicting L1 cache lines that may be needed by other
	      * instructions.
	      */
	     writeonly_memory ||
	     access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
		cache_policy |= ac_glc;
	}

	if (access & ACCESS_STREAM_CACHE_POLICY)
		cache_policy |= ac_slc;

	return cache_policy;
}
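/* Example: a write-only store, a coherent/volatile access, or a potentially
 * unaligned image store on GFX6 all end up with ac_glc set; adding
 * ACCESS_STREAM_CACHE_POLICY additionally sets ac_slc. */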
static LLVMValueRef enter_waterfall_ssbo(struct ac_nir_context *ctx,
					 struct waterfall_context *wctx,
					 const nir_intrinsic_instr *instr,
					 nir_src src)
{
	return enter_waterfall(ctx, wctx, get_src(ctx, src),
			       nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
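/* Store to an SSBO. The write mask is split into contiguous runs, and each
 * run is further split so that every emitted store is at most 16 bytes
 * (4 dwords) and, for 16-bit components, starts at an even component; see
 * the comments in the body below for the individual cases. */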
static void visit_store_ssbo(struct ac_nir_context *ctx,
			     nir_intrinsic_instr *instr)
{
	if (ctx->ac.postponed_kill) {
		LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
						  ctx->ac.postponed_kill, "");
		ac_build_ifcc(&ctx->ac, cond, 7000);
	}

	LLVMValueRef src_data = get_src(ctx, instr->src[0]);
	int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
	unsigned writemask = nir_intrinsic_write_mask(instr);
	enum gl_access_qualifier access = nir_intrinsic_access(instr);
	bool writeonly_memory = access & ACCESS_NON_READABLE;
	unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

	struct waterfall_context wctx;
	LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[1]);

	LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, true);
	LLVMValueRef base_data = src_data;
	base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
	LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

	while (writemask) {
		int start, count;
		LLVMValueRef data, offset;
		LLVMTypeRef data_type;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation with LLVM < 9, split 3-element
		 * writes into a 2-element and a 1-element write. */
		if (count == 3 &&
		    (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
			writemask |= 1 << (start + 2);
			count = 2;
		}
		int num_bytes = count * elem_size_bytes; /* size of this run in bytes */

		/* We can only store 4 dwords at a time; larger runs can only
		 * happen for 64-bit vectors. */
		if (num_bytes > 16) {
			writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
			count = 2;
			num_bytes = 16;
		}

		/* Check the alignment of 16-bit stores. */
		if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
			writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
			count = 1;
			num_bytes = 2;
		}
		data = extract_vector_range(&ctx->ac, base_data, start, count);

		offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
				      LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

		if (num_bytes == 1) {
			ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
						    offset, ctx->ac.i32_0,
						    cache_policy);
		} else if (num_bytes == 2) {
			ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
						     offset, ctx->ac.i32_0,
						     cache_policy);
		} else {
			int num_channels = num_bytes / 4;

			switch (num_bytes) {
			case 16: /* v4f32 */
				data_type = ctx->ac.v4f32;
				break;
			case 12: /* v3f32 */
				data_type = ctx->ac.v3f32;
				break;
			case 8: /* v2f32 */
				data_type = ctx->ac.v2f32;
				break;
			case 4: /* f32 */
				data_type = ctx->ac.f32;
				break;
			default:
				unreachable("Malformed vector store.");
			}
			data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

			ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
						    num_channels, offset,
						    ctx->ac.i32_0, 0,
						    cache_policy);
		}
	}

	exit_waterfall(ctx, &wctx, NULL);

	if (ctx->ac.postponed_kill)
		ac_build_endif(&ctx->ac, 7000);
}
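/* 64-bit SSBO compare-and-swap is emulated below by reconstructing a global
 * pointer from the descriptor's base address and issuing a plain cmpxchg
 * (presumably because no 64-bit buffer atomic intrinsic is available here).
 * With robustBufferAccess the operation is predicated on offset < size, and
 * out-of-bounds swaps return 0 via a phi. */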
static LLVMValueRef
emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                       LLVMValueRef descriptor,
                       LLVMValueRef offset,
                       LLVMValueRef compare,
                       LLVMValueRef exchange)
{
   LLVMBasicBlockRef start_block = NULL, then_block = NULL;
   if (ctx->abi->robust_buffer_access) {
      LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      start_block = LLVMGetInsertBlock(ctx->ac.builder);

      ac_build_ifcc(&ctx->ac, cond, -1);

      then_block = LLVMGetInsertBlock(ctx->ac.builder);
   }

   LLVMValueRef ptr_parts[2] = {
      ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
      LLVMBuildAnd(ctx->ac.builder,
                   ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                   LLVMConstInt(ctx->ac.i32, 65535, 0), "")
   };

   ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
   ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

   offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

   LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
   ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
   ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr,
                           LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

   LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange,
                                                  "singlethread-one-as");
   result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

   if (ctx->abi->robust_buffer_access) {
      ac_build_endif(&ctx->ac, -1);

      LLVMBasicBlockRef incoming_blocks[2] = {
         start_block,
         then_block,
      };

      LLVMValueRef incoming_values[2] = {
         LLVMConstInt(ctx->ac.i64, 0, 0),
         result,
      };

      LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
      LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);

      return ret;
   } else {
      return result;
   }
}
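/* SSBO atomics. The buffer descriptor is made uniform by a waterfall loop
 * when it is divergent, then the operation is emitted as an
 * llvm.amdgcn.raw.buffer.atomic.* intrinsic (LLVM 9+) or the legacy
 * llvm.amdgcn.buffer.atomic.* form. 64-bit compare-and-swap takes the
 * separate emit_ssbo_comp_swap_64() path above.
 */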
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7001);
   }

   LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
   const char *op;
   char name[64], type[8];
   LLVMValueRef params[6], descriptor;
   LLVMValueRef result;
   int arg_count = 0;

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   switch (instr->intrinsic) {
   case nir_intrinsic_ssbo_atomic_add:
      op = "add";
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      op = "smin";
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      op = "umin";
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      op = "smax";
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      op = "umax";
      break;
   case nir_intrinsic_ssbo_atomic_and:
      op = "and";
      break;
   case nir_intrinsic_ssbo_atomic_or:
      op = "or";
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      op = "xor";
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      op = "swap";
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      op = "cmpswap";
      break;
   default:
      abort();
   }

   descriptor = ctx->abi->load_ssbo(ctx->abi,
                                    rsrc_base,
                                    true);

   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
       return_type == ctx->ac.i64) {
      result = emit_ssbo_comp_swap_64(ctx, descriptor,
                                      get_src(ctx, instr->src[1]),
                                      get_src(ctx, instr->src[2]),
                                      get_src(ctx, instr->src[3]));
   } else {
      if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
         params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
      }
      params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
      params[arg_count++] = descriptor;

      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy with
          * LLVM 8, see r358579.
          */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i32_0; /* soffset */
         params[arg_count++] = ctx->ac.i32_0; /* slc */

         ac_build_type_name_for_intr(return_type, type, sizeof(type));
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
      } else {
         params[arg_count++] = ctx->ac.i32_0; /* vindex */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i1false; /* slc */

         assert(return_type == ctx->ac.i32);
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.buffer.atomic.%s", op);
      }

      result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
                                  arg_count, 0);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7001);
   return result;
}
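/* SSBO loads are emitted in chunks of at most 16 bytes (one 4-dword buffer
 * load). 8- and 16-bit components that are not known to be 4-byte aligned
 * fall back to single-element byte/short loads.
 */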
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                      nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   int elem_size_bytes = instr->dest.ssa.bit_size / 8;
   int num_components = instr->num_components;
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   unsigned cache_policy = get_cache_policy(ctx, access, false, false);

   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
   LLVMValueRef vindex = ctx->ac.i32_0;

   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
   LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

   LLVMValueRef results[4];
   for (int i = 0; i < num_components;) {
      int num_elems = num_components - i;
      if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
         num_elems = 1;
      if (num_elems * elem_size_bytes > 16)
         num_elems = 16 / elem_size_bytes;
      int load_bytes = num_elems * elem_size_bytes;

      LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

      LLVMValueRef ret;

      if (load_bytes == 1) {
         ret = ac_build_tbuffer_load_byte(&ctx->ac,
                                          rsrc,
                                          offset,
                                          ctx->ac.i32_0,
                                          immoffset,
                                          cache_policy);
      } else if (load_bytes == 2) {
         ret = ac_build_tbuffer_load_short(&ctx->ac,
                                           rsrc,
                                           offset,
                                           ctx->ac.i32_0,
                                           immoffset,
                                           cache_policy);
      } else {
         int num_channels = util_next_power_of_two(load_bytes) / 4;
         bool can_speculate = access & ACCESS_CAN_REORDER;

         ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
                                    vindex, offset, immoffset, 0,
                                    cache_policy, can_speculate, false);
      }

      LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
      ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

      LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

      for (unsigned j = 0; j < num_elems; j++) {
         results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret,
                                                  LLVMConstInt(ctx->ac.i32, j, false), "");
      }
      i += num_elems;
   }

   LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
   return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef
enter_waterfall_ubo(struct ac_nir_context *ctx,
                    struct waterfall_context *wctx,
                    const nir_intrinsic_instr *instr)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
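/* UBO loads: 8- and 16-bit destinations are assembled from per-component
 * typed byte/short loads; everything else uses one buffer load (which may
 * be speculated and may use SMEM) and is trimmed to the requested number
 * of components.
 */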
static LLVMValueRef
visit_load_ubo_buffer(struct ac_nir_context *ctx,
                      nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);

   LLVMValueRef ret;
   LLVMValueRef rsrc = rsrc_base;
   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   int num_components = instr->num_components;

   if (ctx->abi->load_ubo)
      rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

   if (instr->dest.ssa.bit_size == 64)
      num_components *= 2;

   if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
      unsigned load_bytes = instr->dest.ssa.bit_size / 8;
      LLVMValueRef results[num_components];
      for (unsigned i = 0; i < num_components; ++i) {
         LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                               load_bytes * i, 0);

         if (load_bytes == 1) {
            results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                    rsrc,
                                                    offset,
                                                    ctx->ac.i32_0,
                                                    immoffset,
                                                    0);
         } else {
            assert(load_bytes == 2);
            results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                     rsrc,
                                                     offset,
                                                     ctx->ac.i32_0,
                                                     immoffset,
                                                     0);
         }
      }
      ret = ac_build_gather_values(&ctx->ac, results, num_components);
   } else {
      ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                 NULL, 0, 0, true, true);

      ret = ac_trim_vector(&ctx->ac, ret, num_components);
   }

   ret = LLVMBuildBitCast(ctx->ac.builder, ret,
                          get_def_type(ctx, &instr->dest.ssa), "");

   return exit_waterfall(ctx, &wctx, ret);
}
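/* Walk a deref chain and split the total offset into a constant part in
 * attribute slots (returned via const_out) and a variable part (an i32
 * value returned via indir_out, NULL if none). For per-vertex I/O, the
 * outermost array index is returned separately as the vertex index.
 */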
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += size *
               nir_src_as_uint(path.path[idx_lvl]->arr.index);
         } else {
            LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder,
                                                  LLVMConstInt(ctx->ac.i32, size, 0),
                                                  get_src(ctx, path.path[idx_lvl]->arr.index), "");
            if (offset)
               offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
            else
               offset = array_off;
         }
      } else
         unreachable("Unhandled deref type in get_deref_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, const_offset, 0),
                            "");

   *const_out = const_offset;
   *indir_out = offset;
}
static LLVMValueRef
load_tess_varyings(struct ac_nir_context *ctx,
                   nir_intrinsic_instr *instr,
                   bool load_inputs)
{
   LLVMValueRef result;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef indir_index = NULL;
   unsigned const_index = 0;

   nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

   unsigned location = var->data.location;
   unsigned driver_location = var->data.driver_location;
   const bool is_patch = var->data.patch ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
   const bool is_compact = var->data.compact;

   get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                    false, NULL, is_patch ? NULL : &vertex_index,
                    &const_index, &indir_index);

   LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

   LLVMTypeRef src_component_type;
   if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
      src_component_type = LLVMGetElementType(dest_type);
   else
      src_component_type = dest_type;

   result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                         vertex_index, indir_index,
                                         const_index, location, driver_location,
                                         var->data.location_frac,
                                         instr->num_components,
                                         is_patch, is_compact, load_inputs);
   if (instr->dest.ssa.bit_size == 16) {
      result = ac_to_integer(&ctx->ac, result);
      result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
   }
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
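/* Load of a NIR variable. Depending on the variable mode this reads
 * driver-lowered inputs, the alloca arrays backing locals and outputs,
 * tess varyings through the ABI, or global memory through a pointer.
 * 64-bit values are handled as pairs of 32-bit channels and cast back
 * at the end.
 */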
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
                                   nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef values[8];
   int idx = 0;
   int ve = instr->dest.ssa.num_components;
   unsigned comp = 0;
   LLVMValueRef indir_index;
   LLVMValueRef ret;
   unsigned const_index;
   unsigned stride = 4;
   int mode = deref->mode;

   if (var) {
      bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
                   var->data.mode == nir_var_shader_in;
      idx = var->data.driver_location;
      comp = var->data.location_frac;
      mode = var->data.mode;

      get_deref_offset(ctx, deref, vs_in, NULL, NULL,
                       &const_index, &indir_index);

      if (var->data.compact) {
         stride = 1;
         const_index += comp;
         comp = 0;
      }
   }

   if (instr->dest.ssa.bit_size == 64 &&
       (deref->mode == nir_var_shader_in ||
        deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp))
      ve *= 2;

   switch (mode) {
   case nir_var_shader_in:
      if (ctx->stage == MESA_SHADER_TESS_CTRL ||
          ctx->stage == MESA_SHADER_TESS_EVAL) {
         return load_tess_varyings(ctx, instr, true);
      }

      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
         LLVMValueRef indir_index;
         unsigned const_index, vertex_index;
         get_deref_offset(ctx, deref, false, &vertex_index, NULL,
                          &const_index, &indir_index);
         assert(indir_index == NULL);

         return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                      var->data.driver_location,
                                      var->data.location_frac,
                                      instr->num_components, vertex_index, const_index, type);
      }

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type,
                  ctx->stage == MESA_SHADER_VERTEX);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->inputs + idx + chan, count,
                  stride, false, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else
            values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < ve; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
         }
      }
      break;
   case nir_var_shader_out:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         return load_tess_varyings(ctx, instr, false);
      }

      if (ctx->stage == MESA_SHADER_FRAGMENT &&
          var->data.fb_fetch_output &&
          ctx->abi->emit_fbfetch)
         return ctx->abi->emit_fbfetch(ctx->abi);

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec,
                                                   indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder,
                                         ctx->abi->outputs[idx + chan + const_index * stride],
                                         "");
         }
      }
      break;
   case nir_var_mem_global: {
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;

      LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
      if (stride != natural_stride) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(result_type),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
            values[i] = LLVMBuildLoad(ctx->ac.builder,
                                      ac_build_gep_ptr(&ctx->ac, address, offset), "");
         }
         return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
         return val;
      }
   }
   default:
      unreachable("unhandled variable mode");
   }
   ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7002);
   }

   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef temp_ptr, value;
   int idx = 0;
   unsigned comp = 0;
   LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
   int writemask = instr->const_index[0];
   LLVMValueRef indir_index;
   unsigned const_index;

   if (var) {
      get_deref_offset(ctx, deref, false,
                       NULL, NULL, &const_index, &indir_index);
      idx = var->data.driver_location;
      comp = var->data.location_frac;

      if (var->data.compact) {
         const_index += comp;
         comp = 0;
      }
   }

   if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
       (deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp)) {

      src = LLVMBuildBitCast(ctx->ac.builder, src,
                             LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
                             "");

      writemask = widen_mask(writemask, 2);
   }

   writemask = writemask << comp;

   switch (deref->mode) {
   case nir_var_shader_out:

      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         LLVMValueRef vertex_index = NULL;
         LLVMValueRef indir_index = NULL;
         unsigned const_index = 0;
         const bool is_patch = var->data.patch ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

         get_deref_offset(ctx, deref, false, NULL,
                          is_patch ? NULL : &vertex_index,
                          &const_index, &indir_index);

         ctx->abi->store_tcs_outputs(ctx->abi, var,
                                     vertex_index, indir_index,
                                     const_index, src, writemask);
         break;
      }

      for (unsigned chan = 0; chan < 8; chan++) {
         int stride = 4;
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

         if (var->data.compact)
            stride = 1;
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
                                        count, stride, tmp_vec);

         } else {
            temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < 8; chan++) {
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan);
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            count -= chan / 4;
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  4, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
                                        count, 4, tmp_vec);
         } else {
            temp_ptr = ctx->locals[idx + chan + const_index * 4];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_mem_global: {
      int writemask = instr->const_index[0];
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMValueRef val = get_src(ctx, instr->src[1]);

      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;

      LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                             LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
      address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

      if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
          stride == natural_stride) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         val = LLVMBuildBitCast(ctx->ac.builder, val,
                                LLVMGetElementType(LLVMTypeOf(address)), "");
         LLVMBuildStore(ctx->ac.builder, val, address);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(LLVMTypeOf(val)),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         for (unsigned chan = 0; chan < 4; chan++) {
            if (!(writemask & (1 << chan)))
               continue;

            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

            LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
            LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
                                                    chan);
            src = LLVMBuildBitCast(ctx->ac.builder, src,
                                   LLVMGetElementType(LLVMTypeOf(ptr)), "");
            LLVMBuildStore(ctx->ac.builder, src, ptr);
         }
      }
      break;
   }
   default:
      abort();
      break;
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7002);
}
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_BUF:
      return 1;
   case GLSL_SAMPLER_DIM_1D:
      return array ? 2 : 1;
   case GLSL_SAMPLER_DIM_2D:
      return array ? 3 : 2;
   case GLSL_SAMPLER_DIM_MS:
      return array ? 4 : 3;
   case GLSL_SAMPLER_DIM_3D:
   case GLSL_SAMPLER_DIM_CUBE:
      return 3;
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_SUBPASS:
      return 2;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return 3;
   default:
      break;
   }
   return 0;
}
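/* On chips with compressed MSAA surfaces, the FMASK records which fragment
 * slice actually holds each sample, so the API-visible sample index has to
 * be remapped through the FMASK before the image is addressed.
 */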
static LLVMValueRef
adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                LLVMValueRef coord_x, LLVMValueRef coord_y,
                                LLVMValueRef coord_z,
                                LLVMValueRef sample_index,
                                LLVMValueRef fmask_desc_ptr)
{
   unsigned sample_chan = coord_z ? 3 : 2;
   LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
   addr[sample_chan] = sample_index;

   ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
   return addr[sample_chan];
}
static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
   assert(instr->src[0].is_ssa);
   return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
static LLVMValueRef
get_image_descriptor(struct ac_nir_context *ctx,
                     const nir_intrinsic_instr *instr,
                     LLVMValueRef dynamic_index,
                     enum ac_descriptor_type desc_type,
                     bool write)
{
   nir_deref_instr *deref_instr =
      instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
      nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

   return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
}
static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             LLVMValueRef dynamic_desc_index,
                             struct ac_image_args *args,
                             enum glsl_sampler_dim dim,
                             bool is_array)
{
   LLVMValueRef src0 = get_src(ctx, instr->src[1]);
   LLVMValueRef masks[] = {
      LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
      LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
   };
   LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

   int count;
   ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
                                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
   assert(!add_frag_pos && "Input attachments should be lowered by this point.");
   count = image_type_to_components_count(dim, is_array);

   if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
                 instr->intrinsic == nir_intrinsic_bindless_image_load)) {
      LLVMValueRef fmask_load_address[3];

      fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
      if (is_array)
         fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
      else
         fmask_load_address[2] = NULL;

      sample_index = adjust_sample_index_using_fmask(&ctx->ac,
                                                     fmask_load_address[0],
                                                     fmask_load_address[1],
                                                     fmask_load_address[2],
                                                     sample_index,
                                                     get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                                                                      AC_DESC_FMASK, &instr->instr, dynamic_desc_index, true, false));
   }
   if (count == 1 && !gfx9_1d) {
      if (instr->src[1].ssa->num_components)
         args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      else
         args->coords[0] = src0;
   } else {
      int chan;
      if (is_ms)
         count--;
      for (chan = 0; chan < count; ++chan) {
         args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
      }

      if (gfx9_1d) {
         if (is_array) {
            args->coords[2] = args->coords[1];
            args->coords[1] = ctx->ac.i32_0;
         } else
            args->coords[1] = ctx->ac.i32_0;
         count++;
      }
      if (ctx->ac.chip_class == GFX9 &&
          dim == GLSL_SAMPLER_DIM_2D &&
          !is_array) {
         /* The hw can't bind a slice of a 3D image as a 2D
          * image, because it ignores BASE_ARRAY if the target
          * is 3D. The workaround is to read BASE_ARRAY and set
          * it as the 3rd address operand for all 2D images.
          */
         LLVMValueRef first_layer, const5, mask;

         const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
         mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
         first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
         first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

         args->coords[count] = first_layer;
         count++;
      }

      if (is_ms) {
         args->coords[count] = sample_index;
         count++;
      }
   }
}
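/* Buffer image descriptor. The GFX9 + LLVM < 9 branch rewrites the dword
 * that holds NUM_RECORDS to max(elem_count, stride >> 16) before an atomic
 * is used; this appears to work around how those LLVM versions bounds-check
 * buffer atomics on GFX9.
 */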
static LLVMValueRef
get_image_buffer_descriptor(struct ac_nir_context *ctx,
                            const nir_intrinsic_instr *instr,
                            LLVMValueRef dynamic_index,
                            bool write, bool atomic)
{
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
   if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
      LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

      LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                    LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                    elem_count, stride, "");

      rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                    LLVMConstInt(ctx->ac.i32, 2, 0), "");
   }
   return rsrc;
}
static LLVMValueRef
enter_waterfall_image(struct ac_nir_context *ctx,
                      struct waterfall_context *wctx,
                      const nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref_instr = NULL;

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
      deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);

   LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
   return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
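/* Image loads. GLSL_SAMPLER_DIM_BUF uses the buffer-load-format path with
 * only the components that are actually read; all other dimensions become
 * an image_load / image_load_mip with coordinates assembled by
 * get_image_coords().
 */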
static LLVMValueRef
visit_image_load(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, false, false);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
      unsigned num_channels = util_last_bit(mask);
      LLVMValueRef rsrc, vindex;

      rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
      vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      bool can_speculate = access & ACCESS_CAN_REORDER;
      res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
                                        ctx->ac.i32_0, num_channels,
                                        args.cache_policy,
                                        can_speculate);
      res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

      res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
      res = ac_to_integer(&ctx->ac, res);
   } else {
      bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;

      args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[3]);
      args.dmask = 15;
      args.attributes = AC_FUNC_ATTR_READONLY;

      res = ac_build_image_opcode(&ctx->ac, &args);
   }
   return exit_waterfall(ctx, &wctx, res);
}
static void visit_image_store(struct ac_nir_context *ctx,
                              const nir_intrinsic_instr *instr,
                              bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7003);
   }

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   bool writeonly_memory = access & ACCESS_NON_READABLE;
   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, false);
      LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      unsigned src_channels = ac_get_llvm_num_components(src);
      LLVMValueRef vindex;

      if (src_channels == 3)
         src = ac_build_expand_to_vec4(&ctx->ac, src, 3);

      vindex = LLVMBuildExtractElement(ctx->ac.builder,
                                       get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex,
                                   ctx->ac.i32_0, src_channels,
                                   args.cache_policy);
   } else {
      bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;

      args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
      args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[4]);
      args.dmask = 15;

      ac_build_image_opcode(&ctx->ac, &args);
   }

   exit_waterfall(ctx, &wctx, NULL);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7003);
}
static LLVMValueRef
visit_image_atomic(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr,
                   bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7004);
   }

   LLVMValueRef params[7];
   int param_count = 0;

   bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
                  instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
   const char *atomic_name;
   char intrinsic_name[64];
   enum ac_atomic_op atomic_subop;
   ASSERTED int length;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      if (instr->intrinsic == nir_intrinsic_bindless_image_atomic_imin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_imax ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umax) {
         ASSERTED const GLenum format = nir_intrinsic_format(instr);
         assert(format == GL_R32UI || format == GL_R32I);
      }
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
      atomic_name = "add";
      atomic_subop = ac_atomic_add;
      break;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
      atomic_name = "smin";
      atomic_subop = ac_atomic_smin;
      break;
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
      atomic_name = "umin";
      atomic_subop = ac_atomic_umin;
      break;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
      atomic_name = "smax";
      atomic_subop = ac_atomic_smax;
      break;
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
      atomic_name = "umax";
      atomic_subop = ac_atomic_umax;
      break;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
      atomic_name = "and";
      atomic_subop = ac_atomic_and;
      break;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
      atomic_name = "or";
      atomic_subop = ac_atomic_or;
      break;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      atomic_name = "xor";
      atomic_subop = ac_atomic_xor;
      break;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
      atomic_name = "swap";
      atomic_subop = ac_atomic_swap;
      break;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      atomic_name = "cmpswap";
      atomic_subop = 0; /* not used */
      break;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_inc_wrap: {
      atomic_name = "inc";
      atomic_subop = ac_atomic_inc_wrap;
      /* ATOMIC_INC instruction does:
       *      value = (value + 1) % (data + 1)
       * but we want:
       *      value = (value + 1) % data
       * So replace 'data' by 'data - 1'.
       */
      ctx->ssa_defs[instr->src[3].ssa->index] =
         LLVMBuildSub(ctx->ac.builder,
                      ctx->ssa_defs[instr->src[3].ssa->index],
                      ctx->ac.i32_1, "");
      break;
   }
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      atomic_name = "dec";
      atomic_subop = ac_atomic_dec_wrap;
      break;
   default:
      abort();
   }

   if (cmpswap)
      params[param_count++] = get_src(ctx, instr->src[4]);
   params[param_count++] = get_src(ctx, instr->src[3]);

   LLVMValueRef result;
   if (dim == GLSL_SAMPLER_DIM_BUF) {
      params[param_count++] = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, true);
      params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                      ctx->ac.i32_0, ""); /* vindex */
      params[param_count++] = ctx->ac.i32_0; /* voffset */
      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy
          * with LLVM 8, see r358579.
          */
         params[param_count++] = ctx->ac.i32_0; /* soffset */
         params[param_count++] = ctx->ac.i32_0; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
      } else {
         params[param_count++] = ctx->ac.i1false; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.buffer.atomic.%s", atomic_name);
      }

      assert(length < sizeof(intrinsic_name));
      result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
                                  params, param_count, 0);
   } else {
      struct ac_image_args args = {};
      args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
      args.atomic = atomic_subop;
      args.data[0] = params[0];
      if (cmpswap)
         args.data[1] = params[1];
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

      result = ac_build_image_opcode(&ctx->ac, &args);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7004);
   return result;
}
static LLVMValueRef
visit_image_samples(struct ac_nir_context *ctx,
                    nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);

   LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);

   return exit_waterfall(ctx, &wctx, ret);
}
static LLVMValueRef
visit_image_size(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   bool is_array;
   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      res = get_buffer_size(ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
   } else {

      struct ac_image_args args = { 0 };

      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      args.dmask = 0xf;
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      args.opcode = ac_image_get_resinfo;
      args.lod = ctx->ac.i32_0;
      args.attributes = AC_FUNC_ATTR_READNONE;

      res = ac_build_image_opcode(&ctx->ac, &args);

      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

      if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
         LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
         LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
      }

      if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
         LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
                                      ctx->ac.i32_1, "");
      }
   }
   return exit_waterfall(ctx, &wctx, res);
}
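/* Map NIR memory barriers onto waitcnt flags: LGKM covers LDS and scalar
 * memory traffic, VLOAD/VSTORE cover vector memory loads and stores. Plain
 * and group barriers wait for all three.
 */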
static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
   unsigned wait_flags = 0;

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
      wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_shared:
      wait_flags = AC_WAIT_LGKM;
      break;
   default:
      break;
   }

   ac_build_waitcnt(ac, wait_flags);
}
void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }
   ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_discard);
      cond = ctx->ac.i1false;
   }

   ac_build_kill_if_false(&ctx->ac, cond);
}
static void emit_demote(struct ac_nir_context *ctx,
                        const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_demote_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_demote);
      cond = ctx->ac.i1false;
   }

   /* Kill immediately while maintaining WQM. */
   ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));

   LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
   mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
   LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
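/* The local invocation index is wave_id_in_group * wave_size + lane_id.
 * Bits [11:6] of tg_size hold the wave id, so masking with 0xfc0 yields
 * wave_id * 64, which is already the right scale for wave64 and only needs
 * a shift right by 1 for wave32.
 */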
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
   LLVMValueRef result;
   LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
   result = LLVMBuildAnd(ctx->ac.builder,
                         ac_get_arg(&ctx->ac, ctx->args->tg_size),
                         LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

   if (ctx->ac.wave_size == 32)
      result = LLVMBuildLShr(ctx->ac.builder, result,
                             LLVMConstInt(ctx->ac.i32, 1, false), "");

   return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      LLVMValueRef result;
      result = LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
      return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 0, false);
   }
}
static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      return LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 1, false);
   }
}
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
   LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
   const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

   /* The second argument is whether cttz(0) should be defined, but we do not care. */
   LLVMValueRef args[] = {active_set, ctx->ac.i1false};
   LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                            ctx->ac.iN_wavemask, args, 2,
                                            AC_FUNC_ATTR_NOUNWIND |
                                            AC_FUNC_ATTR_READNONE);

   return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
   LLVMValueRef values[4], derived_ptr, index, ret;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                     instr->dest.ssa.bit_size);

   for (int chan = 0; chan < instr->num_components; chan++) {
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
      values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
   }

   ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
   LLVMValueRef derived_ptr, data, index;
   LLVMBuilderRef builder = ctx->ac.builder;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                     instr->src[0].ssa->bit_size);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   int writemask = nir_intrinsic_write_mask(instr);
   for (int chan = 0; chan < 4; chan++) {
      if (!(writemask & (1 << chan))) {
         continue;
      }
      data = ac_llvm_extract_elem(&ctx->ac, src, chan);
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
      LLVMBuildStore(builder, data, derived_ptr);
   }
}
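/* Shared and global atomics. LDS atomics use the "workgroup" sync scope;
 * global ones switch to "singlethread" to get relaxed ordering. The
 * *-one-as (one address space) scope variants are used only with LLVM 9+.
 */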
static LLVMValueRef
visit_var_atomic(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 LLVMValueRef ptr, int src_idx)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7005);
   }

   LLVMValueRef result;
   LLVMValueRef src = get_src(ctx, instr->src[src_idx]);

   const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
      if (deref->mode == nir_var_mem_global) {
         /* use "singlethread" sync scope to implement relaxed ordering */
         sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";

         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(src),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
         ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
      }
   }

   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
       instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
      LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
      result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
      result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
   } else {
      LLVMAtomicRMWBinOp op;
      switch (instr->intrinsic) {
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_deref_atomic_add:
         op = LLVMAtomicRMWBinOpAdd;
         break;
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_deref_atomic_umin:
         op = LLVMAtomicRMWBinOpUMin;
         break;
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_deref_atomic_umax:
         op = LLVMAtomicRMWBinOpUMax;
         break;
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_deref_atomic_imin:
         op = LLVMAtomicRMWBinOpMin;
         break;
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_deref_atomic_imax:
         op = LLVMAtomicRMWBinOpMax;
         break;
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_deref_atomic_and:
         op = LLVMAtomicRMWBinOpAnd;
         break;
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_deref_atomic_or:
         op = LLVMAtomicRMWBinOpOr;
         break;
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_deref_atomic_xor:
         op = LLVMAtomicRMWBinOpXor;
         break;
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_deref_atomic_exchange:
         op = LLVMAtomicRMWBinOpXchg;
         break;
      default:
         return NULL;
      }

      result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7005);
   return result;
}
static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
   LLVMValueRef values[2];
   LLVMValueRef pos[2];

   pos[0] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
   pos[1] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

   values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
   values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
   return ac_build_gather_values(&ctx->ac, values, 2);
}
static LLVMValueRef
lookup_interp_param(struct ac_nir_context *ctx,
                    enum glsl_interp_mode interp, unsigned location)
{
   switch (interp) {
   case INTERP_MODE_FLAT:
   default:
      break;
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->persp_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->persp_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->linear_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->linear_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
      break;
   }
   return NULL;
}
static LLVMValueRef
barycentric_center(struct ac_nir_context *ctx,
                   unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_offset(struct ac_nir_context *ctx,
                   unsigned mode,
                   LLVMValueRef offset)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   LLVMValueRef src_c0 = ac_to_float(&ctx->ac,
                                     LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
   LLVMValueRef src_c1 = ac_to_float(&ctx->ac,
                                     LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

   LLVMValueRef ij_out[2];
   LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

   /*
    * take the I then J parameters, and the DDX/Y for it, and
    * calculate the IJ inputs for the interpolator.
    * temp1 = ddx * offset/sample.x + I;
    * interp_param.I = ddy * offset/sample.y + temp1;
    * temp1 = ddx * offset/sample.x + J;
    * interp_param.J = ddy * offset/sample.y + temp1;
    */
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
      LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
      LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, ix_ll, "");
      LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, iy_ll, "");
      LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                       interp_param, ix_ll, "");
      LLVMValueRef temp1, temp2;

      interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                   ctx->ac.f32, "");

      temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
      temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

      ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                   temp2, ctx->ac.i32, "");
   }
   interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_centroid(struct ac_nir_context *ctx,
                     unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_at_sample(struct ac_nir_context *ctx,
                      unsigned mode,
                      LLVMValueRef sample_id)
{
   if (ctx->abi->interp_at_sample_force_center)
      return barycentric_center(ctx, mode);

   LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

   /* fetch sample ID */
   LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

   LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
   src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
   LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
   src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
   LLVMValueRef coords[] = { src_c0, src_c1 };
   LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

   return barycentric_offset(ctx, mode, offset);
}
static LLVMValueRef
barycentric_sample(struct ac_nir_context *ctx,
                   unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef
barycentric_model(struct ac_nir_context *ctx)
{
   return LLVMBuildBitCast(ctx->ac.builder,
                           ac_get_arg(&ctx->ac, ctx->args->pull_model),
                           ctx->ac.v3i32, "");
}
static LLVMValueRef
load_interpolated_input(struct ac_nir_context *ctx,
                        LLVMValueRef interp_param,
                        unsigned index, unsigned comp_start,
                        unsigned num_components,
                        unsigned bitsize)
{
   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);

   interp_param = LLVMBuildBitCast(ctx->ac.builder,
                                   interp_param, ctx->ac.v2f32, "");
   LLVMValueRef i = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_0, "");
   LLVMValueRef j = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_1, "");

   LLVMValueRef values[4];
   assert(bitsize == 16 || bitsize == 32);
   for (unsigned comp = 0; comp < num_components; comp++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
      if (bitsize == 16) {
         values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
                                               ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      } else {
         values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
                                           ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      }
   }

   return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
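/* Non-interpolated FS inputs (flat and per-vertex). Each channel is read
 * with fs_interp_mov from one vertex's attribute: P0 by default, or the
 * vertex selected by load_input_vertex. 16-bit destinations are truncated
 * and 64-bit destinations are reassembled from pairs of 32-bit channels.
 */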
static LLVMValueRef
load_input(struct ac_nir_context *ctx,
           nir_intrinsic_instr *instr)
{
   unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

   /* We only lower inputs for fragment shaders ATM */
   ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
   assert(offset[0].i32 == 0);

   unsigned component = nir_intrinsic_component(instr);
   unsigned index = nir_intrinsic_base(instr);
   unsigned vertex_id = 2; /* P0 */

   if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
      nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

      switch (src0[0].i32) {
      case 0:
         vertex_id = 2;
         break;
      case 1:
         vertex_id = 0;
         break;
      case 2:
         vertex_id = 1;
         break;
      default:
         unreachable("Invalid vertex index");
      }
   }

   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
   LLVMValueRef values[8];

   /* Each component of a 64-bit value takes up two GL-level channels. */
   unsigned num_components = instr->dest.ssa.num_components;
   unsigned bit_size = instr->dest.ssa.bit_size;
   unsigned channels =
      bit_size == 64 ? num_components * 2 : num_components;

   for (unsigned chan = 0; chan < channels; chan++) {
      if (component + chan > 4)
         attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
      values[chan] = ac_build_fs_interp_mov(&ctx->ac,
                                            LLVMConstInt(ctx->ac.i32, vertex_id, false),
                                            llvm_chan,
                                            attr_number,
                                            ac_get_arg(&ctx->ac, ctx->args->prim_mask));
      values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
      values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
                                             bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
   }

   LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
   if (bit_size == 64) {
      LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
         LLVMVectorType(ctx->ac.i64, num_components);
      result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
   }
   return result;
}
static void visit_intrinsic(struct ac_nir_context *ctx,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result = NULL;

   switch (instr->intrinsic) {
   case nir_intrinsic_ballot:
      result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
      if (ctx->ac.ballot_mask_bits > ctx->ac.wave_size)
         result = LLVMBuildZExt(ctx->ac.builder, result, ctx->ac.iN_ballotmask, "");
      break;
   case nir_intrinsic_read_invocation:
      result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]),
                                 get_src(ctx, instr->src[1]));
      break;
   case nir_intrinsic_read_first_invocation:
      result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
      break;
   case nir_intrinsic_load_subgroup_invocation:
      result = ac_get_thread_id(&ctx->ac);
      break;
   case nir_intrinsic_load_work_group_id: {
      LLVMValueRef values[3];

      for (int i = 0; i < 3; i++) {
         values[i] = ctx->args->workgroup_ids[i].used ?
                     ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]) : ctx->ac.i32_0;
      }

      result = ac_build_gather_values(&ctx->ac, values, 3);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
      result = ctx->abi->load_base_vertex(ctx->abi);
      break;
   case nir_intrinsic_load_local_group_size:
      result = ctx->abi->load_local_group_size(ctx->abi);
      break;
   case nir_intrinsic_load_vertex_id:
      result = LLVMBuildAdd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->vertex_id),
                            ac_get_arg(&ctx->ac, ctx->args->base_vertex), "");
      break;
   case nir_intrinsic_load_vertex_id_zero_base: {
      result = ctx->abi->vertex_id;
      break;
   }
   case nir_intrinsic_load_local_invocation_id: {
      result = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
      break;
   }
   case nir_intrinsic_load_base_instance:
      result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
      break;
   case nir_intrinsic_load_draw_id:
      result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
      break;
   case nir_intrinsic_load_view_index:
      result = ac_get_arg(&ctx->ac, ctx->args->view_index);
      break;
   case nir_intrinsic_load_invocation_id:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         result = ac_unpack_param(&ctx->ac,
                                  ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids),
                                  8, 5);
      } else {
         if (ctx->ac.chip_class >= GFX10) {
            result = LLVMBuildAnd(ctx->ac.builder,
                                  ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id),
                                  LLVMConstInt(ctx->ac.i32, 127, 0), "");
         } else {
            result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
         }
      }
      break;
   case nir_intrinsic_load_primitive_id:
      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
      } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
      } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
         result = ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
      } else
         fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
      break;
   case nir_intrinsic_load_sample_id:
      result = ac_unpack_param(&ctx->ac,
                               ac_get_arg(&ctx->ac, ctx->args->ancillary),
                               8, 4);
      break;
   case nir_intrinsic_load_sample_pos:
      result = load_sample_pos(ctx);
      break;
   case nir_intrinsic_load_sample_mask_in:
      result = ctx->abi->load_sample_mask_in(ctx->abi);
      break;
   case nir_intrinsic_load_frag_coord: {
      LLVMValueRef values[4] = {
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]),
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
         ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
                       ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))
      };
      result = ac_to_integer(&ctx->ac,
                             ac_build_gather_values(&ctx->ac, values, 4));
      break;
   }
   case nir_intrinsic_load_layer_id:
      result = ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
      break;
   case nir_intrinsic_load_front_face:
      result = ac_get_arg(&ctx->ac, ctx->args->front_face);
      break;
   case nir_intrinsic_load_helper_invocation:
      result = ac_build_load_helper_invocation(&ctx->ac);
      break;
   case nir_intrinsic_is_helper_invocation:
      result = ac_build_is_helper_invocation(&ctx->ac);
      break;
   case nir_intrinsic_load_color0:
      result = ctx->abi->color0;
      break;
   case nir_intrinsic_load_color1:
      result = ctx->abi->color1;
      break;
   case nir_intrinsic_load_user_data_amd:
      assert(LLVMTypeOf(ctx->abi->user_data) == ctx->ac.v4i32);
      result = ctx->abi->user_data;
      break;
   case nir_intrinsic_load_instance_id:
      result = ctx->abi->instance_id;
      break;
   case nir_intrinsic_load_num_work_groups:
      result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
      break;
   case nir_intrinsic_load_local_invocation_index:
      result = visit_load_local_invocation_index(ctx);
      break;
   case nir_intrinsic_load_subgroup_id:
      result = visit_load_subgroup_id(ctx);
      break;
   case nir_intrinsic_load_num_subgroups:
      result = visit_load_num_subgroups(ctx);
      break;
   case nir_intrinsic_first_invocation:
      result = visit_first_invocation(ctx);
      break;
   case nir_intrinsic_load_push_constant:
      result = visit_load_push_constant(ctx, instr);
      break;
   case nir_intrinsic_vulkan_resource_index: {
      LLVMValueRef index = get_src(ctx, instr->src[0]);
      unsigned desc_set = nir_intrinsic_desc_set(instr);
      unsigned binding = nir_intrinsic_binding(instr);

      result = ctx->abi->load_resource(ctx->abi, index, desc_set,
                                       binding);
      break;
   }
   case nir_intrinsic_vulkan_resource_reindex:
      result = visit_vulkan_resource_reindex(ctx, instr);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ssbo:
      result = visit_load_buffer(ctx, instr);
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      result = visit_atomic_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ubo:
      result = visit_load_ubo_buffer(ctx, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      result = visit_get_buffer_size(ctx, instr);
      break;
   case nir_intrinsic_load_deref:
      result = visit_load_var(ctx, instr);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(ctx, instr);
      break;
   case nir_intrinsic_load_shared:
      result = visit_load_shared(ctx, instr);
      break;
   case nir_intrinsic_store_shared:
      visit_store_shared(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_deref_samples:
      result = visit_image_samples(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_load:
      result = visit_image_load(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_load:
      result = visit_image_load(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_store:
      visit_image_store(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_store:
      visit_image_store(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
      result = visit_image_atomic(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      result = visit_image_atomic(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_size:
      result = visit_image_size(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_size:
      result = visit_image_size(ctx, instr, false);
      break;
   case nir_intrinsic_shader_clock:
      result = ac_build_shader_clock(&ctx->ac);
      break;
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      emit_discard(ctx, instr);
      break;
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      emit_demote(ctx, instr);
      break;
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_shared:
      emit_membar(&ctx->ac, instr);
      break;
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_control_barrier:
      ac_emit_barrier(&ctx->ac, ctx->stage);
      break;
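   /* LDS atomics: the first source is an offset into shared memory that
    * get_memory_ptr() turns into a typed pointer sized to match the data
    * operand (src[1]) before the generic var-atomic path is used.
    */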
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap: {
      LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                        instr->src[1].ssa->bit_size);
      result = visit_var_atomic(ctx, instr, ptr, 1);
      break;
   }
   case nir_intrinsic_deref_atomic_add:
   case nir_intrinsic_deref_atomic_imin:
   case nir_intrinsic_deref_atomic_umin:
   case nir_intrinsic_deref_atomic_imax:
   case nir_intrinsic_deref_atomic_umax:
   case nir_intrinsic_deref_atomic_and:
   case nir_intrinsic_deref_atomic_or:
   case nir_intrinsic_deref_atomic_xor:
   case nir_intrinsic_deref_atomic_exchange:
   case nir_intrinsic_deref_atomic_comp_swap: {
      LLVMValueRef ptr = get_src(ctx, instr->src[0]);
      result = visit_var_atomic(ctx, instr, ptr, 1);
      break;
   }
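   /* The load_barycentric_* intrinsics below only compute the packed (i,j)
    * barycentric coordinates; the actual attribute fetch happens later in
    * load_interpolated_input.
    */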
   case nir_intrinsic_load_barycentric_pixel:
      result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_centroid:
      result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_sample:
      result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_model:
      result = barycentric_model(ctx);
      break;
   case nir_intrinsic_load_barycentric_at_offset: {
      LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
      result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
      break;
   }
   case nir_intrinsic_load_barycentric_at_sample: {
      LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
      result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
      break;
   }
   case nir_intrinsic_load_interpolated_input: {
      /* We assume any indirect loads have been lowered away */
      ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
      assert(offset);
      assert(offset[0].i32 == 0);

      LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
      unsigned index = nir_intrinsic_base(instr);
      unsigned component = nir_intrinsic_component(instr);
      result = load_interpolated_input(ctx, interp_param, index,
                                       component,
                                       instr->dest.ssa.num_components,
                                       instr->dest.ssa.bit_size);
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_input_vertex:
      result = load_input(ctx, instr);
      break;
   case nir_intrinsic_emit_vertex:
      ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
      break;
   case nir_intrinsic_end_primitive:
      ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_load_tess_coord:
      result = ctx->abi->load_tess_coord(ctx->abi);
      break;
   case nir_intrinsic_load_tess_level_outer:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, false);
      break;
   case nir_intrinsic_load_tess_level_inner:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, false);
      break;
   case nir_intrinsic_load_tess_level_outer_default:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, true);
      break;
   case nir_intrinsic_load_tess_level_inner_default:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, true);
      break;
   case nir_intrinsic_load_patch_vertices_in:
      result = ctx->abi->load_patch_vertices_in(ctx->abi);
      break;
   case nir_intrinsic_vote_all: {
      LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
      result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_intrinsic_vote_any: {
      LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
      result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_intrinsic_shuffle:
      if (ctx->ac.chip_class == GFX8 ||
          ctx->ac.chip_class == GFX9 ||
          (ctx->ac.chip_class == GFX10 && ctx->ac.wave_size == 32)) {
         result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
                                   get_src(ctx, instr->src[1]));
      } else {
         LLVMValueRef src = get_src(ctx, instr->src[0]);
         LLVMValueRef index = get_src(ctx, instr->src[1]);
         LLVMTypeRef type = LLVMTypeOf(src);
         struct waterfall_context wctx;
         LLVMValueRef index_val;

         index_val = enter_waterfall(ctx, &wctx, index, true);

         src = LLVMBuildZExt(ctx->ac.builder, src,
                             ctx->ac.i32, "");

         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane",
                                     ctx->ac.i32,
                                     (LLVMValueRef []) { src, index_val }, 2,
                                     AC_FUNC_ATTR_READNONE |
                                     AC_FUNC_ATTR_CONVERGENT);

         result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");

         result = exit_waterfall(ctx, &wctx, result);
      }
      break;
   case nir_intrinsic_reduce:
      result = ac_build_reduce(&ctx->ac,
                               get_src(ctx, instr->src[0]),
                               instr->const_index[0],
                               instr->const_index[1]);
      break;
   case nir_intrinsic_inclusive_scan:
      result = ac_build_inclusive_scan(&ctx->ac,
                                       get_src(ctx, instr->src[0]),
                                       instr->const_index[0]);
      break;
   case nir_intrinsic_exclusive_scan:
      result = ac_build_exclusive_scan(&ctx->ac,
                                       get_src(ctx, instr->src[0]),
                                       instr->const_index[0]);
      break;
   case nir_intrinsic_quad_broadcast: {
      unsigned lane = nir_src_as_uint(instr->src[1]);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                     lane, lane, lane, lane);
      break;
   }
   case nir_intrinsic_quad_swap_horizontal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
      break;
   case nir_intrinsic_quad_swap_vertical:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
      break;
   case nir_intrinsic_quad_swap_diagonal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
      break;
   case nir_intrinsic_quad_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                     mask & 0x3, (mask >> 2) & 0x3,
                                     (mask >> 4) & 0x3, (mask >> 6) & 0x3);
      break;
   }
   case nir_intrinsic_masked_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
      break;
   }
   case nir_intrinsic_write_invocation_amd:
      result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
                                  get_src(ctx, instr->src[1]),
                                  get_src(ctx, instr->src[2]));
      break;
   case nir_intrinsic_mbcnt_amd:
      result = ac_build_mbcnt(&ctx->ac, get_src(ctx, instr->src[0]));
      break;
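   /* Scratch (function-temp) memory is backed by a plain byte-array alloca
    * (see setup_scratch), so loads and stores below just GEP into it and
    * bitcast the pointer to the access's component/vector type.
    */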
   case nir_intrinsic_load_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
      LLVMTypeRef vec_type =
         instr->dest.ssa.num_components == 1 ? comp_type :
         LLVMVectorType(comp_type, instr->dest.ssa.num_components);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(vec_type, addr_space), "");
      result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      break;
   }
   case nir_intrinsic_store_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[1]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(comp_type, addr_space), "");
      LLVMValueRef src = get_src(ctx, instr->src[0]);
      unsigned wrmask = nir_intrinsic_write_mask(instr);
      /* Store each contiguous run of enabled writemask bits as one vector,
       * e.g. wrmask 0b1011 stores components 0-1 and then component 3. */
      while (wrmask) {
         int start, count;
         u_bit_scan_consecutive_range(&wrmask, &start, &count);

         LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
         LLVMValueRef offset_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &offset, 1, "");
         LLVMTypeRef vec_type =
            count == 1 ? comp_type : LLVMVectorType(comp_type, count);
         offset_ptr = LLVMBuildBitCast(ctx->ac.builder,
                                       offset_ptr,
                                       LLVMPointerType(vec_type, addr_space),
                                       "");
         LLVMValueRef offset_src =
            ac_extract_components(&ctx->ac, src, start, count);
         LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
      }
      break;
   }
   case nir_intrinsic_load_constant: {
      unsigned base = nir_intrinsic_base(instr);
      unsigned range = nir_intrinsic_range(instr);

      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, base, false), "");

      /* Clamp the offset to avoid out-of-bound access because global
       * instructions can't handle them.
       */
      LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
                                        offset, size, "");
      offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");

      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
      LLVMTypeRef vec_type =
         instr->dest.ssa.num_components == 1 ? comp_type :
         LLVMVectorType(comp_type, instr->dest.ssa.num_components);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(vec_type, addr_space), "");
      result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      break;
   }
   default:
      fprintf(stderr, "Unknown intrinsic: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      break;
   }
   if (result) {
      ctx->ssa_defs[instr->dest.ssa.index] = result;
   }
}
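
/* Fetch a 64-bit bindless handle from the driver's internal uniform buffer
 * (UBO index 0 below): the byte offset is base_index * 4 plus 8 bytes per
 * array element, since each handle is 64 bits wide.
 */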
static LLVMValueRef
get_bindless_index_from_uniform(struct ac_nir_context *ctx,
                                unsigned base_index,
                                unsigned constant_index,
                                LLVMValueRef dynamic_index)
{
   LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
   LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
                                     LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

   /* Bindless uniforms are 64-bit, so multiply the index by 8. */
   index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
   offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

   LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

   LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
                                           NULL, 0, 0, true, true);

   return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
struct sampler_desc_address {
   unsigned descriptor_set;
   unsigned base_index; /* binding in vulkan */
   unsigned constant_index;
   LLVMValueRef dynamic_index;
   bool image;
   bool bindless;
};
static struct sampler_desc_address
get_sampler_desc_internal(struct ac_nir_context *ctx,
                          nir_deref_instr *deref_instr,
                          const nir_instr *instr,
                          bool image)
{
   LLVMValueRef index = NULL;
   unsigned constant_index = 0;
   unsigned descriptor_set;
   unsigned base_index;
   bool bindless = false;

   if (!deref_instr) {
      descriptor_set = 0;
      if (image) {
         nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
         base_index = 0;
         bindless = true;
         index = get_src(ctx, img_instr->src[0]);
      } else {
         nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
         int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
                                                  nir_tex_src_sampler_handle);
         if (sampSrcIdx != -1) {
            base_index = 0;
            bindless = true;
            index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
         } else {
            assert(tex_instr && !image);
            base_index = tex_instr->sampler_index;
         }
      }
   } else {
      while (deref_instr->deref_type != nir_deref_type_var) {
         if (deref_instr->deref_type == nir_deref_type_array) {
            unsigned array_size = glsl_get_aoa_size(deref_instr->type);
            if (!array_size)
               array_size = 1;

            if (nir_src_is_const(deref_instr->arr.index)) {
               constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
            } else {
               LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

               indirect = LLVMBuildMul(ctx->ac.builder, indirect,
                                       LLVMConstInt(ctx->ac.i32, array_size, false), "");

               if (!index)
                  index = indirect;
               else
                  index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
            }

            deref_instr = nir_src_as_deref(deref_instr->parent);
         } else if (deref_instr->deref_type == nir_deref_type_struct) {
            unsigned sidx = deref_instr->strct.index;
            deref_instr = nir_src_as_deref(deref_instr->parent);
            constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
         } else {
            unreachable("Unsupported deref type");
         }
      }
      descriptor_set = deref_instr->var->data.descriptor_set;

      if (deref_instr->var->data.bindless) {
         /* For now just assert on unhandled variable types */
         assert(deref_instr->var->data.mode == nir_var_uniform);

         base_index = deref_instr->var->data.driver_location;
         bindless = true;

         index = index ? index : ctx->ac.i32_0;
         index = get_bindless_index_from_uniform(ctx, base_index,
                                                 constant_index, index);
      } else
         base_index = deref_instr->var->data.binding;
   }
   return (struct sampler_desc_address) {
      .descriptor_set = descriptor_set,
      .base_index = base_index,
      .constant_index = constant_index,
      .dynamic_index = index,
      .image = image,
      .bindless = bindless,
   };
}
/* Extract any possibly divergent index into a separate value that can be fed
 * into get_sampler_desc with the same arguments. */
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image)
{
   struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
   return addr.dynamic_index;
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     LLVMValueRef index,
                                     bool image, bool write)
{
   struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
   return ctx->abi->load_sampler_desc(ctx->abi,
                                      addr.descriptor_set,
                                      addr.base_index,
                                      addr.constant_index, index,
                                      desc_type, addr.image, write, addr.bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
                                           LLVMValueRef res, LLVMValueRef samp)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef img7, samp0;

   if (ctx->ac.chip_class >= GFX8)
      return samp;

   img7 = LLVMBuildExtractElement(builder, res,
                                  LLVMConstInt(ctx->ac.i32, 7, 0), "");
   samp0 = LLVMBuildExtractElement(builder, samp,
                                   LLVMConstInt(ctx->ac.i32, 0, 0), "");
   samp0 = LLVMBuildAnd(builder, samp0, img7, "");
   return LLVMBuildInsertElement(builder, samp, samp0,
                                 LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
static void tex_fetch_ptrs(struct ac_nir_context *ctx,
                           nir_tex_instr *instr,
                           struct waterfall_context *wctx,
                           LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
                           LLVMValueRef *fmask_ptr)
{
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   int plane = -1;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_plane:
         plane = nir_src_as_int(instr->src[i].src);
         break;
      default:
         break;
      }
   }

   LLVMValueRef texture_dynamic_index = get_sampler_desc_index(ctx, texture_deref_instr,
                                                               &instr->instr, false);
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   LLVMValueRef sampler_dynamic_index = get_sampler_desc_index(ctx, sampler_deref_instr,
                                                               &instr->instr, false);
   if (instr->texture_non_uniform)
      texture_dynamic_index = enter_waterfall(ctx, wctx + 0, texture_dynamic_index, true);

   if (instr->sampler_non_uniform)
      sampler_dynamic_index = enter_waterfall(ctx, wctx + 1, sampler_dynamic_index, true);

   enum ac_descriptor_type main_descriptor = instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

   if (plane >= 0) {
      assert(instr->op != nir_texop_txf_ms &&
             instr->op != nir_texop_samples_identical);
      assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

      main_descriptor = AC_DESC_PLANE_0 + plane;
   }

   if (instr->op == nir_texop_fragment_mask_fetch) {
      /* The fragment mask is fetched from the compressed
       * multisampled surface.
       */
      main_descriptor = AC_DESC_FMASK;
   }

   *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr,
                               texture_dynamic_index, false, false);

   if (samp_ptr) {
      *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr,
                                   sampler_dynamic_index, false, false);
      if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
         *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
   }
   if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
                     instr->op == nir_texop_samples_identical))
      *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK,
                                    &instr->instr, texture_dynamic_index, false, false);
}
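
/* Array slice coordinates are rounded to the nearest integer before
 * sampling, matching the layer = round(coord) selection rule for array
 * textures (not needed for txf, which takes integer coordinates).
 */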
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
                                      LLVMValueRef coord)
{
   coord = ac_to_float(ctx, coord);
   coord = ac_build_round(ctx, coord);
   coord = ac_to_integer(ctx, coord);
   return coord;
}
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
   LLVMValueRef result = NULL;
   struct ac_image_args args = { 0 };
   LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
   LLVMValueRef ddx = NULL, ddy = NULL;
   unsigned offset_src = 0;
   struct waterfall_context wctx[2] = {{{0}}};

   tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(ctx, instr->src[i].src);
         for (unsigned chan = 0; chan < instr->coord_components; ++chan)
            args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
         break;
      }
      case nir_tex_src_projector:
         break;
      case nir_tex_src_comparator:
         if (instr->is_shadow) {
            args.compare = get_src(ctx, instr->src[i].src);
            args.compare = ac_to_float(&ctx->ac, args.compare);
         }
         break;
      case nir_tex_src_offset:
         args.offset = get_src(ctx, instr->src[i].src);
         offset_src = i;
         break;
      case nir_tex_src_bias:
         if (instr->op == nir_texop_txb)
            args.bias = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_lod: {
         if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
            args.level_zero = true;
         else
            args.lod = get_src(ctx, instr->src[i].src);
         break;
      }
      case nir_tex_src_ms_index:
         sample_index = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ms_mcs:
         break;
      case nir_tex_src_ddx:
         ddx = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ddy:
         ddy = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      default:
         break;
      }
   }

   if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      result = get_buffer_size(ctx, args.resource, true);
      goto write_result;
   }

   if (instr->op == nir_texop_texture_samples) {
      LLVMValueRef res, samples, is_msaa;
      res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
      samples = LLVMBuildExtractElement(ctx->ac.builder, res,
                                        LLVMConstInt(ctx->ac.i32, 3, false), "");
      is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
                              LLVMConstInt(ctx->ac.i32, 28, false), "");
      is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa,
                             LLVMConstInt(ctx->ac.i32, 0xe, false), "");
      is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
                              LLVMConstInt(ctx->ac.i32, 0xe, false), "");

      samples = LLVMBuildLShr(ctx->ac.builder, samples,
                              LLVMConstInt(ctx->ac.i32, 16, false), "");
      samples = LLVMBuildAnd(ctx->ac.builder, samples,
                             LLVMConstInt(ctx->ac.i32, 0xf, false), "");
      samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1,
                             samples, "");
      samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples,
                                ctx->ac.i32_1, "");
      result = samples;
      goto write_result;
   }

   if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
      LLVMValueRef offset[3], pack;
      for (unsigned chan = 0; chan < 3; ++chan)
         offset[chan] = ctx->ac.i32_0;

      unsigned num_components = ac_get_llvm_num_components(args.offset);
      for (unsigned chan = 0; chan < num_components; chan++) {
         offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
         offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
                                     LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
         if (chan)
            offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
                                        LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
      }
      pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
      pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
      args.offset = pack;
   }

   /* Section 8.23.1 (Depth Texture Comparison Mode) of the
    * OpenGL 4.5 spec says:
    *
    *    "If the texture's internal format indicates a fixed-point
    *     depth texture, then D_t and D_ref are clamped to the
    *     range [0, 1]; otherwise no clamping is performed."
    *
    * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
    * so the depth comparison value isn't clamped for Z16 and
    * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
    * an explicitly clamped 32-bit float format.
    */
   if (args.compare &&
       ctx->ac.chip_class >= GFX8 &&
       ctx->ac.chip_class <= GFX9 &&
       ctx->abi->clamp_shadow_reference) {
      LLVMValueRef upgraded, clamped;

      upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
                                         LLVMConstInt(ctx->ac.i32, 3, false), "");
      upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
                               LLVMConstInt(ctx->ac.i32, 29, false), "");
      upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
      clamped = ac_build_clamp(&ctx->ac, args.compare);
      args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped,
                                     args.compare, "");
   }

   /* pack derivatives */
   if (ddx || ddy) {
      int num_src_deriv_channels, num_dest_deriv_channels;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         num_src_deriv_channels = 3;
         num_dest_deriv_channels = 3;
         break;
      case GLSL_SAMPLER_DIM_2D:
      default:
         num_src_deriv_channels = 2;
         num_dest_deriv_channels = 2;
         break;
      case GLSL_SAMPLER_DIM_1D:
         num_src_deriv_channels = 1;
         if (ctx->ac.chip_class == GFX9) {
            num_dest_deriv_channels = 2;
         } else {
            num_dest_deriv_channels = 1;
         }
         break;
      }

      for (unsigned i = 0; i < num_src_deriv_channels; i++) {
         args.derivs[i] = ac_to_float(&ctx->ac,
                                      ac_llvm_extract_elem(&ctx->ac, ddx, i));
         args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
                                      ac_llvm_extract_elem(&ctx->ac, ddy, i));
      }
      for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
         args.derivs[i] = ctx->ac.f32_0;
         args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
      }
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
      for (unsigned chan = 0; chan < instr->coord_components; chan++)
         args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
      if (instr->coord_components == 3)
         args.coords[3] = LLVMGetUndef(ctx->ac.f32);
      ac_prepare_cube_coords(&ctx->ac,
                             instr->op == nir_texop_txd, instr->is_array,
                             instr->op == nir_texop_lod, args.coords, args.derivs);
   }

   /* Texture coordinates fixups */
   if (instr->coord_components > 1 &&
       instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
       instr->is_array &&
       instr->op != nir_texop_txf) {
      args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
   }

   if (instr->coord_components > 2 &&
       (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
       instr->is_array &&
       instr->op != nir_texop_txf &&
       instr->op != nir_texop_txf_ms &&
       instr->op != nir_texop_fragment_fetch &&
       instr->op != nir_texop_fragment_mask_fetch) {
      args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
   }

   if (ctx->ac.chip_class == GFX9 &&
       instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
       instr->op != nir_texop_lod) {
      LLVMValueRef filler;
      if (instr->op == nir_texop_txf)
         filler = ctx->ac.i32_0;
      else
         filler = LLVMConstReal(ctx->ac.f32, 0.5);

      if (instr->is_array)
         args.coords[2] = args.coords[1];
      args.coords[1] = filler;
   }

   /* Pack sample index */
   if (sample_index && (instr->op == nir_texop_txf_ms ||
                        instr->op == nir_texop_fragment_fetch))
      args.coords[instr->coord_components] = sample_index;

   if (instr->op == nir_texop_samples_identical) {
      struct ac_image_args txf_args = { 0 };
      memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));

      txf_args.dmask = 0xf;
      txf_args.resource = fmask_ptr;
      txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
      result = build_tex_intrinsic(ctx, instr, &txf_args);

      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
      goto write_result;
   }

   if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
       instr->op != nir_texop_txs &&
       instr->op != nir_texop_fragment_fetch &&
       instr->op != nir_texop_fragment_mask_fetch) {
      unsigned sample_chan = instr->is_array ? 3 : 2;
      args.coords[sample_chan] = adjust_sample_index_using_fmask(
         &ctx->ac, args.coords[0], args.coords[1],
         instr->is_array ? args.coords[2] : NULL,
         args.coords[sample_chan], fmask_ptr);
   }

   if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
      int num_offsets = instr->src[offset_src].src.ssa->num_components;
      num_offsets = MIN2(num_offsets, instr->coord_components);
      for (unsigned i = 0; i < num_offsets; ++i) {
         args.coords[i] = LLVMBuildAdd(
            ctx->ac.builder, args.coords[i],
            LLVMConstInt(ctx->ac.i32, nir_src_comp_as_uint(instr->src[offset_src].src, i), false), "");
      }
      args.offset = NULL;
   }

   /* DMASK was repurposed for GATHER4. 4 components are always
    * returned and DMASK works like a swizzle - it selects
    * the component to fetch. The only valid DMASK values are
    * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
    * (red,red,red,red) etc.) The ISA document doesn't mention
    * this.
    */
   args.dmask = 0xf;
   if (instr->op == nir_texop_tg4) {
      if (instr->is_shadow)
         args.dmask = 1;
      else
         args.dmask = 1 << instr->component;
   }

   if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
      args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
      args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
   }

   /* Adjust the number of coordinates because we only need (x,y) for 2D
    * multisampled images and (x,y,layer) for 2D multisampled layered
    * images or for multisampled input attachments.
    */
   if (instr->op == nir_texop_fragment_mask_fetch) {
      if (args.dim == ac_image_2dmsaa) {
         args.dim = ac_image_2d;
      } else {
         assert(args.dim == ac_image_2darraymsaa);
         args.dim = ac_image_2darray;
      }
   }

   result = build_tex_intrinsic(ctx, instr, &args);

   if (instr->op == nir_texop_query_levels)
      result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
   else if (instr->is_shadow && instr->is_new_style_shadow &&
            instr->op != nir_texop_txs && instr->op != nir_texop_lod &&
            instr->op != nir_texop_tg4)
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
   else if (instr->op == nir_texop_txs &&
            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
            instr->is_array) {
      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
      LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
      LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
      z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
      result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
   } else if (ctx->ac.chip_class == GFX9 &&
              instr->op == nir_texop_txs &&
              instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
              instr->is_array) {
      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
      LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
      result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
                                      ctx->ac.i32_1, "");
   } else if (instr->dest.ssa.num_components != 4)
      result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);

write_result:
   if (result) {
      assert(instr->dest.is_ssa);
      result = ac_to_integer(&ctx->ac, result);

      for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
         result = exit_waterfall(ctx, wctx + i, result);
      }

      ctx->ssa_defs[instr->dest.ssa.index] = result;
   }
}
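
/* Phis are handled in two passes: visit_phi creates an empty LLVM phi node
 * (its operands may not be translated yet), and phi_post_pass/visit_post_phi
 * fill in the incoming values once every block has been emitted.
 */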
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
   LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
   LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

   ctx->ssa_defs[instr->dest.ssa.index] = result;
   _mesa_hash_table_insert(ctx->phis, instr, result);
}
static void visit_post_phi(struct ac_nir_context *ctx,
                           nir_phi_instr *instr,
                           LLVMValueRef llvm_phi)
{
   nir_foreach_phi_src(src, instr) {
      LLVMBasicBlockRef block = get_block(ctx, src->pred);
      LLVMValueRef llvm_src = get_src(ctx, src->src);

      LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
   }
}
static void phi_post_pass(struct ac_nir_context *ctx)
{
   hash_table_foreach(ctx->phis, entry) {
      visit_post_phi(ctx, (nir_phi_instr *)entry->key,
                     (LLVMValueRef)entry->data);
   }
}
static void visit_ssa_undef(struct ac_nir_context *ctx,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   LLVMValueRef undef;

   if (num_components == 1)
      undef = LLVMGetUndef(type);
   else {
      undef = LLVMGetUndef(LLVMVectorType(type, num_components));
   }
   ctx->ssa_defs[instr->def.index] = undef;
}
static void visit_jump(struct ac_llvm_context *ctx,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      ac_build_break(ctx);
      break;
   case nir_jump_continue:
      ac_build_continue(ctx);
      break;
   default:
      fprintf(stderr, "Unknown NIR jump instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }
}
static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
                       enum glsl_base_type type)
{
   switch (type) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return ac->i32;
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT8:
      return ac->i8;
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
      return ac->i16;
   case GLSL_TYPE_FLOAT:
      return ac->f32;
   case GLSL_TYPE_FLOAT16:
      return ac->f16;
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
      return ac->i64;
   case GLSL_TYPE_DOUBLE:
      return ac->f64;
   default:
      unreachable("unknown GLSL type");
   }
}
static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
                  const struct glsl_type *type)
{
   if (glsl_type_is_scalar(type)) {
      return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
   }

   if (glsl_type_is_vector(type)) {
      return LLVMVectorType(
         glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
         glsl_get_vector_elements(type));
   }

   if (glsl_type_is_matrix(type)) {
      return LLVMArrayType(
         glsl_to_llvm_type(ac, glsl_get_column_type(type)),
         glsl_get_matrix_columns(type));
   }

   if (glsl_type_is_array(type)) {
      return LLVMArrayType(
         glsl_to_llvm_type(ac, glsl_get_array_element(type)),
         glsl_get_length(type));
   }

   assert(glsl_type_is_struct_or_ifc(type));

   LLVMTypeRef member_types[glsl_get_length(type)];

   for (unsigned i = 0; i < glsl_get_length(type); i++) {
      member_types[i] =
         glsl_to_llvm_type(ac,
                           glsl_get_struct_field(type, i));
   }

   return LLVMStructTypeInContext(ac->context, member_types,
                                  glsl_get_length(type), false);
}
static void visit_deref(struct ac_nir_context *ctx,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;

   LLVMValueRef result = NULL;
   switch (instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
      result = entry->data;
      break;
   }
   case nir_deref_type_struct:
      if (instr->mode == nir_var_mem_global) {
         nir_deref_instr *parent = nir_deref_instr_parent(instr);
         uint64_t offset = glsl_get_struct_field_offset(parent->type,
                                                        instr->strct.index);
         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                   LLVMConstInt(ctx->ac.i32, offset, 0));
      } else {
         result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
      }
      break;
   case nir_deref_type_array:
      if (instr->mode == nir_var_mem_global) {
         nir_deref_instr *parent = nir_deref_instr_parent(instr);
         unsigned stride = glsl_get_explicit_stride(parent->type);

         if ((glsl_type_is_matrix(parent->type) &&
              glsl_matrix_type_is_row_major(parent->type)) ||
             (glsl_type_is_vector(parent->type) && stride == 0))
            stride = type_scalar_size_bytes(parent->type);

         LLVMValueRef index = get_src(ctx, instr->arr.index);
         if (LLVMTypeOf(index) != ctx->ac.i64)
            index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

         LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
      } else {
         result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                get_src(ctx, instr->arr.index));
      }
      break;
   case nir_deref_type_ptr_as_array:
      if (instr->mode == nir_var_mem_global) {
         unsigned stride = nir_deref_instr_ptr_as_array_stride(instr);

         LLVMValueRef index = get_src(ctx, instr->arr.index);
         if (LLVMTypeOf(index) != ctx->ac.i64)
            index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

         LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
      } else {
         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                   get_src(ctx, instr->arr.index));
      }
      break;
   case nir_deref_type_cast: {
      result = get_src(ctx, instr->parent);

      /* We can't use the structs from LLVM because the shader
       * specifies its own offsets. */
      LLVMTypeRef pointee_type = ctx->ac.i8;
      if (instr->mode == nir_var_mem_shared)
         pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);

      unsigned address_space;

      switch (instr->mode) {
      case nir_var_mem_shared:
         address_space = AC_ADDR_SPACE_LDS;
         break;
      case nir_var_mem_global:
         address_space = AC_ADDR_SPACE_GLOBAL;
         break;
      default:
         unreachable("Unhandled address space");
      }

      LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);

      if (LLVMTypeOf(result) != type) {
         if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
            result = LLVMBuildBitCast(ctx->ac.builder, result,
                                      type, "");
         } else {
            result = LLVMBuildIntToPtr(ctx->ac.builder, result,
                                       type, "");
         }
      }
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   ctx->ssa_defs[instr->dest.ssa.index] = result;
}
static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list);
static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(ctx, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(ctx, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(ctx, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         visit_phi(ctx, nir_instr_as_phi(instr));
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(&ctx->ac, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(ctx, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }

   _mesa_hash_table_insert(ctx->defs, block,
                           LLVMGetInsertBlock(ctx->ac.builder));
}
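
/* Control flow is emitted in structured form: ac_build_uif/ac_build_else/
 * ac_build_endif (and the bgnloop/endloop pair below) bracket the translated
 * then/else lists, keyed by the NIR block index so the helpers can match a
 * construct's begin and end.
 */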
static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
   LLVMValueRef value = get_src(ctx, if_stmt->condition);

   nir_block *then_block =
      (nir_block *) exec_list_get_head(&if_stmt->then_list);

   ac_build_uif(&ctx->ac, value, then_block->index);

   visit_cf_list(ctx, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      nir_block *else_block =
         (nir_block *) exec_list_get_head(&if_stmt->else_list);

      ac_build_else(&ctx->ac, else_block->index);
      visit_cf_list(ctx, &if_stmt->else_list);
   }

   ac_build_endif(&ctx->ac, then_block->index);
}
static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
   nir_block *first_loop_block =
      (nir_block *) exec_list_get_head(&loop->body);

   ac_build_bgnloop(&ctx->ac, first_loop_block->index);

   visit_cf_list(ctx, &loop->body);

   ac_build_endloop(&ctx->ac, first_loop_block->index);
}
static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(ctx, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         visit_if(ctx, nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         visit_loop(ctx, nir_cf_node_as_loop(node));
         break;

      default:
         assert(0);
      }
   }
}
void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
                             struct ac_shader_abi *abi,
                             struct nir_shader *nir,
                             struct nir_variable *variable,
                             gl_shader_stage stage)
{
   unsigned output_loc = variable->data.driver_location / 4;
   unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

   /* tess ctrl has its own load/store paths for outputs */
   if (stage == MESA_SHADER_TESS_CTRL)
      return;

   if (stage == MESA_SHADER_VERTEX ||
       stage == MESA_SHADER_TESS_EVAL ||
       stage == MESA_SHADER_GEOMETRY) {
      int idx = variable->data.location + variable->data.index;
      if (idx == VARYING_SLOT_CLIP_DIST0) {
         int length = nir->info.clip_distance_array_size +
                      nir->info.cull_distance_array_size;

         if (length > 4)
            attrib_count = 2;
         else
            attrib_count = 1;
      }
   }

   bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
   LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
   for (unsigned i = 0; i < attrib_count; ++i) {
      for (unsigned chan = 0; chan < 4; chan++) {
         abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
            ac_build_alloca_undef(ctx, type, "");
      }
   }
}
static void
setup_locals(struct ac_nir_context *ctx,
             struct nir_function *func)
{
   int i, j;
   ctx->num_locals = 0;
   nir_foreach_variable(variable, &func->impl->locals) {
      unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
      variable->data.driver_location = ctx->num_locals * 4;
      variable->data.location_frac = 0;
      ctx->num_locals += attrib_count;
   }
   ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
   if (!ctx->locals)
      return;

   for (i = 0; i < ctx->num_locals; i++) {
      for (j = 0; j < 4; j++) {
         ctx->locals[i * 4 + j] =
            ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
      }
   }
}
static void
setup_scratch(struct ac_nir_context *ctx,
              struct nir_shader *shader)
{
   if (shader->scratch_size == 0)
      return;

   ctx->scratch = ac_build_alloca_undef(&ctx->ac,
                                        LLVMArrayType(ctx->ac.i8, shader->scratch_size),
                                        "scratch");
}
static void
setup_constant_data(struct ac_nir_context *ctx,
                    struct nir_shader *shader)
{
   if (!shader->constant_data)
      return;

   LLVMValueRef data =
      LLVMConstStringInContext(ctx->ac.context,
                               shader->constant_data,
                               shader->constant_data_size,
                               true);
   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

   /* We want to put the constant data in the CONST address space so that
    * we can use scalar loads. However, LLVM versions before 10 put these
    * variables in the same section as the code, which is unacceptable
    * for RadeonSI as it needs to relocate all the data sections after
    * the code sections. See https://reviews.llvm.org/D65813.
    */
   unsigned address_space =
      LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

   LLVMValueRef global =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                  "const_data",
                                  address_space);

   LLVMSetInitializer(global, data);
   LLVMSetGlobalConstant(global, true);
   LLVMSetVisibility(global, LLVMHiddenVisibility);
   ctx->constant_data = global;
}
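
/* Compute shaders get their shared memory as a single i8 array in the LDS
 * address space; typed access is done by casting pointers into it (see the
 * nir_deref_type_cast handling in visit_deref).
 */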
static void
setup_shared(struct ac_nir_context *ctx,
             struct nir_shader *nir)
{
   if (ctx->ac.lds)
      return;

   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
                                    nir->info.cs.shared_size);

   LLVMValueRef lds =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                  "compute_lds",
                                  AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(lds, 64 * 1024);

   ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
                                  LLVMPointerType(ctx->ac.i8,
                                                  AC_ADDR_SPACE_LDS), "");
}
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
   struct ac_nir_context ctx = {};
   struct nir_function *func;

   ctx.ac = *ac;
   ctx.abi = abi;
   ctx.args = args;

   ctx.stage = nir->info.stage;
   ctx.info = &nir->info;

   ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

   nir_foreach_variable(variable, &nir->outputs)
      ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
                                   ctx.stage);

   ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_index_ssa_defs(func->impl);
   ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

   setup_locals(&ctx, func);
   setup_scratch(&ctx, nir);
   setup_constant_data(&ctx, nir);

   if (gl_shader_stage_is_compute(nir->info.stage))
      setup_shared(&ctx, nir);

   if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
      ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
      /* true = don't kill. */
      LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
   }

   visit_cf_list(&ctx, &func->impl->body);
   phi_post_pass(&ctx);

   if (ctx.ac.postponed_kill)
      ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
                                                    ctx.ac.postponed_kill, ""));

   if (!gl_shader_stage_is_compute(nir->info.stage))
      ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
                            ctx.abi->outputs);

   free(ctx.locals);
   free(ctx.ssa_defs);
   ralloc_free(ctx.defs);
   ralloc_free(ctx.phis);
   ralloc_free(ctx.vars);
}
bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
   bool progress = false;

   /* Lower large variables to scratch first so that we won't bloat the
    * shader by generating large if ladders for them. We later lower
    * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
    */
   NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
            nir_var_function_temp,
            256,
            glsl_get_natural_size_align_bytes);

   /* While it would be nice not to have this flag, we are constrained
    * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
    */
   bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

   /* TODO: Indirect indexing of GS inputs is unimplemented.
    *
    * TCS and TES load inputs directly from LDS or offchip memory, so
    * indirect indexing is trivial.
    */
   nir_variable_mode indirect_mask = 0;
   if (nir->info.stage == MESA_SHADER_GEOMETRY ||
       (nir->info.stage != MESA_SHADER_TESS_CTRL &&
        nir->info.stage != MESA_SHADER_TESS_EVAL &&
        !llvm_has_working_vgpr_indexing)) {
      indirect_mask |= nir_var_shader_in;
   }
   if (!llvm_has_working_vgpr_indexing &&
       nir->info.stage != MESA_SHADER_TESS_CTRL)
      indirect_mask |= nir_var_shader_out;

   /* TODO: We shouldn't need to do this, however LLVM isn't currently
    * smart enough to handle indirects without causing excess spilling
    * causing the gpu to hang.
    *
    * See the following thread for more details of the problem:
    * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
    */
   indirect_mask |= nir_var_function_temp;

   progress |= nir_lower_indirect_derefs(nir, indirect_mask);
   return progress;
}
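
/* Tess factor writemask encoding used by the analysis below: the low bits
 * track gl_TessLevelInner components and the bits shifted up by 4 track
 * gl_TessLevelOuter, so one mask can describe writes to both varyings.
 */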
static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
   if (intrin->intrinsic != nir_intrinsic_store_deref)
      return 0;

   nir_variable *var =
      nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

   if (var->data.mode != nir_var_shader_out)
      return 0;

   unsigned writemask = 0;
   const int location = var->data.location;
   unsigned first_component = var->data.location_frac;
   unsigned num_comps = intrin->dest.ssa.num_components;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER)
      writemask = ((1 << (num_comps + 1)) - 1) << first_component;
   else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
      writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

   return writemask;
}
5309 scan_tess_ctrl(nir_cf_node
*cf_node
, unsigned *upper_block_tf_writemask
,
5310 unsigned *cond_block_tf_writemask
,
5311 bool *tessfactors_are_def_in_all_invocs
, bool is_nested_cf
)
5313 switch (cf_node
->type
) {
5314 case nir_cf_node_block
: {
5315 nir_block
*block
= nir_cf_node_as_block(cf_node
);
5316 nir_foreach_instr(instr
, block
) {
5317 if (instr
->type
!= nir_instr_type_intrinsic
)

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_control_barrier) {
            /* If we find a barrier in nested control flow put this in the
             * too hard basket. In GLSL this is not possible but it is in
             * SPIR-V.
             */
            if (is_nested_cf) {
               *tessfactors_are_def_in_all_invocs = false;
               return;
            }

            /* The following case must be prevented:
             *    gl_TessLevelInner = ...;
             *    barrier();
             *    if (gl_InvocationID == 1)
             *       gl_TessLevelInner = ...;
             *
             * If you consider disjoint code segments separated by barriers, each
             * such segment that writes tess factor channels should write the same
             * channels in all codepaths within that segment.
             */
            if (upper_block_tf_writemask || cond_block_tf_writemask) {
               /* Accumulate the result: */
               *tessfactors_are_def_in_all_invocs &=
                  !(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

               /* Analyze the next code segment from scratch. */
               *upper_block_tf_writemask = 0;
               *cond_block_tf_writemask = 0;
            }
         } else
            *upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
      }
      break;
   }
   case nir_cf_node_if: {
      unsigned then_tessfactor_writemask = 0;
      unsigned else_tessfactor_writemask = 0;

      nir_if *if_stmt = nir_cf_node_as_if(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
         scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }

      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
         scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }

      if (then_tessfactor_writemask || else_tessfactor_writemask) {
         /* If both statements write the same tess factor channels,
          * we can say that the upper block writes them too.
          */
         *upper_block_tf_writemask |= then_tessfactor_writemask &
                                      else_tessfactor_writemask;
         *cond_block_tf_writemask |= then_tessfactor_writemask |
                                     else_tessfactor_writemask;
      }
      break;
   }
   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         scan_tess_ctrl(nested_node, cond_block_tf_writemask,
                        cond_block_tf_writemask,
                        tessfactors_are_def_in_all_invocs, true);
      }
      break;
   }
   default:
      unreachable("unknown cf node type");
   }
}
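
/* Return whether every TCS invocation defines the tess factor outputs in
 * every codepath of every barrier-separated code segment. The analysis is
 * exactly what scan_tess_ctrl() computes; presumably a driver uses the
 * result to read tess factors back from the current invocation without an
 * extra LDS round trip, though that use is outside this file. */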
bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

   /* The pass works as follows:
    * If all codepaths write tess factors, we can say that all
    * invocations define tess factors.
    *
    * Each tess factor channel is tracked separately.
    */
   unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
   unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

   /* Initial value = true. Here the pass will accumulate results from
    * multiple segments surrounded by barriers. If tess factors aren't
    * written at all, it's a shader bug and we don't care if this will be
    * true.
    */
   bool tessfactors_are_def_in_all_invocs = true;

   nir_foreach_function(function, nir) {
      if (function->impl) {
         foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
            scan_tess_ctrl(node, &main_block_tf_writemask,
                           &cond_block_tf_writemask,
                           &tessfactors_are_def_in_all_invocs,
                           false);
         }
      }
   }

   /* Accumulate the result for the last code segment separated by a
    * barrier.
    */
   if (main_block_tf_writemask || cond_block_tf_writemask) {
      tessfactors_are_def_in_all_invocs &=
         !(cond_block_tf_writemask & ~main_block_tf_writemask);
   }

   return tessfactors_are_def_in_all_invocs;
}