2 * Copyright © 2016 Bas Nieuwenhuizen
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include <llvm/Config/llvm-config.h>
26 #include "ac_nir_to_llvm.h"
27 #include "ac_llvm_build.h"
28 #include "ac_llvm_util.h"
29 #include "ac_binary.h"
32 #include "nir/nir_deref.h"
33 #include "util/bitscan.h"
34 #include "util/u_math.h"
35 #include "ac_shader_abi.h"
36 #include "ac_shader_util.h"
38 struct ac_nir_context
{
39 struct ac_llvm_context ac
;
40 struct ac_shader_abi
*abi
;
41 const struct ac_shader_args
*args
;
43 gl_shader_stage stage
;
46 LLVMValueRef
*ssa_defs
;
49 LLVMValueRef constant_data
;
51 struct hash_table
*defs
;
52 struct hash_table
*phis
;
53 struct hash_table
*vars
;
55 LLVMValueRef main_function
;
56 LLVMBasicBlockRef continue_block
;
57 LLVMBasicBlockRef break_block
;
63 static LLVMValueRef
get_sampler_desc_index(struct ac_nir_context
*ctx
,
64 nir_deref_instr
*deref_instr
,
65 const nir_instr
*instr
,
68 static LLVMValueRef
get_sampler_desc(struct ac_nir_context
*ctx
,
69 nir_deref_instr
*deref_instr
,
70 enum ac_descriptor_type desc_type
,
71 const nir_instr
*instr
,
73 bool image
, bool write
);
76 build_store_values_extended(struct ac_llvm_context
*ac
,
79 unsigned value_stride
,
82 LLVMBuilderRef builder
= ac
->builder
;
85 for (i
= 0; i
< value_count
; i
++) {
86 LLVMValueRef ptr
= values
[i
* value_stride
];
87 LLVMValueRef index
= LLVMConstInt(ac
->i32
, i
, false);
88 LLVMValueRef value
= LLVMBuildExtractElement(builder
, vec
, index
, "");
89 LLVMBuildStore(builder
, value
, ptr
);
93 static LLVMTypeRef
get_def_type(struct ac_nir_context
*ctx
,
94 const nir_ssa_def
*def
)
96 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, def
->bit_size
);
97 if (def
->num_components
> 1) {
98 type
= LLVMVectorType(type
, def
->num_components
);
103 static LLVMValueRef
get_src(struct ac_nir_context
*nir
, nir_src src
)
106 return nir
->ssa_defs
[src
.ssa
->index
];
110 get_memory_ptr(struct ac_nir_context
*ctx
, nir_src src
, unsigned bit_size
)
112 LLVMValueRef ptr
= get_src(ctx
, src
);
113 ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ctx
->ac
.lds
, &ptr
, 1, "");
114 int addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
116 LLVMTypeRef type
= LLVMIntTypeInContext(ctx
->ac
.context
, bit_size
);
118 return LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
119 LLVMPointerType(type
, addr_space
), "");
122 static LLVMBasicBlockRef
get_block(struct ac_nir_context
*nir
,
123 const struct nir_block
*b
)
125 struct hash_entry
*entry
= _mesa_hash_table_search(nir
->defs
, b
);
126 return (LLVMBasicBlockRef
)entry
->data
;
129 static LLVMValueRef
get_alu_src(struct ac_nir_context
*ctx
,
131 unsigned num_components
)
133 LLVMValueRef value
= get_src(ctx
, src
.src
);
134 bool need_swizzle
= false;
137 unsigned src_components
= ac_get_llvm_num_components(value
);
138 for (unsigned i
= 0; i
< num_components
; ++i
) {
139 assert(src
.swizzle
[i
] < src_components
);
140 if (src
.swizzle
[i
] != i
)
144 if (need_swizzle
|| num_components
!= src_components
) {
145 LLVMValueRef masks
[] = {
146 LLVMConstInt(ctx
->ac
.i32
, src
.swizzle
[0], false),
147 LLVMConstInt(ctx
->ac
.i32
, src
.swizzle
[1], false),
148 LLVMConstInt(ctx
->ac
.i32
, src
.swizzle
[2], false),
149 LLVMConstInt(ctx
->ac
.i32
, src
.swizzle
[3], false)};
151 if (src_components
> 1 && num_components
== 1) {
152 value
= LLVMBuildExtractElement(ctx
->ac
.builder
, value
,
154 } else if (src_components
== 1 && num_components
> 1) {
155 LLVMValueRef values
[] = {value
, value
, value
, value
};
156 value
= ac_build_gather_values(&ctx
->ac
, values
, num_components
);
158 LLVMValueRef swizzle
= LLVMConstVector(masks
, num_components
);
159 value
= LLVMBuildShuffleVector(ctx
->ac
.builder
, value
, value
,
168 static LLVMValueRef
emit_int_cmp(struct ac_llvm_context
*ctx
,
169 LLVMIntPredicate pred
, LLVMValueRef src0
,
172 LLVMValueRef result
= LLVMBuildICmp(ctx
->builder
, pred
, src0
, src1
, "");
173 return LLVMBuildSelect(ctx
->builder
, result
,
174 LLVMConstInt(ctx
->i32
, 0xFFFFFFFF, false),
178 static LLVMValueRef
emit_float_cmp(struct ac_llvm_context
*ctx
,
179 LLVMRealPredicate pred
, LLVMValueRef src0
,
183 src0
= ac_to_float(ctx
, src0
);
184 src1
= ac_to_float(ctx
, src1
);
185 result
= LLVMBuildFCmp(ctx
->builder
, pred
, src0
, src1
, "");
186 return LLVMBuildSelect(ctx
->builder
, result
,
187 LLVMConstInt(ctx
->i32
, 0xFFFFFFFF, false),
191 static LLVMValueRef
emit_intrin_1f_param(struct ac_llvm_context
*ctx
,
193 LLVMTypeRef result_type
,
197 LLVMValueRef params
[] = {
198 ac_to_float(ctx
, src0
),
201 ASSERTED
const int length
= snprintf(name
, sizeof(name
), "%s.f%d", intrin
,
202 ac_get_elem_bits(ctx
, result_type
));
203 assert(length
< sizeof(name
));
204 return ac_build_intrinsic(ctx
, name
, result_type
, params
, 1, AC_FUNC_ATTR_READNONE
);
207 static LLVMValueRef
emit_intrin_2f_param(struct ac_llvm_context
*ctx
,
209 LLVMTypeRef result_type
,
210 LLVMValueRef src0
, LLVMValueRef src1
)
213 LLVMValueRef params
[] = {
214 ac_to_float(ctx
, src0
),
215 ac_to_float(ctx
, src1
),
218 ASSERTED
const int length
= snprintf(name
, sizeof(name
), "%s.f%d", intrin
,
219 ac_get_elem_bits(ctx
, result_type
));
220 assert(length
< sizeof(name
));
221 return ac_build_intrinsic(ctx
, name
, result_type
, params
, 2, AC_FUNC_ATTR_READNONE
);
224 static LLVMValueRef
emit_intrin_3f_param(struct ac_llvm_context
*ctx
,
226 LLVMTypeRef result_type
,
227 LLVMValueRef src0
, LLVMValueRef src1
, LLVMValueRef src2
)
230 LLVMValueRef params
[] = {
231 ac_to_float(ctx
, src0
),
232 ac_to_float(ctx
, src1
),
233 ac_to_float(ctx
, src2
),
236 ASSERTED
const int length
= snprintf(name
, sizeof(name
), "%s.f%d", intrin
,
237 ac_get_elem_bits(ctx
, result_type
));
238 assert(length
< sizeof(name
));
239 return ac_build_intrinsic(ctx
, name
, result_type
, params
, 3, AC_FUNC_ATTR_READNONE
);
242 static LLVMValueRef
emit_bcsel(struct ac_llvm_context
*ctx
,
243 LLVMValueRef src0
, LLVMValueRef src1
, LLVMValueRef src2
)
245 LLVMTypeRef src1_type
= LLVMTypeOf(src1
);
246 LLVMTypeRef src2_type
= LLVMTypeOf(src2
);
248 assert(LLVMGetTypeKind(LLVMTypeOf(src0
)) != LLVMVectorTypeKind
);
250 if (LLVMGetTypeKind(src1_type
) == LLVMPointerTypeKind
&&
251 LLVMGetTypeKind(src2_type
) != LLVMPointerTypeKind
) {
252 src2
= LLVMBuildIntToPtr(ctx
->builder
, src2
, src1_type
, "");
253 } else if (LLVMGetTypeKind(src2_type
) == LLVMPointerTypeKind
&&
254 LLVMGetTypeKind(src1_type
) != LLVMPointerTypeKind
) {
255 src1
= LLVMBuildIntToPtr(ctx
->builder
, src1
, src2_type
, "");
258 LLVMValueRef v
= LLVMBuildICmp(ctx
->builder
, LLVMIntNE
, src0
,
260 return LLVMBuildSelect(ctx
->builder
, v
,
261 ac_to_integer_or_pointer(ctx
, src1
),
262 ac_to_integer_or_pointer(ctx
, src2
), "");
265 static LLVMValueRef
emit_iabs(struct ac_llvm_context
*ctx
,
268 return ac_build_imax(ctx
, src0
, LLVMBuildNeg(ctx
->builder
, src0
, ""));
271 static LLVMValueRef
emit_uint_carry(struct ac_llvm_context
*ctx
,
273 LLVMValueRef src0
, LLVMValueRef src1
)
275 LLVMTypeRef ret_type
;
276 LLVMTypeRef types
[] = { ctx
->i32
, ctx
->i1
};
278 LLVMValueRef params
[] = { src0
, src1
};
279 ret_type
= LLVMStructTypeInContext(ctx
->context
, types
,
282 res
= ac_build_intrinsic(ctx
, intrin
, ret_type
,
283 params
, 2, AC_FUNC_ATTR_READNONE
);
285 res
= LLVMBuildExtractValue(ctx
->builder
, res
, 1, "");
286 res
= LLVMBuildZExt(ctx
->builder
, res
, ctx
->i32
, "");
290 static LLVMValueRef
emit_b2f(struct ac_llvm_context
*ctx
,
294 LLVMValueRef result
= LLVMBuildAnd(ctx
->builder
, src0
,
295 LLVMBuildBitCast(ctx
->builder
, LLVMConstReal(ctx
->f32
, 1.0), ctx
->i32
, ""),
297 result
= LLVMBuildBitCast(ctx
->builder
, result
, ctx
->f32
, "");
301 return LLVMBuildFPTrunc(ctx
->builder
, result
, ctx
->f16
, "");
305 return LLVMBuildFPExt(ctx
->builder
, result
, ctx
->f64
, "");
307 unreachable("Unsupported bit size.");
311 static LLVMValueRef
emit_f2b(struct ac_llvm_context
*ctx
,
314 src0
= ac_to_float(ctx
, src0
);
315 LLVMValueRef zero
= LLVMConstNull(LLVMTypeOf(src0
));
316 return LLVMBuildSExt(ctx
->builder
,
317 LLVMBuildFCmp(ctx
->builder
, LLVMRealUNE
, src0
, zero
, ""),
321 static LLVMValueRef
emit_b2i(struct ac_llvm_context
*ctx
,
325 LLVMValueRef result
= LLVMBuildAnd(ctx
->builder
, src0
, ctx
->i32_1
, "");
329 return LLVMBuildTrunc(ctx
->builder
, result
, ctx
->i8
, "");
331 return LLVMBuildTrunc(ctx
->builder
, result
, ctx
->i16
, "");
335 return LLVMBuildZExt(ctx
->builder
, result
, ctx
->i64
, "");
337 unreachable("Unsupported bit size.");
341 static LLVMValueRef
emit_i2b(struct ac_llvm_context
*ctx
,
344 LLVMValueRef zero
= LLVMConstNull(LLVMTypeOf(src0
));
345 return LLVMBuildSExt(ctx
->builder
,
346 LLVMBuildICmp(ctx
->builder
, LLVMIntNE
, src0
, zero
, ""),
350 static LLVMValueRef
emit_f2f16(struct ac_llvm_context
*ctx
,
354 LLVMValueRef cond
= NULL
;
356 src0
= ac_to_float(ctx
, src0
);
357 result
= LLVMBuildFPTrunc(ctx
->builder
, src0
, ctx
->f16
, "");
359 if (ctx
->chip_class
>= GFX8
) {
360 LLVMValueRef args
[2];
361 /* Check if the result is a denormal - and flush to 0 if so. */
363 args
[1] = LLVMConstInt(ctx
->i32
, N_SUBNORMAL
| P_SUBNORMAL
, false);
364 cond
= ac_build_intrinsic(ctx
, "llvm.amdgcn.class.f16", ctx
->i1
, args
, 2, AC_FUNC_ATTR_READNONE
);
367 /* need to convert back up to f32 */
368 result
= LLVMBuildFPExt(ctx
->builder
, result
, ctx
->f32
, "");
370 if (ctx
->chip_class
>= GFX8
)
371 result
= LLVMBuildSelect(ctx
->builder
, cond
, ctx
->f32_0
, result
, "");
374 /* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
375 * so compare the result and flush to 0 if it's smaller.
377 LLVMValueRef temp
, cond2
;
378 temp
= emit_intrin_1f_param(ctx
, "llvm.fabs", ctx
->f32
, result
);
379 cond
= LLVMBuildFCmp(ctx
->builder
, LLVMRealOGT
,
380 LLVMBuildBitCast(ctx
->builder
, LLVMConstInt(ctx
->i32
, 0x38800000, false), ctx
->f32
, ""),
382 cond2
= LLVMBuildFCmp(ctx
->builder
, LLVMRealONE
,
383 temp
, ctx
->f32_0
, "");
384 cond
= LLVMBuildAnd(ctx
->builder
, cond
, cond2
, "");
385 result
= LLVMBuildSelect(ctx
->builder
, cond
, ctx
->f32_0
, result
, "");
390 static LLVMValueRef
emit_umul_high(struct ac_llvm_context
*ctx
,
391 LLVMValueRef src0
, LLVMValueRef src1
)
393 LLVMValueRef dst64
, result
;
394 src0
= LLVMBuildZExt(ctx
->builder
, src0
, ctx
->i64
, "");
395 src1
= LLVMBuildZExt(ctx
->builder
, src1
, ctx
->i64
, "");
397 dst64
= LLVMBuildMul(ctx
->builder
, src0
, src1
, "");
398 dst64
= LLVMBuildLShr(ctx
->builder
, dst64
, LLVMConstInt(ctx
->i64
, 32, false), "");
399 result
= LLVMBuildTrunc(ctx
->builder
, dst64
, ctx
->i32
, "");
403 static LLVMValueRef
emit_imul_high(struct ac_llvm_context
*ctx
,
404 LLVMValueRef src0
, LLVMValueRef src1
)
406 LLVMValueRef dst64
, result
;
407 src0
= LLVMBuildSExt(ctx
->builder
, src0
, ctx
->i64
, "");
408 src1
= LLVMBuildSExt(ctx
->builder
, src1
, ctx
->i64
, "");
410 dst64
= LLVMBuildMul(ctx
->builder
, src0
, src1
, "");
411 dst64
= LLVMBuildAShr(ctx
->builder
, dst64
, LLVMConstInt(ctx
->i64
, 32, false), "");
412 result
= LLVMBuildTrunc(ctx
->builder
, dst64
, ctx
->i32
, "");
416 static LLVMValueRef
emit_bfm(struct ac_llvm_context
*ctx
,
417 LLVMValueRef bits
, LLVMValueRef offset
)
419 /* mask = ((1 << bits) - 1) << offset */
420 return LLVMBuildShl(ctx
->builder
,
421 LLVMBuildSub(ctx
->builder
,
422 LLVMBuildShl(ctx
->builder
,
429 static LLVMValueRef
emit_bitfield_select(struct ac_llvm_context
*ctx
,
430 LLVMValueRef mask
, LLVMValueRef insert
,
434 * (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
435 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
437 return LLVMBuildXor(ctx
->builder
, base
,
438 LLVMBuildAnd(ctx
->builder
, mask
,
439 LLVMBuildXor(ctx
->builder
, insert
, base
, ""), ""), "");
442 static LLVMValueRef
emit_pack_2x16(struct ac_llvm_context
*ctx
,
444 LLVMValueRef (*pack
)(struct ac_llvm_context
*ctx
,
445 LLVMValueRef args
[2]))
447 LLVMValueRef comp
[2];
449 src0
= ac_to_float(ctx
, src0
);
450 comp
[0] = LLVMBuildExtractElement(ctx
->builder
, src0
, ctx
->i32_0
, "");
451 comp
[1] = LLVMBuildExtractElement(ctx
->builder
, src0
, ctx
->i32_1
, "");
453 return LLVMBuildBitCast(ctx
->builder
, pack(ctx
, comp
), ctx
->i32
, "");
456 static LLVMValueRef
emit_unpack_half_2x16(struct ac_llvm_context
*ctx
,
459 LLVMValueRef const16
= LLVMConstInt(ctx
->i32
, 16, false);
460 LLVMValueRef temps
[2], val
;
463 for (i
= 0; i
< 2; i
++) {
464 val
= i
== 1 ? LLVMBuildLShr(ctx
->builder
, src0
, const16
, "") : src0
;
465 val
= LLVMBuildTrunc(ctx
->builder
, val
, ctx
->i16
, "");
466 val
= LLVMBuildBitCast(ctx
->builder
, val
, ctx
->f16
, "");
467 temps
[i
] = LLVMBuildFPExt(ctx
->builder
, val
, ctx
->f32
, "");
469 return ac_build_gather_values(ctx
, temps
, 2);
472 static LLVMValueRef
emit_ddxy(struct ac_nir_context
*ctx
,
480 if (op
== nir_op_fddx_fine
)
481 mask
= AC_TID_MASK_LEFT
;
482 else if (op
== nir_op_fddy_fine
)
483 mask
= AC_TID_MASK_TOP
;
485 mask
= AC_TID_MASK_TOP_LEFT
;
487 /* for DDX we want to next X pixel, DDY next Y pixel. */
488 if (op
== nir_op_fddx_fine
||
489 op
== nir_op_fddx_coarse
||
495 result
= ac_build_ddxy(&ctx
->ac
, mask
, idx
, src0
);
499 struct waterfall_context
{
500 LLVMBasicBlockRef phi_bb
[2];
504 /* To deal with divergent descriptors we can create a loop that handles all
505 * lanes with the same descriptor on a given iteration (henceforth a
508 * These helper create the begin and end of the loop leaving the caller
509 * to implement the body.
512 * - ctx is the usal nir context
513 * - wctx is a temporary struct containing some loop info. Can be left uninitialized.
514 * - value is the possibly divergent value for which we built the loop
515 * - divergent is whether value is actually divergent. If false we just pass
518 static LLVMValueRef
enter_waterfall(struct ac_nir_context
*ctx
,
519 struct waterfall_context
*wctx
,
520 LLVMValueRef value
, bool divergent
)
522 /* If the app claims the value is divergent but it is constant we can
523 * end up with a dynamic index of NULL. */
527 wctx
->use_waterfall
= divergent
;
531 ac_build_bgnloop(&ctx
->ac
, 6000);
533 LLVMValueRef scalar_value
= ac_build_readlane(&ctx
->ac
, value
, NULL
);
535 LLVMValueRef active
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntEQ
, value
,
536 scalar_value
, "uniform_active");
538 wctx
->phi_bb
[0] = LLVMGetInsertBlock(ctx
->ac
.builder
);
539 ac_build_ifcc(&ctx
->ac
, active
, 6001);
544 static LLVMValueRef
exit_waterfall(struct ac_nir_context
*ctx
,
545 struct waterfall_context
*wctx
,
548 LLVMValueRef ret
= NULL
;
549 LLVMValueRef phi_src
[2];
550 LLVMValueRef cc_phi_src
[2] = {
551 LLVMConstInt(ctx
->ac
.i32
, 0, false),
552 LLVMConstInt(ctx
->ac
.i32
, 0xffffffff, false),
555 if (!wctx
->use_waterfall
)
558 wctx
->phi_bb
[1] = LLVMGetInsertBlock(ctx
->ac
.builder
);
560 ac_build_endif(&ctx
->ac
, 6001);
563 phi_src
[0] = LLVMGetUndef(LLVMTypeOf(value
));
566 ret
= ac_build_phi(&ctx
->ac
, LLVMTypeOf(value
), 2, phi_src
, wctx
->phi_bb
);
570 * By using the optimization barrier on the exit decision, we decouple
571 * the operations from the break, and hence avoid LLVM hoisting the
572 * opteration into the break block.
574 LLVMValueRef cc
= ac_build_phi(&ctx
->ac
, ctx
->ac
.i32
, 2, cc_phi_src
, wctx
->phi_bb
);
575 ac_build_optimization_barrier(&ctx
->ac
, &cc
);
577 LLVMValueRef active
= LLVMBuildICmp(ctx
->ac
.builder
, LLVMIntNE
, cc
, ctx
->ac
.i32_0
, "uniform_active2");
578 ac_build_ifcc(&ctx
->ac
, active
, 6002);
579 ac_build_break(&ctx
->ac
);
580 ac_build_endif(&ctx
->ac
, 6002);
582 ac_build_endloop(&ctx
->ac
, 6000);
586 static void visit_alu(struct ac_nir_context
*ctx
, const nir_alu_instr
*instr
)
588 LLVMValueRef src
[4], result
= NULL
;
589 unsigned num_components
= instr
->dest
.dest
.ssa
.num_components
;
590 unsigned src_components
;
591 LLVMTypeRef def_type
= get_def_type(ctx
, &instr
->dest
.dest
.ssa
);
593 assert(nir_op_infos
[instr
->op
].num_inputs
<= ARRAY_SIZE(src
));
600 case nir_op_pack_half_2x16
:
601 case nir_op_pack_snorm_2x16
:
602 case nir_op_pack_unorm_2x16
:
605 case nir_op_unpack_half_2x16
:
608 case nir_op_cube_face_coord
:
609 case nir_op_cube_face_index
:
613 src_components
= num_components
;
616 for (unsigned i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
617 src
[i
] = get_alu_src(ctx
, instr
->src
[i
], src_components
);
624 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
625 result
= LLVMBuildFNeg(ctx
->ac
.builder
, src
[0], "");
626 if (ctx
->ac
.float_mode
== AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO
) {
627 /* fneg will be optimized by backend compiler with sign
628 * bit removed via XOR. This is probably a LLVM bug.
630 result
= ac_build_canonicalize(&ctx
->ac
, result
,
631 instr
->dest
.dest
.ssa
.bit_size
);
635 result
= LLVMBuildNeg(ctx
->ac
.builder
, src
[0], "");
638 result
= LLVMBuildNot(ctx
->ac
.builder
, src
[0], "");
641 result
= LLVMBuildAdd(ctx
->ac
.builder
, src
[0], src
[1], "");
644 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
645 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
646 result
= LLVMBuildFAdd(ctx
->ac
.builder
, src
[0], src
[1], "");
649 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
650 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
651 result
= LLVMBuildFSub(ctx
->ac
.builder
, src
[0], src
[1], "");
654 result
= LLVMBuildSub(ctx
->ac
.builder
, src
[0], src
[1], "");
657 result
= LLVMBuildMul(ctx
->ac
.builder
, src
[0], src
[1], "");
660 result
= LLVMBuildSRem(ctx
->ac
.builder
, src
[0], src
[1], "");
663 result
= LLVMBuildURem(ctx
->ac
.builder
, src
[0], src
[1], "");
666 /* lower_fmod only lower 16-bit and 32-bit fmod */
667 assert(instr
->dest
.dest
.ssa
.bit_size
== 64);
668 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
669 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
670 result
= ac_build_fdiv(&ctx
->ac
, src
[0], src
[1]);
671 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.floor",
672 ac_to_float_type(&ctx
->ac
, def_type
), result
);
673 result
= LLVMBuildFMul(ctx
->ac
.builder
, src
[1] , result
, "");
674 result
= LLVMBuildFSub(ctx
->ac
.builder
, src
[0], result
, "");
677 result
= LLVMBuildSRem(ctx
->ac
.builder
, src
[0], src
[1], "");
680 result
= LLVMBuildSDiv(ctx
->ac
.builder
, src
[0], src
[1], "");
683 result
= LLVMBuildUDiv(ctx
->ac
.builder
, src
[0], src
[1], "");
686 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
687 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
688 result
= LLVMBuildFMul(ctx
->ac
.builder
, src
[0], src
[1], "");
691 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.amdgcn.rcp",
692 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
695 result
= LLVMBuildAnd(ctx
->ac
.builder
, src
[0], src
[1], "");
698 result
= LLVMBuildOr(ctx
->ac
.builder
, src
[0], src
[1], "");
701 result
= LLVMBuildXor(ctx
->ac
.builder
, src
[0], src
[1], "");
704 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) < ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
705 src
[1] = LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
706 LLVMTypeOf(src
[0]), "");
707 else if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) > ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
708 src
[1] = LLVMBuildTrunc(ctx
->ac
.builder
, src
[1],
709 LLVMTypeOf(src
[0]), "");
710 result
= LLVMBuildShl(ctx
->ac
.builder
, src
[0], src
[1], "");
713 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) < ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
714 src
[1] = LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
715 LLVMTypeOf(src
[0]), "");
716 else if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) > ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
717 src
[1] = LLVMBuildTrunc(ctx
->ac
.builder
, src
[1],
718 LLVMTypeOf(src
[0]), "");
719 result
= LLVMBuildAShr(ctx
->ac
.builder
, src
[0], src
[1], "");
722 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) < ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
723 src
[1] = LLVMBuildZExt(ctx
->ac
.builder
, src
[1],
724 LLVMTypeOf(src
[0]), "");
725 else if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[1])) > ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])))
726 src
[1] = LLVMBuildTrunc(ctx
->ac
.builder
, src
[1],
727 LLVMTypeOf(src
[0]), "");
728 result
= LLVMBuildLShr(ctx
->ac
.builder
, src
[0], src
[1], "");
731 result
= emit_int_cmp(&ctx
->ac
, LLVMIntSLT
, src
[0], src
[1]);
734 result
= emit_int_cmp(&ctx
->ac
, LLVMIntNE
, src
[0], src
[1]);
737 result
= emit_int_cmp(&ctx
->ac
, LLVMIntEQ
, src
[0], src
[1]);
740 result
= emit_int_cmp(&ctx
->ac
, LLVMIntSGE
, src
[0], src
[1]);
743 result
= emit_int_cmp(&ctx
->ac
, LLVMIntULT
, src
[0], src
[1]);
746 result
= emit_int_cmp(&ctx
->ac
, LLVMIntUGE
, src
[0], src
[1]);
749 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOEQ
, src
[0], src
[1]);
752 result
= emit_float_cmp(&ctx
->ac
, LLVMRealUNE
, src
[0], src
[1]);
755 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOLT
, src
[0], src
[1]);
758 result
= emit_float_cmp(&ctx
->ac
, LLVMRealOGE
, src
[0], src
[1]);
761 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.fabs",
762 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
763 if (ctx
->ac
.float_mode
== AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO
) {
764 /* fabs will be optimized by backend compiler with sign
765 * bit removed via AND.
767 result
= ac_build_canonicalize(&ctx
->ac
, result
,
768 instr
->dest
.dest
.ssa
.bit_size
);
772 result
= emit_iabs(&ctx
->ac
, src
[0]);
775 result
= ac_build_imax(&ctx
->ac
, src
[0], src
[1]);
778 result
= ac_build_imin(&ctx
->ac
, src
[0], src
[1]);
781 result
= ac_build_umax(&ctx
->ac
, src
[0], src
[1]);
784 result
= ac_build_umin(&ctx
->ac
, src
[0], src
[1]);
787 result
= ac_build_isign(&ctx
->ac
, src
[0],
788 instr
->dest
.dest
.ssa
.bit_size
);
791 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
792 result
= ac_build_fsign(&ctx
->ac
, src
[0],
793 instr
->dest
.dest
.ssa
.bit_size
);
796 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.floor",
797 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
800 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.trunc",
801 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
804 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.ceil",
805 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
807 case nir_op_fround_even
:
808 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.rint",
809 ac_to_float_type(&ctx
->ac
, def_type
),src
[0]);
812 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
813 result
= ac_build_fract(&ctx
->ac
, src
[0],
814 instr
->dest
.dest
.ssa
.bit_size
);
817 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.sin",
818 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
821 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.cos",
822 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
825 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.sqrt",
826 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
829 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.exp2",
830 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
833 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.log2",
834 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
837 result
= emit_intrin_1f_param(&ctx
->ac
, "llvm.amdgcn.rsq",
838 ac_to_float_type(&ctx
->ac
, def_type
), src
[0]);
840 case nir_op_frexp_exp
:
841 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
842 result
= ac_build_frexp_exp(&ctx
->ac
, src
[0],
843 ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])));
844 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) == 16)
845 result
= LLVMBuildSExt(ctx
->ac
.builder
, result
,
848 case nir_op_frexp_sig
:
849 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
850 result
= ac_build_frexp_mant(&ctx
->ac
, src
[0],
851 instr
->dest
.dest
.ssa
.bit_size
);
854 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.pow",
855 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
858 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.maxnum",
859 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
860 if (ctx
->ac
.chip_class
< GFX9
&&
861 instr
->dest
.dest
.ssa
.bit_size
== 32) {
862 /* Only pre-GFX9 chips do not flush denorms. */
863 result
= ac_build_canonicalize(&ctx
->ac
, result
,
864 instr
->dest
.dest
.ssa
.bit_size
);
868 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.minnum",
869 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
870 if (ctx
->ac
.chip_class
< GFX9
&&
871 instr
->dest
.dest
.ssa
.bit_size
== 32) {
872 /* Only pre-GFX9 chips do not flush denorms. */
873 result
= ac_build_canonicalize(&ctx
->ac
, result
,
874 instr
->dest
.dest
.ssa
.bit_size
);
878 /* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
879 result
= emit_intrin_3f_param(&ctx
->ac
, ctx
->ac
.chip_class
>= GFX10
? "llvm.fma" : "llvm.fmuladd",
880 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1], src
[2]);
883 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
884 if (ac_get_elem_bits(&ctx
->ac
, def_type
) == 32)
885 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.ldexp.f32", ctx
->ac
.f32
, src
, 2, AC_FUNC_ATTR_READNONE
);
886 else if (ac_get_elem_bits(&ctx
->ac
, def_type
) == 16)
887 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.ldexp.f16", ctx
->ac
.f16
, src
, 2, AC_FUNC_ATTR_READNONE
);
889 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.ldexp.f64", ctx
->ac
.f64
, src
, 2, AC_FUNC_ATTR_READNONE
);
892 result
= emit_bfm(&ctx
->ac
, src
[0], src
[1]);
894 case nir_op_bitfield_select
:
895 result
= emit_bitfield_select(&ctx
->ac
, src
[0], src
[1], src
[2]);
898 result
= ac_build_bfe(&ctx
->ac
, src
[0], src
[1], src
[2], false);
901 result
= ac_build_bfe(&ctx
->ac
, src
[0], src
[1], src
[2], true);
903 case nir_op_bitfield_reverse
:
904 result
= ac_build_bitfield_reverse(&ctx
->ac
, src
[0]);
906 case nir_op_bit_count
:
907 result
= ac_build_bit_count(&ctx
->ac
, src
[0]);
912 for (unsigned i
= 0; i
< nir_op_infos
[instr
->op
].num_inputs
; i
++)
913 src
[i
] = ac_to_integer(&ctx
->ac
, src
[i
]);
914 result
= ac_build_gather_values(&ctx
->ac
, src
, num_components
);
920 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
921 result
= LLVMBuildFPToSI(ctx
->ac
.builder
, src
[0], def_type
, "");
927 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
928 result
= LLVMBuildFPToUI(ctx
->ac
.builder
, src
[0], def_type
, "");
933 result
= LLVMBuildSIToFP(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
938 result
= LLVMBuildUIToFP(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
940 case nir_op_f2f16_rtz
:
941 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
942 if (LLVMTypeOf(src
[0]) == ctx
->ac
.f64
)
943 src
[0] = LLVMBuildFPTrunc(ctx
->ac
.builder
, src
[0], ctx
->ac
.f32
, "");
944 LLVMValueRef param
[2] = { src
[0], ctx
->ac
.f32_0
};
945 result
= ac_build_cvt_pkrtz_f16(&ctx
->ac
, param
);
946 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, result
, ctx
->ac
.i32_0
, "");
948 case nir_op_f2f16_rtne
:
952 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
953 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) < ac_get_elem_bits(&ctx
->ac
, def_type
))
954 result
= LLVMBuildFPExt(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
956 result
= LLVMBuildFPTrunc(ctx
->ac
.builder
, src
[0], ac_to_float_type(&ctx
->ac
, def_type
), "");
962 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) < ac_get_elem_bits(&ctx
->ac
, def_type
))
963 result
= LLVMBuildZExt(ctx
->ac
.builder
, src
[0], def_type
, "");
965 result
= LLVMBuildTrunc(ctx
->ac
.builder
, src
[0], def_type
, "");
971 if (ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src
[0])) < ac_get_elem_bits(&ctx
->ac
, def_type
))
972 result
= LLVMBuildSExt(ctx
->ac
.builder
, src
[0], def_type
, "");
974 result
= LLVMBuildTrunc(ctx
->ac
.builder
, src
[0], def_type
, "");
977 result
= emit_bcsel(&ctx
->ac
, src
[0], src
[1], src
[2]);
979 case nir_op_find_lsb
:
980 result
= ac_find_lsb(&ctx
->ac
, ctx
->ac
.i32
, src
[0]);
982 case nir_op_ufind_msb
:
983 result
= ac_build_umsb(&ctx
->ac
, src
[0], ctx
->ac
.i32
);
985 case nir_op_ifind_msb
:
986 result
= ac_build_imsb(&ctx
->ac
, src
[0], ctx
->ac
.i32
);
988 case nir_op_uadd_carry
:
989 result
= emit_uint_carry(&ctx
->ac
, "llvm.uadd.with.overflow.i32", src
[0], src
[1]);
991 case nir_op_usub_borrow
:
992 result
= emit_uint_carry(&ctx
->ac
, "llvm.usub.with.overflow.i32", src
[0], src
[1]);
997 result
= emit_b2f(&ctx
->ac
, src
[0], instr
->dest
.dest
.ssa
.bit_size
);
1000 result
= emit_f2b(&ctx
->ac
, src
[0]);
1006 result
= emit_b2i(&ctx
->ac
, src
[0], instr
->dest
.dest
.ssa
.bit_size
);
1009 result
= emit_i2b(&ctx
->ac
, src
[0]);
1011 case nir_op_fquantize2f16
:
1012 result
= emit_f2f16(&ctx
->ac
, src
[0]);
1014 case nir_op_umul_high
:
1015 result
= emit_umul_high(&ctx
->ac
, src
[0], src
[1]);
1017 case nir_op_imul_high
:
1018 result
= emit_imul_high(&ctx
->ac
, src
[0], src
[1]);
1020 case nir_op_pack_half_2x16
:
1021 result
= emit_pack_2x16(&ctx
->ac
, src
[0], ac_build_cvt_pkrtz_f16
);
1023 case nir_op_pack_snorm_2x16
:
1024 result
= emit_pack_2x16(&ctx
->ac
, src
[0], ac_build_cvt_pknorm_i16
);
1026 case nir_op_pack_unorm_2x16
:
1027 result
= emit_pack_2x16(&ctx
->ac
, src
[0], ac_build_cvt_pknorm_u16
);
1029 case nir_op_unpack_half_2x16
:
1030 result
= emit_unpack_half_2x16(&ctx
->ac
, src
[0]);
1034 case nir_op_fddx_fine
:
1035 case nir_op_fddy_fine
:
1036 case nir_op_fddx_coarse
:
1037 case nir_op_fddy_coarse
:
1038 result
= emit_ddxy(ctx
, instr
->op
, src
[0]);
1041 case nir_op_unpack_64_2x32_split_x
: {
1042 assert(ac_get_llvm_num_components(src
[0]) == 1);
1043 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1046 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1051 case nir_op_unpack_64_2x32_split_y
: {
1052 assert(ac_get_llvm_num_components(src
[0]) == 1);
1053 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1056 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1061 case nir_op_pack_64_2x32_split
: {
1062 LLVMValueRef tmp
= ac_build_gather_values(&ctx
->ac
, src
, 2);
1063 result
= LLVMBuildBitCast(ctx
->ac
.builder
, tmp
, ctx
->ac
.i64
, "");
1067 case nir_op_pack_32_2x16_split
: {
1068 LLVMValueRef tmp
= ac_build_gather_values(&ctx
->ac
, src
, 2);
1069 result
= LLVMBuildBitCast(ctx
->ac
.builder
, tmp
, ctx
->ac
.i32
, "");
1073 case nir_op_unpack_32_2x16_split_x
: {
1074 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1077 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1082 case nir_op_unpack_32_2x16_split_y
: {
1083 LLVMValueRef tmp
= LLVMBuildBitCast(ctx
->ac
.builder
, src
[0],
1086 result
= LLVMBuildExtractElement(ctx
->ac
.builder
, tmp
,
1091 case nir_op_cube_face_coord
: {
1092 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1093 LLVMValueRef results
[2];
1095 for (unsigned chan
= 0; chan
< 3; chan
++)
1096 in
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src
[0], chan
);
1097 results
[0] = ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubesc",
1098 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
1099 results
[1] = ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubetc",
1100 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
1101 LLVMValueRef ma
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubema",
1102 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
1103 results
[0] = ac_build_fdiv(&ctx
->ac
, results
[0], ma
);
1104 results
[1] = ac_build_fdiv(&ctx
->ac
, results
[1], ma
);
1105 LLVMValueRef offset
= LLVMConstReal(ctx
->ac
.f32
, 0.5);
1106 results
[0] = LLVMBuildFAdd(ctx
->ac
.builder
, results
[0], offset
, "");
1107 results
[1] = LLVMBuildFAdd(ctx
->ac
.builder
, results
[1], offset
, "");
1108 result
= ac_build_gather_values(&ctx
->ac
, results
, 2);
1112 case nir_op_cube_face_index
: {
1113 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1115 for (unsigned chan
= 0; chan
< 3; chan
++)
1116 in
[chan
] = ac_llvm_extract_elem(&ctx
->ac
, src
[0], chan
);
1117 result
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.cubeid",
1118 ctx
->ac
.f32
, in
, 3, AC_FUNC_ATTR_READNONE
);
1123 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.minnum",
1124 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
1125 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.minnum",
1126 ac_to_float_type(&ctx
->ac
, def_type
), result
, src
[2]);
1129 result
= ac_build_umin(&ctx
->ac
, src
[0], src
[1]);
1130 result
= ac_build_umin(&ctx
->ac
, result
, src
[2]);
1133 result
= ac_build_imin(&ctx
->ac
, src
[0], src
[1]);
1134 result
= ac_build_imin(&ctx
->ac
, result
, src
[2]);
1137 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.maxnum",
1138 ac_to_float_type(&ctx
->ac
, def_type
), src
[0], src
[1]);
1139 result
= emit_intrin_2f_param(&ctx
->ac
, "llvm.maxnum",
1140 ac_to_float_type(&ctx
->ac
, def_type
), result
, src
[2]);
1143 result
= ac_build_umax(&ctx
->ac
, src
[0], src
[1]);
1144 result
= ac_build_umax(&ctx
->ac
, result
, src
[2]);
1147 result
= ac_build_imax(&ctx
->ac
, src
[0], src
[1]);
1148 result
= ac_build_imax(&ctx
->ac
, result
, src
[2]);
1150 case nir_op_fmed3
: {
1151 src
[0] = ac_to_float(&ctx
->ac
, src
[0]);
1152 src
[1] = ac_to_float(&ctx
->ac
, src
[1]);
1153 src
[2] = ac_to_float(&ctx
->ac
, src
[2]);
1154 result
= ac_build_fmed3(&ctx
->ac
, src
[0], src
[1], src
[2],
1155 instr
->dest
.dest
.ssa
.bit_size
);
1158 case nir_op_imed3
: {
1159 LLVMValueRef tmp1
= ac_build_imin(&ctx
->ac
, src
[0], src
[1]);
1160 LLVMValueRef tmp2
= ac_build_imax(&ctx
->ac
, src
[0], src
[1]);
1161 tmp2
= ac_build_imin(&ctx
->ac
, tmp2
, src
[2]);
1162 result
= ac_build_imax(&ctx
->ac
, tmp1
, tmp2
);
1165 case nir_op_umed3
: {
1166 LLVMValueRef tmp1
= ac_build_umin(&ctx
->ac
, src
[0], src
[1]);
1167 LLVMValueRef tmp2
= ac_build_umax(&ctx
->ac
, src
[0], src
[1]);
1168 tmp2
= ac_build_umin(&ctx
->ac
, tmp2
, src
[2]);
1169 result
= ac_build_umax(&ctx
->ac
, tmp1
, tmp2
);
1174 fprintf(stderr
, "Unknown NIR alu instr: ");
1175 nir_print_instr(&instr
->instr
, stderr
);
1176 fprintf(stderr
, "\n");
1181 assert(instr
->dest
.dest
.is_ssa
);
1182 result
= ac_to_integer_or_pointer(&ctx
->ac
, result
);
1183 ctx
->ssa_defs
[instr
->dest
.dest
.ssa
.index
] = result
;
1187 static void visit_load_const(struct ac_nir_context
*ctx
,
1188 const nir_load_const_instr
*instr
)
1190 LLVMValueRef values
[4], value
= NULL
;
1191 LLVMTypeRef element_type
=
1192 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->def
.bit_size
);
1194 for (unsigned i
= 0; i
< instr
->def
.num_components
; ++i
) {
1195 switch (instr
->def
.bit_size
) {
1197 values
[i
] = LLVMConstInt(element_type
,
1198 instr
->value
[i
].u8
, false);
1201 values
[i
] = LLVMConstInt(element_type
,
1202 instr
->value
[i
].u16
, false);
1205 values
[i
] = LLVMConstInt(element_type
,
1206 instr
->value
[i
].u32
, false);
1209 values
[i
] = LLVMConstInt(element_type
,
1210 instr
->value
[i
].u64
, false);
1214 "unsupported nir load_const bit_size: %d\n",
1215 instr
->def
.bit_size
);
1219 if (instr
->def
.num_components
> 1) {
1220 value
= LLVMConstVector(values
, instr
->def
.num_components
);
1224 ctx
->ssa_defs
[instr
->def
.index
] = value
;
1228 get_buffer_size(struct ac_nir_context
*ctx
, LLVMValueRef descriptor
, bool in_elements
)
1231 LLVMBuildExtractElement(ctx
->ac
.builder
, descriptor
,
1232 LLVMConstInt(ctx
->ac
.i32
, 2, false), "");
1235 if (ctx
->ac
.chip_class
== GFX8
&& in_elements
) {
1236 /* On GFX8, the descriptor contains the size in bytes,
1237 * but TXQ must return the size in elements.
1238 * The stride is always non-zero for resources using TXQ.
1240 LLVMValueRef stride
=
1241 LLVMBuildExtractElement(ctx
->ac
.builder
, descriptor
,
1243 stride
= LLVMBuildLShr(ctx
->ac
.builder
, stride
,
1244 LLVMConstInt(ctx
->ac
.i32
, 16, false), "");
1245 stride
= LLVMBuildAnd(ctx
->ac
.builder
, stride
,
1246 LLVMConstInt(ctx
->ac
.i32
, 0x3fff, false), "");
1248 size
= LLVMBuildUDiv(ctx
->ac
.builder
, size
, stride
, "");
1253 /* Gather4 should follow the same rules as bilinear filtering, but the hardware
1254 * incorrectly forces nearest filtering if the texture format is integer.
1255 * The only effect it has on Gather4, which always returns 4 texels for
1256 * bilinear filtering, is that the final coordinates are off by 0.5 of
1259 * The workaround is to subtract 0.5 from the unnormalized coordinates,
1260 * or (0.5 / size) from the normalized coordinates.
1262 * However, cube textures with 8_8_8_8 data formats require a different
1263 * workaround of overriding the num format to USCALED/SSCALED. This would lose
1264 * precision in 32-bit data formats, so it needs to be applied dynamically at
1265 * runtime. In this case, return an i1 value that indicates whether the
1266 * descriptor was overridden (and hence a fixup of the sampler result is needed).
1268 static LLVMValueRef
lower_gather4_integer(struct ac_llvm_context
*ctx
,
1270 struct ac_image_args
*args
,
1271 const nir_tex_instr
*instr
)
1273 const struct glsl_type
*type
= glsl_without_array(var
->type
);
1274 enum glsl_base_type stype
= glsl_get_sampler_result_type(type
);
1275 LLVMValueRef wa_8888
= NULL
;
1276 LLVMValueRef half_texel
[2];
1277 LLVMValueRef result
;
1279 assert(stype
== GLSL_TYPE_INT
|| stype
== GLSL_TYPE_UINT
);
1281 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
1282 LLVMValueRef formats
;
1283 LLVMValueRef data_format
;
1284 LLVMValueRef wa_formats
;
1286 formats
= LLVMBuildExtractElement(ctx
->builder
, args
->resource
, ctx
->i32_1
, "");
1288 data_format
= LLVMBuildLShr(ctx
->builder
, formats
,
1289 LLVMConstInt(ctx
->i32
, 20, false), "");
1290 data_format
= LLVMBuildAnd(ctx
->builder
, data_format
,
1291 LLVMConstInt(ctx
->i32
, (1u << 6) - 1, false), "");
1292 wa_8888
= LLVMBuildICmp(
1293 ctx
->builder
, LLVMIntEQ
, data_format
,
1294 LLVMConstInt(ctx
->i32
, V_008F14_IMG_DATA_FORMAT_8_8_8_8
, false),
1297 uint32_t wa_num_format
=
1298 stype
== GLSL_TYPE_UINT
?
1299 S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED
) :
1300 S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED
);
1301 wa_formats
= LLVMBuildAnd(ctx
->builder
, formats
,
1302 LLVMConstInt(ctx
->i32
, C_008F14_NUM_FORMAT
, false),
1304 wa_formats
= LLVMBuildOr(ctx
->builder
, wa_formats
,
1305 LLVMConstInt(ctx
->i32
, wa_num_format
, false), "");
1307 formats
= LLVMBuildSelect(ctx
->builder
, wa_8888
, wa_formats
, formats
, "");
1308 args
->resource
= LLVMBuildInsertElement(
1309 ctx
->builder
, args
->resource
, formats
, ctx
->i32_1
, "");
1312 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_RECT
) {
1314 half_texel
[0] = half_texel
[1] = LLVMConstReal(ctx
->f32
, -0.5);
1316 struct ac_image_args resinfo
= {};
1317 LLVMBasicBlockRef bbs
[2];
1319 LLVMValueRef unnorm
= NULL
;
1320 LLVMValueRef default_offset
= ctx
->f32_0
;
1321 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_2D
&&
1323 /* In vulkan, whether the sampler uses unnormalized
1324 * coordinates or not is a dynamic property of the
1325 * sampler. Hence, to figure out whether or not we
1326 * need to divide by the texture size, we need to test
1327 * the sampler at runtime. This tests the bit set by
1328 * radv_init_sampler().
1330 LLVMValueRef sampler0
=
1331 LLVMBuildExtractElement(ctx
->builder
, args
->sampler
, ctx
->i32_0
, "");
1332 sampler0
= LLVMBuildLShr(ctx
->builder
, sampler0
,
1333 LLVMConstInt(ctx
->i32
, 15, false), "");
1334 sampler0
= LLVMBuildAnd(ctx
->builder
, sampler0
, ctx
->i32_1
, "");
1335 unnorm
= LLVMBuildICmp(ctx
->builder
, LLVMIntEQ
, sampler0
, ctx
->i32_1
, "");
1336 default_offset
= LLVMConstReal(ctx
->f32
, -0.5);
1339 bbs
[0] = LLVMGetInsertBlock(ctx
->builder
);
1340 if (wa_8888
|| unnorm
) {
1341 assert(!(wa_8888
&& unnorm
));
1342 LLVMValueRef not_needed
= wa_8888
? wa_8888
: unnorm
;
1343 /* Skip the texture size query entirely if we don't need it. */
1344 ac_build_ifcc(ctx
, LLVMBuildNot(ctx
->builder
, not_needed
, ""), 2000);
1345 bbs
[1] = LLVMGetInsertBlock(ctx
->builder
);
1348 /* Query the texture size. */
1349 resinfo
.dim
= ac_get_sampler_dim(ctx
->chip_class
, instr
->sampler_dim
, instr
->is_array
);
1350 resinfo
.opcode
= ac_image_get_resinfo
;
1351 resinfo
.dmask
= 0xf;
1352 resinfo
.lod
= ctx
->i32_0
;
1353 resinfo
.resource
= args
->resource
;
1354 resinfo
.attributes
= AC_FUNC_ATTR_READNONE
;
1355 LLVMValueRef size
= ac_build_image_opcode(ctx
, &resinfo
);
1357 /* Compute -0.5 / size. */
1358 for (unsigned c
= 0; c
< 2; c
++) {
1360 LLVMBuildExtractElement(ctx
->builder
, size
,
1361 LLVMConstInt(ctx
->i32
, c
, 0), "");
1362 half_texel
[c
] = LLVMBuildUIToFP(ctx
->builder
, half_texel
[c
], ctx
->f32
, "");
1363 half_texel
[c
] = ac_build_fdiv(ctx
, ctx
->f32_1
, half_texel
[c
]);
1364 half_texel
[c
] = LLVMBuildFMul(ctx
->builder
, half_texel
[c
],
1365 LLVMConstReal(ctx
->f32
, -0.5), "");
1368 if (wa_8888
|| unnorm
) {
1369 ac_build_endif(ctx
, 2000);
1371 for (unsigned c
= 0; c
< 2; c
++) {
1372 LLVMValueRef values
[2] = { default_offset
, half_texel
[c
] };
1373 half_texel
[c
] = ac_build_phi(ctx
, ctx
->f32
, 2,
1379 for (unsigned c
= 0; c
< 2; c
++) {
1381 tmp
= LLVMBuildBitCast(ctx
->builder
, args
->coords
[c
], ctx
->f32
, "");
1382 args
->coords
[c
] = LLVMBuildFAdd(ctx
->builder
, tmp
, half_texel
[c
], "");
1385 args
->attributes
= AC_FUNC_ATTR_READNONE
;
1386 result
= ac_build_image_opcode(ctx
, args
);
1388 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
) {
1389 LLVMValueRef tmp
, tmp2
;
1391 /* if the cube workaround is in place, f2i the result. */
1392 for (unsigned c
= 0; c
< 4; c
++) {
1393 tmp
= LLVMBuildExtractElement(ctx
->builder
, result
, LLVMConstInt(ctx
->i32
, c
, false), "");
1394 if (stype
== GLSL_TYPE_UINT
)
1395 tmp2
= LLVMBuildFPToUI(ctx
->builder
, tmp
, ctx
->i32
, "");
1397 tmp2
= LLVMBuildFPToSI(ctx
->builder
, tmp
, ctx
->i32
, "");
1398 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->i32
, "");
1399 tmp2
= LLVMBuildBitCast(ctx
->builder
, tmp2
, ctx
->i32
, "");
1400 tmp
= LLVMBuildSelect(ctx
->builder
, wa_8888
, tmp2
, tmp
, "");
1401 tmp
= LLVMBuildBitCast(ctx
->builder
, tmp
, ctx
->f32
, "");
1402 result
= LLVMBuildInsertElement(ctx
->builder
, result
, tmp
, LLVMConstInt(ctx
->i32
, c
, false), "");
1408 static nir_deref_instr
*get_tex_texture_deref(const nir_tex_instr
*instr
)
1410 nir_deref_instr
*texture_deref_instr
= NULL
;
1412 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++) {
1413 switch (instr
->src
[i
].src_type
) {
1414 case nir_tex_src_texture_deref
:
1415 texture_deref_instr
= nir_src_as_deref(instr
->src
[i
].src
);
1421 return texture_deref_instr
;
1424 static LLVMValueRef
build_tex_intrinsic(struct ac_nir_context
*ctx
,
1425 const nir_tex_instr
*instr
,
1426 struct ac_image_args
*args
)
1428 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_BUF
) {
1429 unsigned mask
= nir_ssa_def_components_read(&instr
->dest
.ssa
);
1431 return ac_build_buffer_load_format(&ctx
->ac
,
1435 util_last_bit(mask
),
1439 args
->opcode
= ac_image_sample
;
1441 switch (instr
->op
) {
1443 case nir_texop_txf_ms
:
1444 case nir_texop_samples_identical
:
1445 args
->opcode
= args
->level_zero
||
1446 instr
->sampler_dim
== GLSL_SAMPLER_DIM_MS
?
1447 ac_image_load
: ac_image_load_mip
;
1448 args
->level_zero
= false;
1451 case nir_texop_query_levels
:
1452 args
->opcode
= ac_image_get_resinfo
;
1454 args
->lod
= ctx
->ac
.i32_0
;
1455 args
->level_zero
= false;
1458 if (ctx
->stage
!= MESA_SHADER_FRAGMENT
) {
1460 args
->level_zero
= true;
1464 args
->opcode
= ac_image_gather4
;
1465 args
->level_zero
= true;
1468 args
->opcode
= ac_image_get_lod
;
1470 case nir_texop_fragment_fetch
:
1471 case nir_texop_fragment_mask_fetch
:
1472 args
->opcode
= ac_image_load
;
1473 args
->level_zero
= false;
1479 if (instr
->op
== nir_texop_tg4
&& ctx
->ac
.chip_class
<= GFX8
) {
1480 nir_deref_instr
*texture_deref_instr
= get_tex_texture_deref(instr
);
1481 nir_variable
*var
= nir_deref_instr_get_variable(texture_deref_instr
);
1482 const struct glsl_type
*type
= glsl_without_array(var
->type
);
1483 enum glsl_base_type stype
= glsl_get_sampler_result_type(type
);
1484 if (stype
== GLSL_TYPE_UINT
|| stype
== GLSL_TYPE_INT
) {
1485 return lower_gather4_integer(&ctx
->ac
, var
, args
, instr
);
1489 /* Fixup for GFX9 which allocates 1D textures as 2D. */
1490 if (instr
->op
== nir_texop_lod
&& ctx
->ac
.chip_class
== GFX9
) {
1491 if ((args
->dim
== ac_image_2darray
||
1492 args
->dim
== ac_image_2d
) && !args
->coords
[1]) {
1493 args
->coords
[1] = ctx
->ac
.i32_0
;
1497 args
->attributes
= AC_FUNC_ATTR_READNONE
;
1498 bool cs_derivs
= ctx
->stage
== MESA_SHADER_COMPUTE
&&
1499 ctx
->info
->cs
.derivative_group
!= DERIVATIVE_GROUP_NONE
;
1500 if (ctx
->stage
== MESA_SHADER_FRAGMENT
|| cs_derivs
) {
1501 /* Prevent texture instructions with implicit derivatives from being
1502 * sinked into branches. */
1503 switch (instr
->op
) {
1507 args
->attributes
|= AC_FUNC_ATTR_CONVERGENT
;
1514 return ac_build_image_opcode(&ctx
->ac
, args
);
1517 static LLVMValueRef
visit_vulkan_resource_reindex(struct ac_nir_context
*ctx
,
1518 nir_intrinsic_instr
*instr
)
1520 LLVMValueRef ptr
= get_src(ctx
, instr
->src
[0]);
1521 LLVMValueRef index
= get_src(ctx
, instr
->src
[1]);
1523 LLVMValueRef result
= LLVMBuildGEP(ctx
->ac
.builder
, ptr
, &index
, 1, "");
1524 LLVMSetMetadata(result
, ctx
->ac
.uniform_md_kind
, ctx
->ac
.empty_md
);
1528 static LLVMValueRef
visit_load_push_constant(struct ac_nir_context
*ctx
,
1529 nir_intrinsic_instr
*instr
)
1531 LLVMValueRef ptr
, addr
;
1532 LLVMValueRef src0
= get_src(ctx
, instr
->src
[0]);
1533 unsigned index
= nir_intrinsic_base(instr
);
1535 addr
= LLVMConstInt(ctx
->ac
.i32
, index
, 0);
1536 addr
= LLVMBuildAdd(ctx
->ac
.builder
, addr
, src0
, "");
1538 /* Load constant values from user SGPRS when possible, otherwise
1539 * fallback to the default path that loads directly from memory.
1541 if (LLVMIsConstant(src0
) &&
1542 instr
->dest
.ssa
.bit_size
== 32) {
1543 unsigned count
= instr
->dest
.ssa
.num_components
;
1544 unsigned offset
= index
;
1546 offset
+= LLVMConstIntGetZExtValue(src0
);
1549 offset
-= ctx
->args
->base_inline_push_consts
;
1551 unsigned num_inline_push_consts
= ctx
->args
->num_inline_push_consts
;
1552 if (offset
+ count
<= num_inline_push_consts
) {
1553 LLVMValueRef push_constants
[num_inline_push_consts
];
1554 for (unsigned i
= 0; i
< num_inline_push_consts
; i
++)
1555 push_constants
[i
] = ac_get_arg(&ctx
->ac
,
1556 ctx
->args
->inline_push_consts
[i
]);
1557 return ac_build_gather_values(&ctx
->ac
,
1558 push_constants
+ offset
,
1563 ptr
= LLVMBuildGEP(ctx
->ac
.builder
,
1564 ac_get_arg(&ctx
->ac
, ctx
->args
->push_constants
), &addr
, 1, "");
1566 if (instr
->dest
.ssa
.bit_size
== 8) {
1567 unsigned load_dwords
= instr
->dest
.ssa
.num_components
> 1 ? 2 : 1;
1568 LLVMTypeRef vec_type
= LLVMVectorType(LLVMInt8TypeInContext(ctx
->ac
.context
), 4 * load_dwords
);
1569 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, vec_type
);
1570 LLVMValueRef res
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
1572 LLVMValueRef params
[3];
1573 if (load_dwords
> 1) {
1574 LLVMValueRef res_vec
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, LLVMVectorType(ctx
->ac
.i32
, 2), "");
1575 params
[0] = LLVMBuildExtractElement(ctx
->ac
.builder
, res_vec
, LLVMConstInt(ctx
->ac
.i32
, 1, false), "");
1576 params
[1] = LLVMBuildExtractElement(ctx
->ac
.builder
, res_vec
, LLVMConstInt(ctx
->ac
.i32
, 0, false), "");
1578 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, ctx
->ac
.i32
, "");
1579 params
[0] = ctx
->ac
.i32_0
;
1583 res
= ac_build_intrinsic(&ctx
->ac
, "llvm.amdgcn.alignbyte", ctx
->ac
.i32
, params
, 3, 0);
1585 res
= LLVMBuildTrunc(ctx
->ac
.builder
, res
, LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.num_components
* 8), "");
1586 if (instr
->dest
.ssa
.num_components
> 1)
1587 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, LLVMVectorType(LLVMInt8TypeInContext(ctx
->ac
.context
), instr
->dest
.ssa
.num_components
), "");
1589 } else if (instr
->dest
.ssa
.bit_size
== 16) {
1590 unsigned load_dwords
= instr
->dest
.ssa
.num_components
/ 2 + 1;
1591 LLVMTypeRef vec_type
= LLVMVectorType(LLVMInt16TypeInContext(ctx
->ac
.context
), 2 * load_dwords
);
1592 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, vec_type
);
1593 LLVMValueRef res
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
1594 res
= LLVMBuildBitCast(ctx
->ac
.builder
, res
, vec_type
, "");
1595 LLVMValueRef cond
= LLVMBuildLShr(ctx
->ac
.builder
, addr
, ctx
->ac
.i32_1
, "");
1596 cond
= LLVMBuildTrunc(ctx
->ac
.builder
, cond
, ctx
->ac
.i1
, "");
1597 LLVMValueRef mask
[] = { LLVMConstInt(ctx
->ac
.i32
, 0, false), LLVMConstInt(ctx
->ac
.i32
, 1, false),
1598 LLVMConstInt(ctx
->ac
.i32
, 2, false), LLVMConstInt(ctx
->ac
.i32
, 3, false),
1599 LLVMConstInt(ctx
->ac
.i32
, 4, false)};
1600 LLVMValueRef swizzle_aligned
= LLVMConstVector(&mask
[0], instr
->dest
.ssa
.num_components
);
1601 LLVMValueRef swizzle_unaligned
= LLVMConstVector(&mask
[1], instr
->dest
.ssa
.num_components
);
1602 LLVMValueRef shuffle_aligned
= LLVMBuildShuffleVector(ctx
->ac
.builder
, res
, res
, swizzle_aligned
, "");
1603 LLVMValueRef shuffle_unaligned
= LLVMBuildShuffleVector(ctx
->ac
.builder
, res
, res
, swizzle_unaligned
, "");
1604 res
= LLVMBuildSelect(ctx
->ac
.builder
, cond
, shuffle_unaligned
, shuffle_aligned
, "");
1605 return LLVMBuildBitCast(ctx
->ac
.builder
, res
, get_def_type(ctx
, &instr
->dest
.ssa
), "");
1608 ptr
= ac_cast_ptr(&ctx
->ac
, ptr
, get_def_type(ctx
, &instr
->dest
.ssa
));
1610 return LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
1613 static LLVMValueRef
visit_get_buffer_size(struct ac_nir_context
*ctx
,
1614 const nir_intrinsic_instr
*instr
)
1616 LLVMValueRef index
= get_src(ctx
, instr
->src
[0]);
1618 return get_buffer_size(ctx
, ctx
->abi
->load_ssbo(ctx
->abi
, index
, false), false);
1621 static uint32_t widen_mask(uint32_t mask
, unsigned multiplier
)
1623 uint32_t new_mask
= 0;
1624 for(unsigned i
= 0; i
< 32 && (1u << i
) <= mask
; ++i
)
1625 if (mask
& (1u << i
))
1626 new_mask
|= ((1u << multiplier
) - 1u) << (i
* multiplier
);
1630 static LLVMValueRef
extract_vector_range(struct ac_llvm_context
*ctx
, LLVMValueRef src
,
1631 unsigned start
, unsigned count
)
1633 LLVMValueRef mask
[] = {
1634 ctx
->i32_0
, ctx
->i32_1
,
1635 LLVMConstInt(ctx
->i32
, 2, false), LLVMConstInt(ctx
->i32
, 3, false) };
1637 unsigned src_elements
= ac_get_llvm_num_components(src
);
1639 if (count
== src_elements
) {
1642 } else if (count
== 1) {
1643 assert(start
< src_elements
);
1644 return LLVMBuildExtractElement(ctx
->builder
, src
, mask
[start
], "");
1646 assert(start
+ count
<= src_elements
);
1648 LLVMValueRef swizzle
= LLVMConstVector(&mask
[start
], count
);
1649 return LLVMBuildShuffleVector(ctx
->builder
, src
, src
, swizzle
, "");
1653 static unsigned get_cache_policy(struct ac_nir_context
*ctx
,
1654 enum gl_access_qualifier access
,
1655 bool may_store_unaligned
,
1656 bool writeonly_memory
)
1658 unsigned cache_policy
= 0;
1660 /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
1661 * store opcodes not aligned to a dword are affected. The only way to
1662 * get unaligned stores is through shader images.
1664 if (((may_store_unaligned
&& ctx
->ac
.chip_class
== GFX6
) ||
1665 /* If this is write-only, don't keep data in L1 to prevent
1666 * evicting L1 cache lines that may be needed by other
1670 access
& (ACCESS_COHERENT
| ACCESS_VOLATILE
))) {
1671 cache_policy
|= ac_glc
;
1674 if (access
& ACCESS_STREAM_CACHE_POLICY
)
1675 cache_policy
|= ac_slc
;
1677 return cache_policy
;
1680 static LLVMValueRef
enter_waterfall_ssbo(struct ac_nir_context
*ctx
,
1681 struct waterfall_context
*wctx
,
1682 const nir_intrinsic_instr
*instr
,
1685 return enter_waterfall(ctx
, wctx
, get_src(ctx
, src
),
1686 nir_intrinsic_access(instr
) & ACCESS_NON_UNIFORM
);
1689 static void visit_store_ssbo(struct ac_nir_context
*ctx
,
1690 nir_intrinsic_instr
*instr
)
1692 if (ctx
->ac
.postponed_kill
) {
1693 LLVMValueRef cond
= LLVMBuildLoad(ctx
->ac
.builder
,
1694 ctx
->ac
.postponed_kill
, "");
1695 ac_build_ifcc(&ctx
->ac
, cond
, 7000);
1698 LLVMValueRef src_data
= get_src(ctx
, instr
->src
[0]);
1699 int elem_size_bytes
= ac_get_elem_bits(&ctx
->ac
, LLVMTypeOf(src_data
)) / 8;
1700 unsigned writemask
= nir_intrinsic_write_mask(instr
);
1701 enum gl_access_qualifier access
= nir_intrinsic_access(instr
);
1702 bool writeonly_memory
= access
& ACCESS_NON_READABLE
;
1703 unsigned cache_policy
= get_cache_policy(ctx
, access
, false, writeonly_memory
);
1705 struct waterfall_context wctx
;
1706 LLVMValueRef rsrc_base
= enter_waterfall_ssbo(ctx
, &wctx
, instr
, instr
->src
[1]);
1708 LLVMValueRef rsrc
= ctx
->abi
->load_ssbo(ctx
->abi
, rsrc_base
, true);
1709 LLVMValueRef base_data
= src_data
;
1710 base_data
= ac_trim_vector(&ctx
->ac
, base_data
, instr
->num_components
);
1711 LLVMValueRef base_offset
= get_src(ctx
, instr
->src
[2]);
1715 LLVMValueRef data
, offset
;
1716 LLVMTypeRef data_type
;
1718 u_bit_scan_consecutive_range(&writemask
, &start
, &count
);
1720 /* Due to an LLVM limitation with LLVM < 9, split 3-element
1721 * writes into a 2-element and a 1-element write. */
1723 (elem_size_bytes
!= 4 || !ac_has_vec3_support(ctx
->ac
.chip_class
, false))) {
1724 writemask
|= 1 << (start
+ 2);
1727 int num_bytes
= count
* elem_size_bytes
; /* count in bytes */
1729 /* we can only store 4 DWords at the same time.
1730 * can only happen for 64 Bit vectors. */
1731 if (num_bytes
> 16) {
1732 writemask
|= ((1u << (count
- 2)) - 1u) << (start
+ 2);
1737 /* check alignment of 16 Bit stores */
1738 if (elem_size_bytes
== 2 && num_bytes
> 2 && (start
% 2) == 1) {
1739 writemask
|= ((1u << (count
- 1)) - 1u) << (start
+ 1);
1744 /* Due to alignment issues, split stores of 8-bit vectors. */
1745 if (ctx
->ac
.chip_class
== GFX6
&&
1746 elem_size_bytes
== 1 && count
> 1) {
1747 writemask
|= ((1u << (count
- 1)) - 1u) << (start
+ 1);
1752 data
= extract_vector_range(&ctx
->ac
, base_data
, start
, count
);
1754 offset
= LLVMBuildAdd(ctx
->ac
.builder
, base_offset
,
1755 LLVMConstInt(ctx
->ac
.i32
, start
* elem_size_bytes
, false), "");
1757 if (num_bytes
== 1) {
1758 ac_build_tbuffer_store_byte(&ctx
->ac
, rsrc
, data
,
1759 offset
, ctx
->ac
.i32_0
,
1761 } else if (num_bytes
== 2) {
1762 ac_build_tbuffer_store_short(&ctx
->ac
, rsrc
, data
,
1763 offset
, ctx
->ac
.i32_0
,
1766 int num_channels
= num_bytes
/ 4;
1768 switch (num_bytes
) {
1769 case 16: /* v4f32 */
1770 data_type
= ctx
->ac
.v4f32
;
1772 case 12: /* v3f32 */
1773 data_type
= ctx
->ac
.v3f32
;
1776 data_type
= ctx
->ac
.v2f32
;
1779 data_type
= ctx
->ac
.f32
;
1782 unreachable("Malformed vector store.");
1784 data
= LLVMBuildBitCast(ctx
->ac
.builder
, data
, data_type
, "");
1786 ac_build_buffer_store_dword(&ctx
->ac
, rsrc
, data
,
1787 num_channels
, offset
,
1793 exit_waterfall(ctx
, &wctx
, NULL
);
1795 if (ctx
->ac
.postponed_kill
)
1796 ac_build_endif(&ctx
->ac
, 7000);
static LLVMValueRef
emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
                       LLVMValueRef descriptor,
                       LLVMValueRef offset,
                       LLVMValueRef compare,
                       LLVMValueRef exchange)
{
   LLVMBasicBlockRef start_block = NULL, then_block = NULL;
   if (ctx->abi->robust_buffer_access) {
      LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
      start_block = LLVMGetInsertBlock(ctx->ac.builder);

      ac_build_ifcc(&ctx->ac, cond, -1);

      then_block = LLVMGetInsertBlock(ctx->ac.builder);
   }

   /* Build a global pointer from the descriptor's base address. */
   LLVMValueRef ptr_parts[2] = {
      ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
      LLVMBuildAnd(ctx->ac.builder,
                   ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
                   LLVMConstInt(ctx->ac.i32, 65535, 0), "")
   };

   ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
   ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

   offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

   LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
   ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
   ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
   ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

   LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
   result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

   if (ctx->abi->robust_buffer_access) {
      ac_build_endif(&ctx->ac, -1);

      LLVMBasicBlockRef incoming_blocks[2] = {
         start_block,
         then_block,
      };

      LLVMValueRef incoming_values[2] = {
         LLVMConstInt(ctx->ac.i64, 0, 0),
         result,
      };
      LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
      LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
      return ret;
   } else {
      return result;
   }
}
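
/* SSBO atomics: the descriptor index goes through the non-uniform waterfall
 * helpers, and the operation is emitted via llvm.amdgcn.raw.buffer.atomic.*
 * on LLVM 9+ or the older llvm.amdgcn.buffer.atomic.* intrinsics otherwise.
 */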
static LLVMValueRef
visit_atomic_ssbo(struct ac_nir_context *ctx,
                  nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7001);
   }

   LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
   const char *op;
   char name[64], type[8];
   LLVMValueRef params[6], descriptor;
   LLVMValueRef result;
   int arg_count = 0;

   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   switch (instr->intrinsic) {
   case nir_intrinsic_ssbo_atomic_add:
      op = "add";
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      op = "smin";
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      op = "umin";
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      op = "smax";
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      op = "umax";
      break;
   case nir_intrinsic_ssbo_atomic_and:
      op = "and";
      break;
   case nir_intrinsic_ssbo_atomic_or:
      op = "or";
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      op = "xor";
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      op = "swap";
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      op = "cmpswap";
      break;
   default:
      abort();
   }

   descriptor = ctx->abi->load_ssbo(ctx->abi,
                                    rsrc_base, true);

   if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
       return_type == ctx->ac.i64) {
      result = emit_ssbo_comp_swap_64(ctx, descriptor,
                                      get_src(ctx, instr->src[1]),
                                      get_src(ctx, instr->src[2]),
                                      get_src(ctx, instr->src[3]));
   } else {
      if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
         params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
      }
      params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
      params[arg_count++] = descriptor;

      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy with
          * LLVM 8, see r358579.
          */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i32_0; /* soffset */
         params[arg_count++] = ctx->ac.i32_0; /* slc */

         ac_build_type_name_for_intr(return_type, type, sizeof(type));
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
      } else {
         params[arg_count++] = ctx->ac.i32_0; /* vindex */
         params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
         params[arg_count++] = ctx->ac.i1false; /* slc */

         assert(return_type == ctx->ac.i32);
         snprintf(name, sizeof(name),
                  "llvm.amdgcn.buffer.atomic.%s", op);
      }

      result = ac_build_intrinsic(&ctx->ac, name, return_type, params,
                                  arg_count, 0);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7001);
   return result;
}
static LLVMValueRef
visit_load_buffer(struct ac_nir_context *ctx,
                  nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);

   int elem_size_bytes = instr->dest.ssa.bit_size / 8;
   int num_components = instr->num_components;
   enum gl_access_qualifier access = nir_intrinsic_access(instr);
   unsigned cache_policy = get_cache_policy(ctx, access, false, false);

   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi, rsrc_base, false);
   LLVMValueRef vindex = ctx->ac.i32_0;

   LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
   LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

   LLVMValueRef results[4];
   for (int i = 0; i < num_components;) {
      int num_elems = num_components - i;
      if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
         num_elems = 1;
      if (num_elems * elem_size_bytes > 16)
         num_elems = 16 / elem_size_bytes;
      int load_bytes = num_elems * elem_size_bytes;

      LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

      LLVMValueRef ret;
      if (load_bytes == 1) {
         ret = ac_build_tbuffer_load_byte(&ctx->ac,
                                          rsrc, offset, ctx->ac.i32_0,
                                          immoffset, cache_policy);
      } else if (load_bytes == 2) {
         ret = ac_build_tbuffer_load_short(&ctx->ac,
                                           rsrc, offset, ctx->ac.i32_0,
                                           immoffset, cache_policy);
      } else {
         int num_channels = util_next_power_of_two(load_bytes) / 4;
         bool can_speculate = access & ACCESS_CAN_REORDER;

         ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
                                    vindex, offset, immoffset, 0,
                                    cache_policy, can_speculate, false);
      }

      LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
      ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

      LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
      ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

      for (unsigned j = 0; j < num_elems; j++) {
         results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
      }
      i += num_elems;
   }

   LLVMValueRef ret = ac_build_gather_values(&ctx->ac, results, num_components);
   return exit_waterfall(ctx, &wctx, ret);
}
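
/* UBO loads only need a waterfall loop when NIR marks the resource index as
 * ACCESS_NON_UNIFORM; for uniform indices the flag passed to enter_waterfall()
 * is zero and the helper is effectively a pass-through.
 */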
static LLVMValueRef
enter_waterfall_ubo(struct ac_nir_context *ctx,
                    struct waterfall_context *wctx,
                    const nir_intrinsic_instr *instr)
{
   return enter_waterfall(ctx, wctx, get_src(ctx, instr->src[0]),
                          nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}

static LLVMValueRef
visit_load_ubo_buffer(struct ac_nir_context *ctx,
                      nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef rsrc_base = enter_waterfall_ubo(ctx, &wctx, instr);

   LLVMValueRef ret;
   LLVMValueRef rsrc = rsrc_base;
   LLVMValueRef offset = get_src(ctx, instr->src[1]);
   int num_components = instr->num_components;

   if (ctx->abi->load_ubo)
      rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

   if (instr->dest.ssa.bit_size == 64)
      num_components *= 2;

   if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
      unsigned load_bytes = instr->dest.ssa.bit_size / 8;
      LLVMValueRef results[num_components];
      for (unsigned i = 0; i < num_components; ++i) {
         LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
                                               load_bytes * i, 0);

         if (load_bytes == 1) {
            results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
                                                    rsrc, offset, ctx->ac.i32_0,
                                                    immoffset, 0);
         } else {
            assert(load_bytes == 2);
            results[i] = ac_build_tbuffer_load_short(&ctx->ac,
                                                     rsrc, offset, ctx->ac.i32_0,
                                                     immoffset, 0);
         }
      }
      ret = ac_build_gather_values(&ctx->ac, results, num_components);
   } else {
      ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
                                 NULL, 0, 0, true, true);

      ret = ac_trim_vector(&ctx->ac, ret, num_components);
   }

   ret = LLVMBuildBitCast(ctx->ac.builder, ret,
                          get_def_type(ctx, &instr->dest.ssa), "");

   return exit_waterfall(ctx, &wctx, ret);
}
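
/* Walk a NIR deref chain and split it into a constant offset (counted in
 * attribute slots) plus an optional indirect LLVM value; for per-vertex
 * derefs the leading array index is returned separately as the vertex index.
 */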
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
                 bool vs_in, unsigned *vertex_index_out,
                 LLVMValueRef *vertex_index_ref,
                 unsigned *const_out, LLVMValueRef *indir_out)
{
   nir_variable *var = nir_deref_instr_get_variable(instr);
   nir_deref_path path;
   unsigned idx_lvl = 1;

   nir_deref_path_init(&path, instr, NULL);

   if (vertex_index_out != NULL || vertex_index_ref != NULL) {
      if (vertex_index_ref) {
         *vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
         if (vertex_index_out)
            *vertex_index_out = 0;
      } else {
         *vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
      }
      ++idx_lvl;
   }

   uint32_t const_offset = 0;
   LLVMValueRef offset = NULL;

   if (var->data.compact) {
      assert(instr->deref_type == nir_deref_type_array);
      const_offset = nir_src_as_uint(instr->arr.index);
      goto out;
   }

   for (; path.path[idx_lvl]; ++idx_lvl) {
      const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
      if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
         unsigned index = path.path[idx_lvl]->strct.index;

         for (unsigned i = 0; i < index; i++) {
            const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
            const_offset += glsl_count_attribute_slots(ft, vs_in);
         }
      } else if(path.path[idx_lvl]->deref_type == nir_deref_type_array) {
         unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
         if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
            const_offset += size *
               nir_src_as_uint(path.path[idx_lvl]->arr.index);
         } else {
            LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
                                                  get_src(ctx, path.path[idx_lvl]->arr.index), "");
            if (offset)
               offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
            else
               offset = array_off;
         }
      } else
         unreachable("Uhandled deref type in get_deref_instr_offset");
   }

out:
   nir_deref_path_finish(&path);

   if (const_offset && offset)
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, const_offset, 0),
                            "");

   *const_out = const_offset;
   *indir_out = offset;
}
static LLVMValueRef
load_tess_varyings(struct ac_nir_context *ctx,
                   nir_intrinsic_instr *instr,
                   bool load_inputs)
{
   LLVMValueRef result;
   LLVMValueRef vertex_index = NULL;
   LLVMValueRef indir_index = NULL;
   unsigned const_index = 0;

   nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

   unsigned location = var->data.location;
   unsigned driver_location = var->data.driver_location;
   const bool is_patch = var->data.patch ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                         var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;
   const bool is_compact = var->data.compact;

   get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                    false, NULL, is_patch ? NULL : &vertex_index,
                    &const_index, &indir_index);

   LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

   LLVMTypeRef src_component_type;
   if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
      src_component_type = LLVMGetElementType(dest_type);
   else
      src_component_type = dest_type;

   result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
                                         vertex_index, indir_index,
                                         const_index, location, driver_location,
                                         var->data.location_frac,
                                         instr->num_components,
                                         is_patch, is_compact, load_inputs);
   if (instr->dest.ssa.bit_size == 16) {
      result = ac_to_integer(&ctx->ac, result);
      result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
   }
   return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}

static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
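
/* Loads from NIR variables: shader_in/out and function_temp values live in
 * per-channel LLVM allocas or input arrays, while nir_var_mem_global is read
 * through a plain pointer (split per component when the stride is not the
 * natural one, or for 8-bit loads on GFX6).
 */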
static LLVMValueRef
visit_load_var(struct ac_nir_context *ctx,
               nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef values[8];
   int idx = 0;
   int ve = instr->dest.ssa.num_components;
   unsigned comp = 0;
   LLVMValueRef indir_index;
   LLVMValueRef ret;
   unsigned const_index;
   unsigned stride = 4;
   int mode = deref->mode;

   if (var) {
      bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
                   var->data.mode == nir_var_shader_in;
      idx = var->data.driver_location;
      comp = var->data.location_frac;
      mode = var->data.mode;

      get_deref_offset(ctx, deref, vs_in, NULL, NULL,
                       &const_index, &indir_index);

      if (var->data.compact) {
         stride = 1;
         const_index += comp;
         comp = 0;
      }
   }

   if (instr->dest.ssa.bit_size == 64 &&
       (deref->mode == nir_var_shader_in ||
        deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp))
      ve *= 2;

   switch (mode) {
   case nir_var_shader_in:
      if (ctx->stage == MESA_SHADER_TESS_CTRL ||
          ctx->stage == MESA_SHADER_TESS_EVAL) {
         return load_tess_varyings(ctx, instr, true);
      }

      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
         LLVMValueRef indir_index;
         unsigned const_index, vertex_index;
         get_deref_offset(ctx, deref, false, &vertex_index, NULL,
                          &const_index, &indir_index);
         assert(indir_index == NULL);

         return ctx->abi->load_inputs(ctx->abi, var->data.location,
                                      var->data.driver_location,
                                      var->data.location_frac,
                                      instr->num_components, vertex_index, const_index, type);
      }

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type,
                  ctx->stage == MESA_SHADER_VERTEX);
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->inputs + idx + chan, count,
                  stride, false, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec, indir_index, "");
         } else
            values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < ve; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec, indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
         }
      }
      break;
   case nir_var_shader_out:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         return load_tess_varyings(ctx, instr, false);
      }

      if (ctx->stage == MESA_SHADER_FRAGMENT &&
          var->data.fb_fetch_output &&
          ctx->abi->emit_fbfetch)
         return ctx->abi->emit_fbfetch(ctx->abi);

      for (unsigned chan = comp; chan < ve + comp; chan++) {
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
                                                   tmp_vec, indir_index, "");
         } else {
            values[chan] = LLVMBuildLoad(ctx->ac.builder,
                                         ctx->abi->outputs[idx + chan + const_index * stride],
                                         "");
         }
      }
      break;
   case nir_var_mem_global: {
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;
      int elem_size_bytes = ac_get_elem_bits(&ctx->ac, result_type) / 8;
      bool split_loads = ctx->ac.chip_class == GFX6 &&
                         elem_size_bytes == 1;

      if (stride != natural_stride || split_loads) {
         if (LLVMGetTypeKind(result_type) == LLVMVectorTypeKind)
            result_type = LLVMGetElementType(result_type);

         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
            values[i] = LLVMBuildLoad(ctx->ac.builder,
                                      ac_build_gep_ptr(&ctx->ac, address, offset), "");
         }
         return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
      } else {
         LLVMTypeRef ptr_type = LLVMPointerType(result_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
         return val;
      }
   }
   default:
      unreachable("unhandle variable mode");
   }
   ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_var(struct ac_nir_context *ctx,
                nir_intrinsic_instr *instr)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7002);
   }

   nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   LLVMValueRef temp_ptr, value;
   int idx = 0;
   unsigned comp = 0;
   LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
   int writemask = instr->const_index[0];
   LLVMValueRef indir_index;
   unsigned const_index;

   if (var) {
      get_deref_offset(ctx, deref, false,
                       NULL, NULL, &const_index, &indir_index);
      idx = var->data.driver_location;
      comp = var->data.location_frac;

      if (var->data.compact) {
         const_index += comp;
         comp = 0;
      }
   }

   if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
       (deref->mode == nir_var_shader_out ||
        deref->mode == nir_var_function_temp)) {

      src = LLVMBuildBitCast(ctx->ac.builder, src,
                             LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
                             "");

      writemask = widen_mask(writemask, 2);
   }

   writemask = writemask << comp;

   switch (deref->mode) {
   case nir_var_shader_out:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         LLVMValueRef vertex_index = NULL;
         LLVMValueRef indir_index = NULL;
         unsigned const_index = 0;
         const bool is_patch = var->data.patch ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
                               var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER;

         get_deref_offset(ctx, deref, false, NULL,
                          is_patch ? NULL : &vertex_index,
                          &const_index, &indir_index);

         ctx->abi->store_tcs_outputs(ctx->abi, var,
                                     vertex_index, indir_index,
                                     const_index, src, writemask);
         break;
      }

      for (unsigned chan = 0; chan < 8; chan++) {
         int stride = 4;
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

         if (var->data.compact)
            stride = 1;
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->abi->outputs + idx + chan, count,
                  stride, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
                                        count, stride, tmp_vec);
         } else {
            temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_function_temp:
      for (unsigned chan = 0; chan < 8; chan++) {
         if (!(writemask & (1 << chan)))
            continue;

         value = ac_llvm_extract_elem(&ctx->ac, src, chan);
         if (indir_index) {
            unsigned count = glsl_count_attribute_slots(
                  var->type, false);
            LLVMValueRef tmp_vec = ac_build_gather_values_extended(
                  &ctx->ac, ctx->locals + idx + chan, count,
                  4, true, true);

            tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
                                             value, indir_index, "");
            build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
                                        count, 4, tmp_vec);
         } else {
            temp_ptr = ctx->locals[idx + chan + const_index * 4];

            LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
         }
      }
      break;
   case nir_var_mem_global: {
      int writemask = instr->const_index[0];
      LLVMValueRef address = get_src(ctx, instr->src[0]);
      LLVMValueRef val = get_src(ctx, instr->src[1]);

      unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
      unsigned natural_stride = type_scalar_size_bytes(deref->type);
      unsigned stride = explicit_stride ? explicit_stride : natural_stride;
      int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(val)) / 8;
      bool split_stores = ctx->ac.chip_class == GFX6 &&
                          elem_size_bytes == 1;

      LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                             LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
      address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

      if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
          stride == natural_stride && !split_stores) {
         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

         val = LLVMBuildBitCast(ctx->ac.builder, val,
                                LLVMGetElementType(LLVMTypeOf(address)), "");
         LLVMBuildStore(ctx->ac.builder, val, address);
      } else {
         LLVMTypeRef val_type = LLVMTypeOf(val);
         if (LLVMGetTypeKind(LLVMTypeOf(val)) == LLVMVectorTypeKind)
            val_type = LLVMGetElementType(val_type);

         LLVMTypeRef ptr_type = LLVMPointerType(val_type,
                                                LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
         address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
         for (unsigned chan = 0; chan < 4; chan++) {
            if (!(writemask & (1 << chan)))
               continue;

            LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

            LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
            LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
                                                    chan);
            src = LLVMBuildBitCast(ctx->ac.builder, src,
                                   LLVMGetElementType(LLVMTypeOf(ptr)), "");
            LLVMBuildStore(ctx->ac.builder, src, ptr);
         }
      }
      break;
   }
   default:
      break;
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7002);
}
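
/* Number of coordinate components the hardware expects for each image
 * dimension; the sample index and the GFX9 1D/layer fix-ups are appended
 * later in get_image_coords().
 */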
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_BUF:
      return 1;
   case GLSL_SAMPLER_DIM_1D:
      return array ? 2 : 1;
   case GLSL_SAMPLER_DIM_2D:
      return array ? 3 : 2;
   case GLSL_SAMPLER_DIM_MS:
      return array ? 4 : 3;
   case GLSL_SAMPLER_DIM_3D:
   case GLSL_SAMPLER_DIM_CUBE:
      return 3;
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_SUBPASS:
      return 2;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return 3;
   default:
      break;
   }
   return 0;
}

static LLVMValueRef
adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
                                LLVMValueRef coord_x, LLVMValueRef coord_y,
                                LLVMValueRef coord_z,
                                LLVMValueRef sample_index,
                                LLVMValueRef fmask_desc_ptr)
{
   unsigned sample_chan = coord_z ? 3 : 2;
   LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
   addr[sample_chan] = sample_index;

   ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
   return addr[sample_chan];
}

static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
   assert(instr->src[0].is_ssa);
   return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}

static LLVMValueRef
get_image_descriptor(struct ac_nir_context *ctx,
                     const nir_intrinsic_instr *instr,
                     LLVMValueRef dynamic_index,
                     enum ac_descriptor_type desc_type,
                     bool write)
{
   nir_deref_instr *deref_instr =
      instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
      nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

   return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, dynamic_index, true, write);
}
static void get_image_coords(struct ac_nir_context *ctx,
                             const nir_intrinsic_instr *instr,
                             LLVMValueRef dynamic_desc_index,
                             struct ac_image_args *args,
                             enum glsl_sampler_dim dim,
                             bool is_array)
{
   LLVMValueRef src0 = get_src(ctx, instr->src[1]);
   LLVMValueRef masks[] = {
      LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
      LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
   };
   LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

   int count;
   ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
                                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
                 dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
   bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
   assert(!add_frag_pos && "Input attachments should be lowered by this point.");
   count = image_type_to_components_count(dim, is_array);

   if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
                 instr->intrinsic == nir_intrinsic_bindless_image_load)) {
      LLVMValueRef fmask_load_address[3];

      fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
      if (is_array)
         fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
      else
         fmask_load_address[2] = NULL;

      sample_index = adjust_sample_index_using_fmask(&ctx->ac,
                                                     fmask_load_address[0],
                                                     fmask_load_address[1],
                                                     fmask_load_address[2],
                                                     sample_index,
                                                     get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
                                                                      AC_DESC_FMASK, &instr->instr, dynamic_desc_index, true, false));
   }
   if (count == 1 && !gfx9_1d) {
      if (instr->src[1].ssa->num_components)
         args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
      else
         args->coords[0] = src0;
   } else {
      int chan;
      if (is_ms)
         count--;
      for (chan = 0; chan < count; ++chan) {
         args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
      }

      if (gfx9_1d) {
         if (is_array) {
            args->coords[2] = args->coords[1];
            args->coords[1] = ctx->ac.i32_0;
         } else
            args->coords[1] = ctx->ac.i32_0;
         count++;
      }
      if (ctx->ac.chip_class == GFX9 &&
          dim == GLSL_SAMPLER_DIM_2D &&
          !is_array) {
         /* The hw can't bind a slice of a 3D image as a 2D
          * image, because it ignores BASE_ARRAY if the target
          * is 3D. The workaround is to read BASE_ARRAY and set
          * it as the 3rd address operand for all 2D images.
          */
         LLVMValueRef first_layer, const5, mask;

         const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
         mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
         first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
         first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

         args->coords[count] = first_layer;
         count++;
      }

      if (is_ms) {
         args->coords[count] = sample_index;
         count++;
      }
   }
}
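
/* Buffer-image descriptor.  On GFX9 with LLVM < 9 the NUM_RECORDS field is
 * replaced by max(num_records, stride) as a workaround for buffer atomics,
 * as done in the code below.
 */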
static LLVMValueRef
get_image_buffer_descriptor(struct ac_nir_context *ctx,
                            const nir_intrinsic_instr *instr,
                            LLVMValueRef dynamic_index,
                            bool write, bool atomic)
{
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
   if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
      LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
      LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
      stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

      LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
                                                    LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
                                                    elem_count, stride, "");

      rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
                                    LLVMConstInt(ctx->ac.i32, 2, 0), "");
   }
   return rsrc;
}

static LLVMValueRef
enter_waterfall_image(struct ac_nir_context *ctx,
                      struct waterfall_context *wctx,
                      const nir_intrinsic_instr *instr)
{
   nir_deref_instr *deref_instr = NULL;

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref)
      deref_instr = nir_instr_as_deref(instr->src[0].ssa->parent_instr);

   LLVMValueRef value = get_sampler_desc_index(ctx, deref_instr, &instr->instr, true);
   return enter_waterfall(ctx, wctx, value, nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
}
static LLVMValueRef
visit_image_load(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, false, false);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
      unsigned num_channels = util_last_bit(mask);
      LLVMValueRef rsrc, vindex;

      rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
      vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      bool can_speculate = access & ACCESS_CAN_REORDER;
      res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
                                        ctx->ac.i32_0, num_channels,
                                        args.cache_policy, can_speculate);
      res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

      res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
      res = ac_to_integer(&ctx->ac, res);
   } else {
      bool level_zero = nir_src_is_const(instr->src[3]) && nir_src_as_uint(instr->src[3]) == 0;

      args.opcode = level_zero ? ac_image_load : ac_image_load_mip;
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[3]);
      args.attributes = AC_FUNC_ATTR_READONLY;

      res = ac_build_image_opcode(&ctx->ac, &args);
   }
   return exit_waterfall(ctx, &wctx, res);
}
static void visit_image_store(struct ac_nir_context *ctx,
                              const nir_intrinsic_instr *instr,
                              bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7003);
   }

   enum glsl_sampler_dim dim;
   enum gl_access_qualifier access;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      access = nir_intrinsic_access(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const nir_deref_instr *image_deref = get_image_deref(instr);
      const struct glsl_type *type = image_deref->type;
      const nir_variable *var = nir_deref_instr_get_variable(image_deref);
      dim = glsl_get_sampler_dim(type);
      access = var->data.access;
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   bool writeonly_memory = access & ACCESS_NON_READABLE;
   struct ac_image_args args = {};

   args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, false);
      LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      unsigned src_channels = ac_get_llvm_num_components(src);
      LLVMValueRef vindex;

      if (src_channels == 3)
         src = ac_build_expand_to_vec4(&ctx->ac, src, 3);

      vindex = LLVMBuildExtractElement(ctx->ac.builder,
                                       get_src(ctx, instr->src[1]),
                                       ctx->ac.i32_0, "");

      ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex,
                                   ctx->ac.i32_0, src_channels,
                                   args.cache_policy);
   } else {
      bool level_zero = nir_src_is_const(instr->src[4]) && nir_src_as_uint(instr->src[4]) == 0;

      args.opcode = level_zero ? ac_image_store : ac_image_store_mip;
      args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      if (!level_zero)
         args.lod = get_src(ctx, instr->src[4]);

      ac_build_image_opcode(&ctx->ac, &args);
   }

   exit_waterfall(ctx, &wctx, NULL);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7003);
}
static LLVMValueRef
visit_image_atomic(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr,
                   bool bindless)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7004);
   }

   LLVMValueRef params[7];
   int param_count = 0;

   bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
                  instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
   const char *atomic_name;
   char intrinsic_name[64];
   enum ac_atomic_op atomic_subop;
   ASSERTED int length;

   enum glsl_sampler_dim dim;
   bool is_array;

   if (bindless) {
      if (instr->intrinsic == nir_intrinsic_bindless_image_atomic_imin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umin ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_imax ||
          instr->intrinsic == nir_intrinsic_bindless_image_atomic_umax) {
         ASSERTED const GLenum format = nir_intrinsic_format(instr);
         assert(format == GL_R32UI || format == GL_R32I);
      }
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_deref_atomic_add:
      atomic_name = "add";
      atomic_subop = ac_atomic_add;
      break;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_deref_atomic_imin:
      atomic_name = "smin";
      atomic_subop = ac_atomic_smin;
      break;
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_deref_atomic_umin:
      atomic_name = "umin";
      atomic_subop = ac_atomic_umin;
      break;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imax:
      atomic_name = "smax";
      atomic_subop = ac_atomic_smax;
      break;
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umax:
      atomic_name = "umax";
      atomic_subop = ac_atomic_umax;
      break;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_deref_atomic_and:
      atomic_name = "and";
      atomic_subop = ac_atomic_and;
      break;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_deref_atomic_or:
      atomic_name = "or";
      atomic_subop = ac_atomic_or;
      break;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_xor:
      atomic_name = "xor";
      atomic_subop = ac_atomic_xor;
      break;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_exchange:
      atomic_name = "swap";
      atomic_subop = ac_atomic_swap;
      break;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_comp_swap:
      atomic_name = "cmpswap";
      atomic_subop = 0; /* not used */
      break;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_inc_wrap: {
      atomic_name = "inc";
      atomic_subop = ac_atomic_inc_wrap;
      /* ATOMIC_INC instruction does:
       * value = (value + 1) % (data + 1)
       * but we want:
       * value = (value + 1) % data
       * So replace 'data' by 'data - 1'.
       */
      ctx->ssa_defs[instr->src[3].ssa->index] =
         LLVMBuildSub(ctx->ac.builder,
                      ctx->ssa_defs[instr->src[3].ssa->index],
                      ctx->ac.i32_1, "");
      break;
   }
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      atomic_name = "dec";
      atomic_subop = ac_atomic_dec_wrap;
      break;
   default:
      abort();
   }

   if (cmpswap)
      params[param_count++] = get_src(ctx, instr->src[4]);
   params[param_count++] = get_src(ctx, instr->src[3]);

   LLVMValueRef result;
   if (dim == GLSL_SAMPLER_DIM_BUF) {
      params[param_count++] = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, true);
      params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
                                                      ctx->ac.i32_0, ""); /* vindex */
      params[param_count++] = ctx->ac.i32_0; /* voffset */
      if (LLVM_VERSION_MAJOR >= 9) {
         /* XXX: The new raw/struct atomic intrinsics are buggy
          * with LLVM 8, see r358579.
          */
         params[param_count++] = ctx->ac.i32_0; /* soffset */
         params[param_count++] = ctx->ac.i32_0; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
      } else {
         params[param_count++] = ctx->ac.i1false; /* slc */

         length = snprintf(intrinsic_name, sizeof(intrinsic_name),
                           "llvm.amdgcn.buffer.atomic.%s", atomic_name);
      }

      assert(length < sizeof(intrinsic_name));
      result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
                                  params, param_count, 0);
   } else {
      struct ac_image_args args = {};
      args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
      args.atomic = atomic_subop;
      args.data[0] = params[0];
      if (cmpswap)
         args.data[1] = params[1];
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, true);
      get_image_coords(ctx, instr, dynamic_index, &args, dim, is_array);
      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

      result = ac_build_image_opcode(&ctx->ac, &args);
   }

   result = exit_waterfall(ctx, &wctx, result);
   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7004);
   return result;
}
static LLVMValueRef
visit_image_samples(struct ac_nir_context *ctx,
                    nir_intrinsic_instr *instr)
{
   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
   LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);

   LLVMValueRef ret = ac_build_image_get_sample_count(&ctx->ac, rsrc);

   return exit_waterfall(ctx, &wctx, ret);
}

static LLVMValueRef
visit_image_size(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 bool bindless)
{
   LLVMValueRef res;

   enum glsl_sampler_dim dim;
   bool is_array;

   if (bindless) {
      dim = nir_intrinsic_image_dim(instr);
      is_array = nir_intrinsic_image_array(instr);
   } else {
      const struct glsl_type *type = get_image_deref(instr)->type;
      dim = glsl_get_sampler_dim(type);
      is_array = glsl_sampler_type_is_array(type);
   }

   struct waterfall_context wctx;
   LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);

   if (dim == GLSL_SAMPLER_DIM_BUF) {
      res = get_buffer_size(ctx, get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false), true);
   } else {
      struct ac_image_args args = { 0 };

      args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
      args.resource = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_IMAGE, false);
      args.opcode = ac_image_get_resinfo;
      args.lod = ctx->ac.i32_0;
      args.attributes = AC_FUNC_ATTR_READNONE;

      res = ac_build_image_opcode(&ctx->ac, &args);

      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

      if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
         LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
         LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
      }

      if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
         LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
         res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
                                      ctx->ac.i32_1, "");
      }
   }
   return exit_waterfall(ctx, &wctx, res);
}
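
/* Map NIR memory barriers to the waitcnt flags used by ac_build_waitcnt():
 * LGKM covers LDS/SMEM traffic, VLOAD/VSTORE cover VMEM loads and stores.
 */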
static void emit_membar(struct ac_llvm_context *ac,
                        const nir_intrinsic_instr *instr)
{
   unsigned wait_flags = 0;

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
      wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
      break;
   case nir_intrinsic_memory_barrier_shared:
      wait_flags = AC_WAIT_LGKM;
      break;
   default:
      break;
   }

   ac_build_waitcnt(ac, wait_flags);
}

void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
   /* GFX6 only (thanks to a hw bug workaround):
    * The real barrier instruction isn't needed, because an entire patch
    * always fits into a single wave.
    */
   if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
      ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
      return;
   }
   ac_build_s_barrier(ac);
}

static void emit_discard(struct ac_nir_context *ctx,
                         const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_discard_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_discard);
      cond = ctx->ac.i1false;
   }

   ac_build_kill_if_false(&ctx->ac, cond);
}

static void emit_demote(struct ac_nir_context *ctx,
                        const nir_intrinsic_instr *instr)
{
   LLVMValueRef cond;

   if (instr->intrinsic == nir_intrinsic_demote_if) {
      cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                           get_src(ctx, instr->src[0]),
                           ctx->ac.i32_0, "");
   } else {
      assert(instr->intrinsic == nir_intrinsic_demote);
      cond = ctx->ac.i1false;
   }

   /* Kill immediately while maintaining WQM. */
   ac_build_kill_if_false(&ctx->ac, ac_build_wqm_vote(&ctx->ac, cond));

   LLVMValueRef mask = LLVMBuildLoad(ctx->ac.builder, ctx->ac.postponed_kill, "");
   mask = LLVMBuildAnd(ctx->ac.builder, mask, cond, "");
   LLVMBuildStore(ctx->ac.builder, mask, ctx->ac.postponed_kill);
}
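
/* The local invocation index is reconstructed from the tg_size argument:
 * the 0xfc0 mask below extracts the wave index within the threadgroup
 * (shifted right by one for wave32 so it becomes a multiple of the wave
 * size), and the lane id is added on top.
 */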
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
   LLVMValueRef result;
   LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
   result = LLVMBuildAnd(ctx->ac.builder,
                         ac_get_arg(&ctx->ac, ctx->args->tg_size),
                         LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

   if (ctx->ac.wave_size == 32)
      result = LLVMBuildLShr(ctx->ac.builder, result,
                             LLVMConstInt(ctx->ac.i32, 1, false), "");

   return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}

static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      LLVMValueRef result;
      result = LLVMBuildAnd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->tg_size),
                            LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
      return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 0, false);
   }
}

static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
   if (ctx->stage == MESA_SHADER_COMPUTE) {
      return LLVMBuildAnd(ctx->ac.builder,
                          ac_get_arg(&ctx->ac, ctx->args->tg_size),
                          LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
   } else {
      return LLVMConstInt(ctx->ac.i32, 1, false);
   }
}

static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
   LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
   const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

   /* The second argument is whether cttz(0) should be defined, but we do not care. */
   LLVMValueRef args[] = {active_set, ctx->ac.i1false};
   LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
                                            ctx->ac.iN_wavemask, args, 2,
                                            AC_FUNC_ATTR_NOUNWIND |
                                            AC_FUNC_ATTR_READNONE);

   return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
                  const nir_intrinsic_instr *instr)
{
   LLVMValueRef values[4], derived_ptr, index, ret;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                     instr->dest.ssa.bit_size);

   for (int chan = 0; chan < instr->num_components; chan++) {
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
      values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
   }

   ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
   return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}

static void
visit_store_shared(struct ac_nir_context *ctx,
                   const nir_intrinsic_instr *instr)
{
   LLVMValueRef derived_ptr, data, index;
   LLVMBuilderRef builder = ctx->ac.builder;

   LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1],
                                     instr->src[0].ssa->bit_size);
   LLVMValueRef src = get_src(ctx, instr->src[0]);

   int writemask = nir_intrinsic_write_mask(instr);
   for (int chan = 0; chan < 4; chan++) {
      if (!(writemask & (1 << chan))) {
         continue;
      }
      data = ac_llvm_extract_elem(&ctx->ac, src, chan);
      index = LLVMConstInt(ctx->ac.i32, chan, 0);
      derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
      LLVMBuildStore(builder, data, derived_ptr);
   }
}
static LLVMValueRef
visit_var_atomic(struct ac_nir_context *ctx,
                 const nir_intrinsic_instr *instr,
                 LLVMValueRef ptr, int src_idx)
{
   if (ctx->ac.postponed_kill) {
      LLVMValueRef cond = LLVMBuildLoad(ctx->ac.builder,
                                        ctx->ac.postponed_kill, "");
      ac_build_ifcc(&ctx->ac, cond, 7005);
   }

   LLVMValueRef result;
   LLVMValueRef src = get_src(ctx, instr->src[src_idx]);

   const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

   if (instr->src[0].ssa->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
      if (deref->mode == nir_var_mem_global) {
         /* use "singlethread" sync scope to implement relaxed ordering */
         sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";

         LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(src), LLVMGetPointerAddressSpace(LLVMTypeOf(ptr)));
         ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ptr_type, "");
      }
   }

   if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
       instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
      LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
      result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
      result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
   } else {
      LLVMAtomicRMWBinOp op;
      switch (instr->intrinsic) {
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_deref_atomic_add:
         op = LLVMAtomicRMWBinOpAdd;
         break;
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_deref_atomic_umin:
         op = LLVMAtomicRMWBinOpUMin;
         break;
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_deref_atomic_umax:
         op = LLVMAtomicRMWBinOpUMax;
         break;
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_deref_atomic_imin:
         op = LLVMAtomicRMWBinOpMin;
         break;
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_deref_atomic_imax:
         op = LLVMAtomicRMWBinOpMax;
         break;
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_deref_atomic_and:
         op = LLVMAtomicRMWBinOpAnd;
         break;
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_deref_atomic_or:
         op = LLVMAtomicRMWBinOpOr;
         break;
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_deref_atomic_xor:
         op = LLVMAtomicRMWBinOpXor;
         break;
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_deref_atomic_exchange:
         op = LLVMAtomicRMWBinOpXchg;
         break;
      default:
         return NULL;
      }

      result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
   }

   if (ctx->ac.postponed_kill)
      ac_build_endif(&ctx->ac, 7005);
   return result;
}
static LLVMValueRef
load_sample_pos(struct ac_nir_context *ctx)
{
   LLVMValueRef values[2];
   LLVMValueRef pos[2];

   pos[0] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]));
   pos[1] = ac_to_float(&ctx->ac,
                        ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]));

   values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
   values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
   return ac_build_gather_values(&ctx->ac, values, 2);
}

static LLVMValueRef
lookup_interp_param(struct ac_nir_context *ctx,
                    enum glsl_interp_mode interp, unsigned location)
{
   switch (interp) {
   case INTERP_MODE_FLAT:
   default:
      return NULL;
   case INTERP_MODE_SMOOTH:
   case INTERP_MODE_NONE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->persp_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->persp_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->persp_sample);
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      if (location == INTERP_CENTER)
         return ac_get_arg(&ctx->ac, ctx->args->linear_center);
      else if (location == INTERP_CENTROID)
         return ctx->abi->linear_centroid;
      else if (location == INTERP_SAMPLE)
         return ac_get_arg(&ctx->ac, ctx->args->linear_sample);
      break;
   }
   return NULL;
}
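
/* The barycentric_* helpers below return the (i, j) interpolation
 * coefficients packed as a v2i32, either taken directly from the PS input
 * arguments or recomputed with ddx/ddy for at_offset/at_sample.
 */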
static LLVMValueRef
barycentric_center(struct ac_nir_context *ctx,
                   unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}

static LLVMValueRef
barycentric_offset(struct ac_nir_context *ctx,
                   unsigned mode,
                   LLVMValueRef offset)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
   LLVMValueRef src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
   LLVMValueRef src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

   LLVMValueRef ij_out[2];
   LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

   /*
    * take the I then J parameters, and the DDX/Y for it, and
    * calculate the IJ inputs for the interpolator.
    * temp1 = ddx * offset/sample.x + I;
    * interp_param.I = ddy * offset/sample.y + temp1;
    * temp1 = ddx * offset/sample.x + J;
    * interp_param.J = ddy * offset/sample.y + temp1;
    */
   for (unsigned i = 0; i < 2; i++) {
      LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
      LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
      LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, ix_ll, "");
      LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                    ddxy_out, iy_ll, "");
      LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
                                                       interp_param, ix_ll, "");
      LLVMValueRef temp1, temp2;

      interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
                                   ctx->ac.f32, "");

      temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
      temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

      ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
                                   temp2, ctx->ac.i32, "");
   }
   interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}

static LLVMValueRef
barycentric_centroid(struct ac_nir_context *ctx,
                     unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}

static LLVMValueRef
barycentric_at_sample(struct ac_nir_context *ctx,
                      unsigned mode,
                      LLVMValueRef sample_id)
{
   if (ctx->abi->interp_at_sample_force_center)
      return barycentric_center(ctx, mode);

   LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

   /* fetch sample ID */
   LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

   LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
   src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
   LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
   src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
   LLVMValueRef coords[] = { src_c0, src_c1 };
   LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

   return barycentric_offset(ctx, mode, offset);
}

static LLVMValueRef
barycentric_sample(struct ac_nir_context *ctx,
                   unsigned mode)
{
   LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
   return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}

static LLVMValueRef
barycentric_model(struct ac_nir_context *ctx)
{
   return LLVMBuildBitCast(ctx->ac.builder,
                           ac_get_arg(&ctx->ac, ctx->args->pull_model),
                           ctx->ac.v3i32, "");
}
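
/* Interpolate num_components channels of a fragment shader input attribute
 * with ac_build_fs_interp(_f16), using the (i, j) pair produced by the
 * barycentric helpers above and the prim_mask argument.
 */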
static LLVMValueRef
load_interpolated_input(struct ac_nir_context *ctx,
                        LLVMValueRef interp_param,
                        unsigned index, unsigned comp_start,
                        unsigned num_components,
                        unsigned bitsize)
{
   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);

   interp_param = LLVMBuildBitCast(ctx->ac.builder,
                                   interp_param, ctx->ac.v2f32, "");
   LLVMValueRef i = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_0, "");
   LLVMValueRef j = LLVMBuildExtractElement(
      ctx->ac.builder, interp_param, ctx->ac.i32_1, "");

   LLVMValueRef values[4];
   assert(bitsize == 16 || bitsize == 32);
   for (unsigned comp = 0; comp < num_components; comp++) {
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
      if (bitsize == 16) {
         values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
                                               ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      } else {
         values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
                                           ac_get_arg(&ctx->ac, ctx->args->prim_mask), i, j);
      }
   }

   return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
static LLVMValueRef load_input(struct ac_nir_context *ctx,
                               nir_intrinsic_instr *instr)
{
   unsigned offset_idx = instr->intrinsic == nir_intrinsic_load_input ? 0 : 1;

   /* We only lower inputs for fragment shaders ATM */
   ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[offset_idx]);
   assert(offset[0].i32 == 0);

   unsigned component = nir_intrinsic_component(instr);
   unsigned index = nir_intrinsic_base(instr);
   unsigned vertex_id = 2; /* P0 */

   if (instr->intrinsic == nir_intrinsic_load_input_vertex) {
      nir_const_value *src0 = nir_src_as_const_value(instr->src[0]);

      switch (src0[0].i32) {
      case 0:
         vertex_id = 2;
         break;
      case 1:
         vertex_id = 0;
         break;
      case 2:
         vertex_id = 1;
         break;
      default:
         unreachable("Invalid vertex index");
      }
   }

   LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);
   LLVMValueRef values[8];

   /* Each component of a 64-bit value takes up two GL-level channels. */
   unsigned num_components = instr->dest.ssa.num_components;
   unsigned bit_size = instr->dest.ssa.bit_size;
   unsigned channels =
      bit_size == 64 ? num_components * 2 : num_components;

   for (unsigned chan = 0; chan < channels; chan++) {
      if (component + chan > 4)
         attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
      LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (component + chan) % 4, false);
      values[chan] = ac_build_fs_interp_mov(&ctx->ac,
                                            LLVMConstInt(ctx->ac.i32, vertex_id, false),
                                            llvm_chan,
                                            attr_number,
                                            ac_get_arg(&ctx->ac, ctx->args->prim_mask));
      values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
      values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
                                             bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
   }

   LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
   if (bit_size == 64) {
      LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
         LLVMVectorType(ctx->ac.i64, num_components);
      result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
   }
   return result;
}

static void visit_intrinsic(struct ac_nir_context *ctx,
                            nir_intrinsic_instr *instr)
{
   LLVMValueRef result = NULL;

   switch (instr->intrinsic) {
   case nir_intrinsic_ballot:
      result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
      if (ctx->ac.ballot_mask_bits > ctx->ac.wave_size)
         result = LLVMBuildZExt(ctx->ac.builder, result, ctx->ac.iN_ballotmask, "");
      break;
   case nir_intrinsic_read_invocation:
      result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]),
                                 get_src(ctx, instr->src[1]));
      break;
   case nir_intrinsic_read_first_invocation:
      result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
      break;
   case nir_intrinsic_load_subgroup_invocation:
      result = ac_get_thread_id(&ctx->ac);
      break;
   case nir_intrinsic_load_work_group_id: {
      LLVMValueRef values[3];

      for (int i = 0; i < 3; i++) {
         values[i] = ctx->args->workgroup_ids[i].used ?
            ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i]) : ctx->ac.i32_0;
      }

      result = ac_build_gather_values(&ctx->ac, values, 3);
      break;
   }
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
      result = ctx->abi->load_base_vertex(ctx->abi);
      break;
   case nir_intrinsic_load_local_group_size:
      result = ctx->abi->load_local_group_size(ctx->abi);
      break;
   case nir_intrinsic_load_vertex_id:
      result = LLVMBuildAdd(ctx->ac.builder,
                            ac_get_arg(&ctx->ac, ctx->args->vertex_id),
                            ac_get_arg(&ctx->ac, ctx->args->base_vertex), "");
      break;
   case nir_intrinsic_load_vertex_id_zero_base: {
      result = ctx->abi->vertex_id;
      break;
   }
   case nir_intrinsic_load_local_invocation_id: {
      result = ac_get_arg(&ctx->ac, ctx->args->local_invocation_ids);
      break;
   }
   case nir_intrinsic_load_base_instance:
      result = ac_get_arg(&ctx->ac, ctx->args->start_instance);
      break;
   case nir_intrinsic_load_draw_id:
      result = ac_get_arg(&ctx->ac, ctx->args->draw_id);
      break;
   case nir_intrinsic_load_view_index:
      result = ac_get_arg(&ctx->ac, ctx->args->view_index);
      break;
   case nir_intrinsic_load_invocation_id:
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         result = ac_unpack_param(&ctx->ac,
                                  ac_get_arg(&ctx->ac, ctx->args->tcs_rel_ids),
                                  8, 5);
      } else {
         if (ctx->ac.chip_class >= GFX10) {
            result = LLVMBuildAnd(ctx->ac.builder,
                                  ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id),
                                  LLVMConstInt(ctx->ac.i32, 127, 0), "");
         } else {
            result = ac_get_arg(&ctx->ac, ctx->args->gs_invocation_id);
         }
      }
      break;
   case nir_intrinsic_load_primitive_id:
      if (ctx->stage == MESA_SHADER_GEOMETRY) {
         result = ac_get_arg(&ctx->ac, ctx->args->gs_prim_id);
      } else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         result = ac_get_arg(&ctx->ac, ctx->args->tcs_patch_id);
      } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
         result = ac_get_arg(&ctx->ac, ctx->args->tes_patch_id);
      } else
         fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
      break;
   case nir_intrinsic_load_sample_id:
      result = ac_unpack_param(&ctx->ac,
                               ac_get_arg(&ctx->ac, ctx->args->ancillary),
                               8, 4);
      break;
   case nir_intrinsic_load_sample_pos:
      result = load_sample_pos(ctx);
      break;
   case nir_intrinsic_load_sample_mask_in:
      result = ctx->abi->load_sample_mask_in(ctx->abi);
      break;
   case nir_intrinsic_load_frag_coord: {
      LLVMValueRef values[4] = {
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[0]),
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[1]),
         ac_get_arg(&ctx->ac, ctx->args->frag_pos[2]),
         ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
                       ac_get_arg(&ctx->ac, ctx->args->frag_pos[3]))
      };
      result = ac_to_integer(&ctx->ac,
                             ac_build_gather_values(&ctx->ac, values, 4));
      break;
   }
   case nir_intrinsic_load_layer_id:
      result = ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
      break;
   case nir_intrinsic_load_front_face:
      result = ac_get_arg(&ctx->ac, ctx->args->front_face);
      break;
   case nir_intrinsic_load_helper_invocation:
      result = ac_build_load_helper_invocation(&ctx->ac);
      break;
   case nir_intrinsic_is_helper_invocation:
      result = ac_build_is_helper_invocation(&ctx->ac);
      break;
   case nir_intrinsic_load_color0:
      result = ctx->abi->color0;
      break;
   case nir_intrinsic_load_color1:
      result = ctx->abi->color1;
      break;
   case nir_intrinsic_load_user_data_amd:
      assert(LLVMTypeOf(ctx->abi->user_data) == ctx->ac.v4i32);
      result = ctx->abi->user_data;
      break;
   case nir_intrinsic_load_instance_id:
      result = ctx->abi->instance_id;
      break;
   case nir_intrinsic_load_num_work_groups:
      result = ac_get_arg(&ctx->ac, ctx->args->num_work_groups);
      break;
   case nir_intrinsic_load_local_invocation_index:
      result = visit_load_local_invocation_index(ctx);
      break;
   case nir_intrinsic_load_subgroup_id:
      result = visit_load_subgroup_id(ctx);
      break;
   case nir_intrinsic_load_num_subgroups:
      result = visit_load_num_subgroups(ctx);
      break;
   case nir_intrinsic_first_invocation:
      result = visit_first_invocation(ctx);
      break;
   case nir_intrinsic_load_push_constant:
      result = visit_load_push_constant(ctx, instr);
      break;
   case nir_intrinsic_vulkan_resource_index: {
      LLVMValueRef index = get_src(ctx, instr->src[0]);
      unsigned desc_set = nir_intrinsic_desc_set(instr);
      unsigned binding = nir_intrinsic_binding(instr);

      result = ctx->abi->load_resource(ctx->abi, index, desc_set,
                                       binding);
      break;
   }
   case nir_intrinsic_vulkan_resource_reindex:
      result = visit_vulkan_resource_reindex(ctx, instr);
      break;
   case nir_intrinsic_store_ssbo:
      visit_store_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ssbo:
      result = visit_load_buffer(ctx, instr);
      break;
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      result = visit_atomic_ssbo(ctx, instr);
      break;
   case nir_intrinsic_load_ubo:
      result = visit_load_ubo_buffer(ctx, instr);
      break;
   case nir_intrinsic_get_buffer_size:
      result = visit_get_buffer_size(ctx, instr);
      break;
   case nir_intrinsic_load_deref:
      result = visit_load_var(ctx, instr);
      break;
   case nir_intrinsic_store_deref:
      visit_store_var(ctx, instr);
      break;
   case nir_intrinsic_load_shared:
      result = visit_load_shared(ctx, instr);
      break;
   case nir_intrinsic_store_shared:
      visit_store_shared(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_deref_samples:
      result = visit_image_samples(ctx, instr);
      break;
   case nir_intrinsic_bindless_image_load:
      result = visit_image_load(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_load:
      result = visit_image_load(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_store:
      visit_image_store(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_store:
      visit_image_store(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
      result = visit_image_atomic(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
      result = visit_image_atomic(ctx, instr, false);
      break;
   case nir_intrinsic_bindless_image_size:
      result = visit_image_size(ctx, instr, true);
      break;
   case nir_intrinsic_image_deref_size:
      result = visit_image_size(ctx, instr, false);
      break;
   case nir_intrinsic_shader_clock:
      result = ac_build_shader_clock(&ctx->ac);
      break;
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      emit_discard(ctx, instr);
      break;
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      emit_demote(ctx, instr);
      break;
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
   case nir_intrinsic_memory_barrier_shared:
      emit_membar(&ctx->ac, instr);
      break;
   case nir_intrinsic_memory_barrier_tcs_patch:
      break;
   case nir_intrinsic_control_barrier:
      ac_emit_barrier(&ctx->ac, ctx->stage);
      break;
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_shared_atomic_comp_swap: {
      LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0],
                                        instr->src[1].ssa->bit_size);
      result = visit_var_atomic(ctx, instr, ptr, 1);
      break;
   }
   case nir_intrinsic_deref_atomic_add:
   case nir_intrinsic_deref_atomic_imin:
   case nir_intrinsic_deref_atomic_umin:
   case nir_intrinsic_deref_atomic_imax:
   case nir_intrinsic_deref_atomic_umax:
   case nir_intrinsic_deref_atomic_and:
   case nir_intrinsic_deref_atomic_or:
   case nir_intrinsic_deref_atomic_xor:
   case nir_intrinsic_deref_atomic_exchange:
   case nir_intrinsic_deref_atomic_comp_swap: {
      LLVMValueRef ptr = get_src(ctx, instr->src[0]);
      result = visit_var_atomic(ctx, instr, ptr, 1);
      break;
   }
   case nir_intrinsic_load_barycentric_pixel:
      result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_centroid:
      result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_sample:
      result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
      break;
   case nir_intrinsic_load_barycentric_model:
      result = barycentric_model(ctx);
      break;
   case nir_intrinsic_load_barycentric_at_offset: {
      LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
      result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
      break;
   }
   case nir_intrinsic_load_barycentric_at_sample: {
      LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
      result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
      break;
   }
   case nir_intrinsic_load_interpolated_input: {
      /* We assume any indirect loads have been lowered away */
      ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
      assert(offset[0].i32 == 0);

      LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
      unsigned index = nir_intrinsic_base(instr);
      unsigned component = nir_intrinsic_component(instr);
      result = load_interpolated_input(ctx, interp_param, index,
                                       component,
                                       instr->dest.ssa.num_components,
                                       instr->dest.ssa.bit_size);
      break;
   }
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_input_vertex:
      result = load_input(ctx, instr);
      break;
   case nir_intrinsic_emit_vertex:
      ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
      break;
   case nir_intrinsic_end_primitive:
      ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
      break;
   case nir_intrinsic_load_tess_coord:
      result = ctx->abi->load_tess_coord(ctx->abi);
      break;
   case nir_intrinsic_load_tess_level_outer:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, false);
      break;
   case nir_intrinsic_load_tess_level_inner:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, false);
      break;
   case nir_intrinsic_load_tess_level_outer_default:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, true);
      break;
   case nir_intrinsic_load_tess_level_inner_default:
      result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, true);
      break;
   case nir_intrinsic_load_patch_vertices_in:
      result = ctx->abi->load_patch_vertices_in(ctx->abi);
      break;
   case nir_intrinsic_vote_all: {
      LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
      result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_intrinsic_vote_any: {
      LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
      result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
      break;
   }
   case nir_intrinsic_shuffle:
      if (ctx->ac.chip_class == GFX8 ||
          ctx->ac.chip_class == GFX9 ||
          (ctx->ac.chip_class == GFX10 && ctx->ac.wave_size == 32)) {
         result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
                                   get_src(ctx, instr->src[1]));
      } else {
         LLVMValueRef src = get_src(ctx, instr->src[0]);
         LLVMValueRef index = get_src(ctx, instr->src[1]);
         LLVMTypeRef type = LLVMTypeOf(src);
         struct waterfall_context wctx;
         LLVMValueRef index_val;

         index_val = enter_waterfall(ctx, &wctx, index, true);

         src = LLVMBuildZExt(ctx->ac.builder, src,
                             ctx->ac.i32, "");

         result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.readlane",
                                     ctx->ac.i32,
                                     (LLVMValueRef []) { src, index_val }, 2,
                                     AC_FUNC_ATTR_READNONE |
                                     AC_FUNC_ATTR_CONVERGENT);

         result = LLVMBuildTrunc(ctx->ac.builder, result, type, "");

         result = exit_waterfall(ctx, &wctx, result);
      }
      break;
   case nir_intrinsic_reduce:
      result = ac_build_reduce(&ctx->ac,
                               get_src(ctx, instr->src[0]),
                               instr->const_index[0],
                               instr->const_index[1]);
      break;
   case nir_intrinsic_inclusive_scan:
      result = ac_build_inclusive_scan(&ctx->ac,
                                       get_src(ctx, instr->src[0]),
                                       instr->const_index[0]);
      break;
   case nir_intrinsic_exclusive_scan:
      result = ac_build_exclusive_scan(&ctx->ac,
                                       get_src(ctx, instr->src[0]),
                                       instr->const_index[0]);
      break;
   case nir_intrinsic_quad_broadcast: {
      unsigned lane = nir_src_as_uint(instr->src[1]);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                     lane, lane, lane, lane);
      break;
   }
   case nir_intrinsic_quad_swap_horizontal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
      break;
   case nir_intrinsic_quad_swap_vertical:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
      break;
   case nir_intrinsic_quad_swap_diagonal:
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
      break;
   case nir_intrinsic_quad_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
                                     mask & 0x3, (mask >> 2) & 0x3,
                                     (mask >> 4) & 0x3, (mask >> 6) & 0x3);
      break;
   }
   case nir_intrinsic_masked_swizzle_amd: {
      uint32_t mask = nir_intrinsic_swizzle_mask(instr);
      result = ac_build_ds_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), mask);
      break;
   }
   case nir_intrinsic_write_invocation_amd:
      result = ac_build_writelane(&ctx->ac, get_src(ctx, instr->src[0]),
                                  get_src(ctx, instr->src[1]),
                                  get_src(ctx, instr->src[2]));
      break;
   case nir_intrinsic_mbcnt_amd:
      result = ac_build_mbcnt(&ctx->ac, get_src(ctx, instr->src[0]));
      break;
   case nir_intrinsic_load_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
      LLVMTypeRef vec_type =
         instr->dest.ssa.num_components == 1 ? comp_type :
         LLVMVectorType(comp_type, instr->dest.ssa.num_components);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(vec_type, addr_space), "");
      result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      break;
   }
   case nir_intrinsic_store_scratch: {
      LLVMValueRef offset = get_src(ctx, instr->src[1]);
      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->src[0].ssa->bit_size);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(comp_type, addr_space), "");
      LLVMValueRef src = get_src(ctx, instr->src[0]);
      unsigned wrmask = nir_intrinsic_write_mask(instr);
      while (wrmask) {
         int start, count;
         u_bit_scan_consecutive_range(&wrmask, &start, &count);

         LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, start, false);
         LLVMValueRef offset_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &offset, 1, "");
         LLVMTypeRef vec_type =
            count == 1 ? comp_type : LLVMVectorType(comp_type, count);
         offset_ptr = LLVMBuildBitCast(ctx->ac.builder,
                                       offset_ptr,
                                       LLVMPointerType(vec_type, addr_space),
                                       "");
         LLVMValueRef offset_src =
            ac_extract_components(&ctx->ac, src, start, count);
         LLVMBuildStore(ctx->ac.builder, offset_src, offset_ptr);
      }
      break;
   }
   case nir_intrinsic_load_constant: {
      unsigned base = nir_intrinsic_base(instr);
      unsigned range = nir_intrinsic_range(instr);

      LLVMValueRef offset = get_src(ctx, instr->src[0]);
      offset = LLVMBuildAdd(ctx->ac.builder, offset,
                            LLVMConstInt(ctx->ac.i32, base, false), "");

      /* Clamp the offset to avoid out-of-bound access because global
       * instructions can't handle them.
       */
      LLVMValueRef size = LLVMConstInt(ctx->ac.i32, base + range, false);
      LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
                                        offset, size, "");
      offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");

      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data,
                                       offset);
      LLVMTypeRef comp_type =
         LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
      LLVMTypeRef vec_type =
         instr->dest.ssa.num_components == 1 ? comp_type :
         LLVMVectorType(comp_type, instr->dest.ssa.num_components);
      unsigned addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));
      ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                             LLVMPointerType(vec_type, addr_space), "");
      result = LLVMBuildLoad(ctx->ac.builder, ptr, "");
      break;
   }
   default:
      fprintf(stderr, "Unknown intrinsic: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      break;
   }
   if (result) {
      ctx->ssa_defs[instr->dest.ssa.index] = result;
   }
}

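/* Note: intrinsics that only have side effects (stores, barriers, emits)
 * leave result == NULL above, so no SSA def is recorded for them. */
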
static LLVMValueRef get_bindless_index_from_uniform(struct ac_nir_context *ctx,
                                                    unsigned base_index,
                                                    unsigned constant_index,
                                                    LLVMValueRef dynamic_index)
{
   LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
   LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
                                     LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

   /* Bindless uniforms are 64bit so multiply the index by 8 */
   index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
   offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

   LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

   LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
                                           NULL, 0, 0, true, true);

   return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}

struct sampler_desc_address {
   unsigned descriptor_set;
   unsigned base_index; /* binding in vulkan */
   unsigned constant_index;
   LLVMValueRef dynamic_index;
   bool image;
   bool bindless;
};

static struct sampler_desc_address
get_sampler_desc_internal(struct ac_nir_context *ctx,
                          nir_deref_instr *deref_instr,
                          const nir_instr *instr,
                          bool image)
{
   LLVMValueRef index = NULL;
   unsigned constant_index = 0;
   unsigned descriptor_set;
   unsigned base_index;
   bool bindless = false;

   if (!deref_instr) {
      descriptor_set = 0;
      if (image) {
         nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
         base_index = 0;
         bindless = true;
         index = get_src(ctx, img_instr->src[0]);
      } else {
         nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
         int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
                                                  nir_tex_src_sampler_handle);
         if (sampSrcIdx != -1) {
            base_index = 0;
            bindless = true;
            index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
         } else {
            assert(tex_instr && !image);
            base_index = tex_instr->sampler_index;
         }
      }
   } else {
      while(deref_instr->deref_type != nir_deref_type_var) {
         if (deref_instr->deref_type == nir_deref_type_array) {
            unsigned array_size = glsl_get_aoa_size(deref_instr->type);
            if (!array_size)
               array_size = 1;

            if (nir_src_is_const(deref_instr->arr.index)) {
               constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
            } else {
               LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

               indirect = LLVMBuildMul(ctx->ac.builder, indirect,
                                       LLVMConstInt(ctx->ac.i32, array_size, false), "");

               if (!index)
                  index = indirect;
               else
                  index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
            }

            deref_instr = nir_src_as_deref(deref_instr->parent);
         } else if (deref_instr->deref_type == nir_deref_type_struct) {
            unsigned sidx = deref_instr->strct.index;
            deref_instr = nir_src_as_deref(deref_instr->parent);
            constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
         } else {
            unreachable("Unsupported deref type");
         }
      }
      descriptor_set = deref_instr->var->data.descriptor_set;

      if (deref_instr->var->data.bindless) {
         /* For now just assert on unhandled variable types */
         assert(deref_instr->var->data.mode == nir_var_uniform);

         base_index = deref_instr->var->data.driver_location;
         bindless = true;

         index = index ? index : ctx->ac.i32_0;
         index = get_bindless_index_from_uniform(ctx, base_index,
                                                 constant_index, index);
      } else
         base_index = deref_instr->var->data.binding;
   }
   return (struct sampler_desc_address) {
      .descriptor_set = descriptor_set,
      .base_index = base_index,
      .constant_index = constant_index,
      .dynamic_index = index,
      .image = image,
      .bindless = bindless,
   };
}

/* Extract any possibly divergent index into a separate value that can be fed
 * into get_sampler_desc with the same arguments. */
static LLVMValueRef get_sampler_desc_index(struct ac_nir_context *ctx,
                                           nir_deref_instr *deref_instr,
                                           const nir_instr *instr,
                                           bool image)
{
   struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
   return addr.dynamic_index;
}

static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
                                     nir_deref_instr *deref_instr,
                                     enum ac_descriptor_type desc_type,
                                     const nir_instr *instr,
                                     LLVMValueRef index,
                                     bool image, bool write)
{
   struct sampler_desc_address addr = get_sampler_desc_internal(ctx, deref_instr, instr, image);
   return ctx->abi->load_sampler_desc(ctx->abi,
                                      addr.descriptor_set,
                                      addr.base_index,
                                      addr.constant_index, index,
                                      desc_type, addr.image, write, addr.bindless);
}

/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
                                           LLVMValueRef res, LLVMValueRef samp)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef img7, samp0;

   if (ctx->ac.chip_class >= GFX8)
      return samp;

   img7 = LLVMBuildExtractElement(builder, res,
                                  LLVMConstInt(ctx->ac.i32, 7, 0), "");
   samp0 = LLVMBuildExtractElement(builder, samp,
                                   LLVMConstInt(ctx->ac.i32, 0, 0), "");
   samp0 = LLVMBuildAnd(builder, samp0, img7, "");
   return LLVMBuildInsertElement(builder, samp, samp0,
                                 LLVMConstInt(ctx->ac.i32, 0, 0), "");
}

static void tex_fetch_ptrs(struct ac_nir_context *ctx,
                           nir_tex_instr *instr,
                           struct waterfall_context *wctx,
                           LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
                           LLVMValueRef *fmask_ptr)
{
   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   int plane = -1;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
         break;
      case nir_tex_src_plane:
         plane = nir_src_as_int(instr->src[i].src);
         break;
      default:
         break;
      }
   }

   LLVMValueRef texture_dynamic_index = get_sampler_desc_index(ctx, texture_deref_instr,
                                                               &instr->instr, false);
   if (!sampler_deref_instr)
      sampler_deref_instr = texture_deref_instr;

   LLVMValueRef sampler_dynamic_index = get_sampler_desc_index(ctx, sampler_deref_instr,
                                                               &instr->instr, false);
   if (instr->texture_non_uniform)
      texture_dynamic_index = enter_waterfall(ctx, wctx + 0, texture_dynamic_index, true);

   if (instr->sampler_non_uniform)
      sampler_dynamic_index = enter_waterfall(ctx, wctx + 1, sampler_dynamic_index, true);

   enum ac_descriptor_type main_descriptor = instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

   if (plane >= 0) {
      assert(instr->op != nir_texop_txf_ms &&
             instr->op != nir_texop_samples_identical);
      assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

      main_descriptor = AC_DESC_PLANE_0 + plane;
   }

   if (instr->op == nir_texop_fragment_mask_fetch) {
      /* The fragment mask is fetched from the compressed
       * multisampled surface.
       */
      main_descriptor = AC_DESC_FMASK;
   }

   *res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr,
                               texture_dynamic_index, false, false);

   if (samp_ptr) {
      *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr,
                                   sampler_dynamic_index, false, false);
      if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
         *samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
   }
   if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
                     instr->op == nir_texop_samples_identical))
      *fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK,
                                    &instr->instr, texture_dynamic_index, false, false);
}

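/* Note on tex_fetch_ptrs(): when the texture or sampler index is divergent
 * (non-uniform), the dynamic index is wrapped in a waterfall loop (wctx[0]
 * for the texture, wctx[1] for the sampler) so the descriptor loads see a
 * uniform index on every iteration; visit_tex() unwinds both loops after
 * the image instruction has been emitted. */
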
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
                                      LLVMValueRef coord)
{
   coord = ac_to_float(ctx, coord);
   coord = ac_build_round(ctx, coord);
   coord = ac_to_integer(ctx, coord);
   return coord;
}

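/* Note: array layer coordinates are rounded to the nearest integer before
 * sampling, matching the layer-selection rounding required by GL/Vulkan for
 * array textures. */
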
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
   LLVMValueRef result = NULL;
   struct ac_image_args args = { 0 };
   LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
   LLVMValueRef ddx = NULL, ddy = NULL;
   unsigned offset_src = 0;
   struct waterfall_context wctx[2] = {{{0}}};

   tex_fetch_ptrs(ctx, instr, wctx, &args.resource, &args.sampler, &fmask_ptr);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_coord: {
         LLVMValueRef coord = get_src(ctx, instr->src[i].src);
         for (unsigned chan = 0; chan < instr->coord_components; ++chan)
            args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
         break;
      }
      case nir_tex_src_projector:
         break;
      case nir_tex_src_comparator:
         if (instr->is_shadow) {
            args.compare = get_src(ctx, instr->src[i].src);
            args.compare = ac_to_float(&ctx->ac, args.compare);
         }
         break;
      case nir_tex_src_offset:
         args.offset = get_src(ctx, instr->src[i].src);
         offset_src = i;
         break;
      case nir_tex_src_bias:
         if (instr->op == nir_texop_txb)
            args.bias = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_lod: {
         if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
            args.level_zero = true;
         else
            args.lod = get_src(ctx, instr->src[i].src);
         break;
      }
      case nir_tex_src_ms_index:
         sample_index = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ms_mcs:
         break;
      case nir_tex_src_ddx:
         ddx = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_ddy:
         ddy = get_src(ctx, instr->src[i].src);
         break;
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      default:
         break;
      }
   }

   if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      result = get_buffer_size(ctx, args.resource, true);
      goto write_result;
   }

   if (instr->op == nir_texop_texture_samples) {
      LLVMValueRef res, samples, is_msaa;
      res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
      samples = LLVMBuildExtractElement(ctx->ac.builder, res,
                                        LLVMConstInt(ctx->ac.i32, 3, false), "");
      is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
                              LLVMConstInt(ctx->ac.i32, 28, false), "");
      is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa,
                             LLVMConstInt(ctx->ac.i32, 0xe, false), "");
      is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
                              LLVMConstInt(ctx->ac.i32, 0xe, false), "");

      samples = LLVMBuildLShr(ctx->ac.builder, samples,
                              LLVMConstInt(ctx->ac.i32, 16, false), "");
      samples = LLVMBuildAnd(ctx->ac.builder, samples,
                             LLVMConstInt(ctx->ac.i32, 0xf, false), "");
      samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1,
                             samples, "");
      samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples,
                                ctx->ac.i32_1, "");
      result = samples;
      goto write_result;
   }

   if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
      LLVMValueRef offset[3], pack;
      for (unsigned chan = 0; chan < 3; ++chan)
         offset[chan] = ctx->ac.i32_0;

      unsigned num_components = ac_get_llvm_num_components(args.offset);
      for (unsigned chan = 0; chan < num_components; chan++) {
         offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
         offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
                                     LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
         if (chan)
            offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
                                        LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
      }
      pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
      pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
      args.offset = pack;
   }

   /* Section 8.23.1 (Depth Texture Comparison Mode) of the
    * OpenGL 4.5 spec says:
    *
    *    "If the texture's internal format indicates a fixed-point
    *    depth texture, then D_t and D_ref are clamped to the
    *    range [0, 1]; otherwise no clamping is performed."
    *
    * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
    * so the depth comparison value isn't clamped for Z16 and
    * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
    * an explicitly clamped 32-bit float format.
    */
   if (args.compare &&
       ctx->ac.chip_class >= GFX8 &&
       ctx->ac.chip_class <= GFX9 &&
       ctx->abi->clamp_shadow_reference) {
      LLVMValueRef upgraded, clamped;

      upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
                                         LLVMConstInt(ctx->ac.i32, 3, false), "");
      upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
                               LLVMConstInt(ctx->ac.i32, 29, false), "");
      upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
      clamped = ac_build_clamp(&ctx->ac, args.compare);
      args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped,
                                     args.compare, "");
   }

   /* pack derivatives */
   if (ddx || ddy) {
      int num_src_deriv_channels, num_dest_deriv_channels;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         num_src_deriv_channels = 3;
         num_dest_deriv_channels = 3;
         break;
      case GLSL_SAMPLER_DIM_2D:
      default:
         num_src_deriv_channels = 2;
         num_dest_deriv_channels = 2;
         break;
      case GLSL_SAMPLER_DIM_1D:
         num_src_deriv_channels = 1;
         if (ctx->ac.chip_class == GFX9) {
            num_dest_deriv_channels = 2;
         } else {
            num_dest_deriv_channels = 1;
         }
         break;
      }

      for (unsigned i = 0; i < num_src_deriv_channels; i++) {
         args.derivs[i] = ac_to_float(&ctx->ac,
                                      ac_llvm_extract_elem(&ctx->ac, ddx, i));
         args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
                                                                ac_llvm_extract_elem(&ctx->ac, ddy, i));
      }
      for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
         args.derivs[i] = ctx->ac.f32_0;
         args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
      }
   }

   if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
      for (unsigned chan = 0; chan < instr->coord_components; chan++)
         args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
      if (instr->coord_components == 3)
         args.coords[3] = LLVMGetUndef(ctx->ac.f32);
      ac_prepare_cube_coords(&ctx->ac,
                             instr->op == nir_texop_txd, instr->is_array,
                             instr->op == nir_texop_lod, args.coords, args.derivs);
   }

   /* Texture coordinates fixups */
   if (instr->coord_components > 1 &&
       instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
       instr->is_array &&
       instr->op != nir_texop_txf) {
      args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
   }

   if (instr->coord_components > 2 &&
       (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
       instr->is_array &&
       instr->op != nir_texop_txf &&
       instr->op != nir_texop_txf_ms &&
       instr->op != nir_texop_fragment_fetch &&
       instr->op != nir_texop_fragment_mask_fetch) {
      args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
   }

   if (ctx->ac.chip_class == GFX9 &&
       instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
       instr->op != nir_texop_lod) {
      LLVMValueRef filler;
      if (instr->op == nir_texop_txf)
         filler = ctx->ac.i32_0;
      else
         filler = LLVMConstReal(ctx->ac.f32, 0.5);

      if (instr->is_array)
         args.coords[2] = args.coords[1];
      args.coords[1] = filler;
   }

   /* Pack sample index */
   if (sample_index && (instr->op == nir_texop_txf_ms ||
                        instr->op == nir_texop_fragment_fetch))
      args.coords[instr->coord_components] = sample_index;

   if (instr->op == nir_texop_samples_identical) {
      struct ac_image_args txf_args = { 0 };
      memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));

      txf_args.dmask = 0xf;
      txf_args.resource = fmask_ptr;
      txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
      result = build_tex_intrinsic(ctx, instr, &txf_args);

      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
      result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
      goto write_result;
   }

   if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
        instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
       instr->op != nir_texop_txs &&
       instr->op != nir_texop_fragment_fetch &&
       instr->op != nir_texop_fragment_mask_fetch) {
      unsigned sample_chan = instr->is_array ? 3 : 2;
      args.coords[sample_chan] = adjust_sample_index_using_fmask(
         &ctx->ac, args.coords[0], args.coords[1],
         instr->is_array ? args.coords[2] : NULL,
         args.coords[sample_chan], fmask_ptr);
   }

   if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
      int num_offsets = instr->src[offset_src].src.ssa->num_components;
      num_offsets = MIN2(num_offsets, instr->coord_components);
      for (unsigned i = 0; i < num_offsets; ++i) {
         args.coords[i] = LLVMBuildAdd(
            ctx->ac.builder, args.coords[i],
            LLVMConstInt(ctx->ac.i32, nir_src_comp_as_uint(instr->src[offset_src].src, i), false), "");
      }
      args.offset = NULL;
   }

   /* DMASK was repurposed for GATHER4. 4 components are always
    * returned and DMASK works like a swizzle - it selects
    * the component to fetch. The only valid DMASK values are
    * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
    * (red,red,red,red) etc.) The ISA document doesn't mention
    * this.
    */
   args.dmask = 0xf;
   if (instr->op == nir_texop_tg4) {
      if (instr->is_shadow)
         args.dmask = 1;
      else
         args.dmask = 1 << instr->component;
   }

   if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
      args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
      args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
   }

   /* Adjust the number of coordinates because we only need (x,y) for 2D
    * multisampled images and (x,y,layer) for 2D multisampled layered
    * images or for multisampled input attachments.
    */
   if (instr->op == nir_texop_fragment_mask_fetch) {
      if (args.dim == ac_image_2dmsaa) {
         args.dim = ac_image_2d;
      } else {
         assert(args.dim == ac_image_2darraymsaa);
         args.dim = ac_image_2darray;
      }
   }

   result = build_tex_intrinsic(ctx, instr, &args);

   if (instr->op == nir_texop_query_levels)
      result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
   else if (instr->is_shadow && instr->is_new_style_shadow &&
            instr->op != nir_texop_txs && instr->op != nir_texop_lod &&
            instr->op != nir_texop_tg4)
      result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
   else if (instr->op == nir_texop_txs &&
            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
            instr->is_array) {
      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
      LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
      LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
      z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
      result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
   } else if (ctx->ac.chip_class == GFX9 &&
              instr->op == nir_texop_txs &&
              instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
              instr->is_array) {
      LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
      LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
      result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
                                      ctx->ac.i32_1, "");
   } else if (instr->dest.ssa.num_components != 4)
      result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);

write_result:
   if (result) {
      assert(instr->dest.is_ssa);
      result = ac_to_integer(&ctx->ac, result);

      for (int i = ARRAY_SIZE(wctx); --i >= 0;) {
         result = exit_waterfall(ctx, wctx + i, result);
      }

      ctx->ssa_defs[instr->dest.ssa.index] = result;
   }
}

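/* Note: the waterfall contexts are exited in reverse order of entry
 * (sampler first, then texture) so the resulting control flow nests
 * correctly around the image instruction. */
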
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
   LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
   LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

   ctx->ssa_defs[instr->dest.ssa.index] = result;
   _mesa_hash_table_insert(ctx->phis, instr, result);
}

static void visit_post_phi(struct ac_nir_context *ctx,
                           nir_phi_instr *instr,
                           LLVMValueRef llvm_phi)
{
   nir_foreach_phi_src(src, instr) {
      LLVMBasicBlockRef block = get_block(ctx, src->pred);
      LLVMValueRef llvm_src = get_src(ctx, src->src);

      LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
   }
}

static void phi_post_pass(struct ac_nir_context *ctx)
{
   hash_table_foreach(ctx->phis, entry) {
      visit_post_phi(ctx, (nir_phi_instr *)entry->key,
                     (LLVMValueRef)entry->data);
   }
}

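/* Note: phis are created empty in visit_phi() and only get their incoming
 * values wired up here, once every block has been translated, because a phi
 * may reference SSA values and blocks that appear later in the function. */
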
static void visit_ssa_undef(struct ac_nir_context *ctx,
                            const nir_ssa_undef_instr *instr)
{
   unsigned num_components = instr->def.num_components;
   LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

   LLVMValueRef undef;

   if (num_components == 1)
      undef = LLVMGetUndef(type);
   else {
      undef = LLVMGetUndef(LLVMVectorType(type, num_components));
   }
   ctx->ssa_defs[instr->def.index] = undef;
}

static void visit_jump(struct ac_llvm_context *ctx,
                       const nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      ac_build_break(ctx);
      break;
   case nir_jump_continue:
      ac_build_continue(ctx);
      break;
   default:
      fprintf(stderr, "Unknown NIR jump instr: ");
      nir_print_instr(&instr->instr, stderr);
      fprintf(stderr, "\n");
      abort();
   }
}

static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
                       enum glsl_base_type type)
{
   switch (type) {
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_SUBROUTINE:
      return ac->i32;
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_UINT8:
      return ac->i8;
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
      return ac->i16;
   case GLSL_TYPE_FLOAT:
      return ac->f32;
   case GLSL_TYPE_FLOAT16:
      return ac->f16;
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
      return ac->i64;
   case GLSL_TYPE_DOUBLE:
      return ac->f64;
   default:
      unreachable("unknown GLSL type");
   }
}

static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
                  const struct glsl_type *type)
{
   if (glsl_type_is_scalar(type)) {
      return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
   }

   if (glsl_type_is_vector(type)) {
      return LLVMVectorType(
         glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
         glsl_get_vector_elements(type));
   }

   if (glsl_type_is_matrix(type)) {
      return LLVMArrayType(
         glsl_to_llvm_type(ac, glsl_get_column_type(type)),
         glsl_get_matrix_columns(type));
   }

   if (glsl_type_is_array(type)) {
      return LLVMArrayType(
         glsl_to_llvm_type(ac, glsl_get_array_element(type)),
         glsl_get_length(type));
   }

   assert(glsl_type_is_struct_or_ifc(type));

   LLVMTypeRef member_types[glsl_get_length(type)];

   for (unsigned i = 0; i < glsl_get_length(type); i++) {
      member_types[i] =
         glsl_to_llvm_type(ac,
                           glsl_get_struct_field(type, i));
   }

   return LLVMStructTypeInContext(ac->context, member_types,
                                  glsl_get_length(type), false);
}

static void visit_deref(struct ac_nir_context *ctx,
                        nir_deref_instr *instr)
{
   if (instr->mode != nir_var_mem_shared &&
       instr->mode != nir_var_mem_global)
      return;

   LLVMValueRef result = NULL;
   switch(instr->deref_type) {
   case nir_deref_type_var: {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
      result = entry->data;
      break;
   }
   case nir_deref_type_struct:
      if (instr->mode == nir_var_mem_global) {
         nir_deref_instr *parent = nir_deref_instr_parent(instr);
         uint64_t offset = glsl_get_struct_field_offset(parent->type,
                                                        instr->strct.index);
         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                   LLVMConstInt(ctx->ac.i32, offset, 0));
      } else {
         result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
      }
      break;
   case nir_deref_type_array:
      if (instr->mode == nir_var_mem_global) {
         nir_deref_instr *parent = nir_deref_instr_parent(instr);
         unsigned stride = glsl_get_explicit_stride(parent->type);

         if ((glsl_type_is_matrix(parent->type) &&
              glsl_matrix_type_is_row_major(parent->type)) ||
             (glsl_type_is_vector(parent->type) && stride == 0))
            stride = type_scalar_size_bytes(parent->type);

         LLVMValueRef index = get_src(ctx, instr->arr.index);
         if (LLVMTypeOf(index) != ctx->ac.i64)
            index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

         LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
      } else {
         result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
                                get_src(ctx, instr->arr.index));
      }
      break;
   case nir_deref_type_ptr_as_array:
      if (instr->mode == nir_var_mem_global) {
         unsigned stride = nir_deref_instr_ptr_as_array_stride(instr);

         LLVMValueRef index = get_src(ctx, instr->arr.index);
         if (LLVMTypeOf(index) != ctx->ac.i64)
            index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

         LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
      } else {
         result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
                                   get_src(ctx, instr->arr.index));
      }
      break;
   case nir_deref_type_cast: {
      result = get_src(ctx, instr->parent);

      /* We can't use the structs from LLVM because the shader
       * specifies its own offsets. */
      LLVMTypeRef pointee_type = ctx->ac.i8;
      if (instr->mode == nir_var_mem_shared)
         pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);

      unsigned address_space;

      switch(instr->mode) {
      case nir_var_mem_shared:
         address_space = AC_ADDR_SPACE_LDS;
         break;
      case nir_var_mem_global:
         address_space = AC_ADDR_SPACE_GLOBAL;
         break;
      default:
         unreachable("Unhandled address space");
      }

      LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);

      if (LLVMTypeOf(result) != type) {
         if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
            result = LLVMBuildBitCast(ctx->ac.builder, result,
                                      type, "");
         } else {
            result = LLVMBuildIntToPtr(ctx->ac.builder, result,
                                       type, "");
         }
      }
      break;
   }
   default:
      unreachable("Unhandled deref_instr deref type");
   }

   ctx->ssa_defs[instr->dest.ssa.index] = result;
}

static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list);

static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
   nir_foreach_instr(instr, block)
   {
      switch (instr->type) {
      case nir_instr_type_alu:
         visit_alu(ctx, nir_instr_as_alu(instr));
         break;
      case nir_instr_type_load_const:
         visit_load_const(ctx, nir_instr_as_load_const(instr));
         break;
      case nir_instr_type_intrinsic:
         visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
         break;
      case nir_instr_type_tex:
         visit_tex(ctx, nir_instr_as_tex(instr));
         break;
      case nir_instr_type_phi:
         visit_phi(ctx, nir_instr_as_phi(instr));
         break;
      case nir_instr_type_ssa_undef:
         visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
         break;
      case nir_instr_type_jump:
         visit_jump(&ctx->ac, nir_instr_as_jump(instr));
         break;
      case nir_instr_type_deref:
         visit_deref(ctx, nir_instr_as_deref(instr));
         break;
      default:
         fprintf(stderr, "Unknown NIR instr type: ");
         nir_print_instr(instr, stderr);
         fprintf(stderr, "\n");
         abort();
      }
   }

   _mesa_hash_table_insert(ctx->defs, block,
                           LLVMGetInsertBlock(ctx->ac.builder));
}

static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
   LLVMValueRef value = get_src(ctx, if_stmt->condition);

   nir_block *then_block =
      (nir_block *) exec_list_get_head(&if_stmt->then_list);

   ac_build_uif(&ctx->ac, value, then_block->index);

   visit_cf_list(ctx, &if_stmt->then_list);

   if (!exec_list_is_empty(&if_stmt->else_list)) {
      nir_block *else_block =
         (nir_block *) exec_list_get_head(&if_stmt->else_list);

      ac_build_else(&ctx->ac, else_block->index);
      visit_cf_list(ctx, &if_stmt->else_list);
   }

   ac_build_endif(&ctx->ac, then_block->index);
}

static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
   nir_block *first_loop_block =
      (nir_block *) exec_list_get_head(&loop->body);

   ac_build_bgnloop(&ctx->ac, first_loop_block->index);

   visit_cf_list(ctx, &loop->body);

   ac_build_endloop(&ctx->ac, first_loop_block->index);
}

static void visit_cf_list(struct ac_nir_context *ctx,
                          struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list)
   {
      switch (node->type) {
      case nir_cf_node_block:
         visit_block(ctx, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         visit_if(ctx, nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         visit_loop(ctx, nir_cf_node_as_loop(node));
         break;

      default:
         assert(0);
      }
   }
}

void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
                             struct ac_shader_abi *abi,
                             struct nir_shader *nir,
                             struct nir_variable *variable,
                             gl_shader_stage stage)
{
   unsigned output_loc = variable->data.driver_location / 4;
   unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

   /* tess ctrl has its own load/store paths for outputs */
   if (stage == MESA_SHADER_TESS_CTRL)
      return;

   if (stage == MESA_SHADER_VERTEX ||
       stage == MESA_SHADER_TESS_EVAL ||
       stage == MESA_SHADER_GEOMETRY) {
      int idx = variable->data.location + variable->data.index;
      if (idx == VARYING_SLOT_CLIP_DIST0) {
         int length = nir->info.clip_distance_array_size +
                      nir->info.cull_distance_array_size;

         if (length > 4)
            attrib_count = 2;
         else
            attrib_count = 1;
      }
   }

   bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
   LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
   for (unsigned i = 0; i < attrib_count; ++i) {
      for (unsigned chan = 0; chan < 4; chan++) {
         abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
            ac_build_alloca_undef(ctx, type, "");
      }
   }
}

static void
setup_locals(struct ac_nir_context *ctx,
             struct nir_function *func)
{
   int i, j;
   ctx->num_locals = 0;
   nir_foreach_variable(variable, &func->impl->locals) {
      unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
      variable->data.driver_location = ctx->num_locals * 4;
      variable->data.location_frac = 0;
      ctx->num_locals += attrib_count;
   }
   ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
   if (!ctx->locals)
      return;

   for (i = 0; i < ctx->num_locals; i++) {
      for (j = 0; j < 4; j++) {
         ctx->locals[i * 4 + j] =
            ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
      }
   }
}

static void
setup_scratch(struct ac_nir_context *ctx,
              struct nir_shader *shader)
{
   if (shader->scratch_size == 0)
      return;

   ctx->scratch = ac_build_alloca_undef(&ctx->ac,
                                        LLVMArrayType(ctx->ac.i8, shader->scratch_size),
                                        "scratch");
}

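/* Note: scratch space is modelled as a single i8-array alloca; the
 * nir_intrinsic_load_scratch/store_scratch cases in visit_intrinsic() bitcast
 * the pointer to the component type they actually access. */
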
static void
setup_constant_data(struct ac_nir_context *ctx,
                    struct nir_shader *shader)
{
   if (!shader->constant_data)
      return;

   LLVMValueRef data =
      LLVMConstStringInContext(ctx->ac.context,
                               shader->constant_data,
                               shader->constant_data_size,
                               true);
   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

   /* We want to put the constant data in the CONST address space so that
    * we can use scalar loads. However, LLVM versions before 10 put these
    * variables in the same section as the code, which is unacceptable
    * for RadeonSI as it needs to relocate all the data sections after
    * the code sections. See https://reviews.llvm.org/D65813.
    */
   unsigned address_space =
      LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

   LLVMValueRef global =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                  "const_data",
                                  address_space);

   LLVMSetInitializer(global, data);
   LLVMSetGlobalConstant(global, true);
   LLVMSetVisibility(global, LLVMHiddenVisibility);
   ctx->constant_data = global;
}

static void
setup_shared(struct ac_nir_context *ctx,
             struct nir_shader *nir)
{
   if (ctx->ac.lds)
      return;

   LLVMTypeRef type = LLVMArrayType(ctx->ac.i8,
                                    nir->info.cs.shared_size);

   LLVMValueRef lds =
      LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
                                  "compute_lds",
                                  AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(lds, 64 * 1024);

   ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, lds,
                                  LLVMPointerType(ctx->ac.i8,
                                                  AC_ADDR_SPACE_LDS), "");
}

void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                      const struct ac_shader_args *args, struct nir_shader *nir)
{
   struct ac_nir_context ctx = {};
   struct nir_function *func;

   ctx.ac = *ac;
   ctx.abi = abi;
   ctx.args = args;

   ctx.stage = nir->info.stage;
   ctx.info = &nir->info;

   ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

   nir_foreach_variable(variable, &nir->outputs)
      ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
                                   ctx.stage);

   ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);

   func = (struct nir_function *)exec_list_get_head(&nir->functions);

   nir_index_ssa_defs(func->impl);
   ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

   setup_locals(&ctx, func);
   setup_scratch(&ctx, nir);
   setup_constant_data(&ctx, nir);

   if (gl_shader_stage_is_compute(nir->info.stage))
      setup_shared(&ctx, nir);

   if (nir->info.stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_demote) {
      ctx.ac.postponed_kill = ac_build_alloca_undef(&ctx.ac, ac->i1, "");
      /* true = don't kill. */
      LLVMBuildStore(ctx.ac.builder, ctx.ac.i1true, ctx.ac.postponed_kill);
   }

   visit_cf_list(&ctx, &func->impl->body);
   phi_post_pass(&ctx);

   if (ctx.ac.postponed_kill)
      ac_build_kill_if_false(&ctx.ac, LLVMBuildLoad(ctx.ac.builder,
                                                    ctx.ac.postponed_kill, ""));

   if (!gl_shader_stage_is_compute(nir->info.stage))
      ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
                            ctx.abi->outputs);

   free(ctx.ssa_defs);
   ralloc_free(ctx.defs);
   ralloc_free(ctx.phis);
   ralloc_free(ctx.vars);
}

bool
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
   bool progress = false;

   /* Lower large variables to scratch first so that we won't bloat the
    * shader by generating large if ladders for them. We later lower
    * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
    */
   NIR_PASS(progress, nir, nir_lower_vars_to_scratch,
            nir_var_function_temp,
            256,
            glsl_get_natural_size_align_bytes);

   /* While it would be nice not to have this flag, we are constrained
    * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
    */
   bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

   /* TODO: Indirect indexing of GS inputs is unimplemented.
    *
    * TCS and TES load inputs directly from LDS or offchip memory, so
    * indirect indexing is trivial.
    */
   nir_variable_mode indirect_mask = 0;
   if (nir->info.stage == MESA_SHADER_GEOMETRY ||
       (nir->info.stage != MESA_SHADER_TESS_CTRL &&
        nir->info.stage != MESA_SHADER_TESS_EVAL &&
        !llvm_has_working_vgpr_indexing)) {
      indirect_mask |= nir_var_shader_in;
   }
   if (!llvm_has_working_vgpr_indexing &&
       nir->info.stage != MESA_SHADER_TESS_CTRL)
      indirect_mask |= nir_var_shader_out;

   /* TODO: We shouldn't need to do this, however LLVM isn't currently
    * smart enough to handle indirects without causing excess spilling
    * causing the gpu to hang.
    *
    * See the following thread for more details of the problem:
    * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
    */
   indirect_mask |= nir_var_function_temp;

   progress |= nir_lower_indirect_derefs(nir, indirect_mask);
   return progress;
}

static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
	if (intrin->intrinsic != nir_intrinsic_store_deref)
		return 0;

	nir_variable *var =
		nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

	if (var->data.mode != nir_var_shader_out)
		return 0;

	unsigned writemask = 0;
	const int location = var->data.location;
	unsigned first_component = var->data.location_frac;
	unsigned num_comps = intrin->dest.ssa.num_components;

	if (location == VARYING_SLOT_TESS_LEVEL_INNER)
		writemask = ((1 << (num_comps + 1)) - 1) << first_component;
	else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
		writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

	return writemask;
}
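
/* For reference (illustrative note): the mask returned above packs
 * gl_TessLevelInner channels into the low bits and gl_TessLevelOuter channels
 * into the same bit pattern shifted left by 4, so the two varyings never
 * alias in a single writemask. The callers below only OR, AND and compare
 * these masks; they never decode individual bits.
 */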
static void
scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
	       unsigned *cond_block_tf_writemask,
	       bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
{
	switch (cf_node->type) {
	case nir_cf_node_block: {
		nir_block *block = nir_cf_node_as_block(cf_node);
		nir_foreach_instr(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
			if (intrin->intrinsic == nir_intrinsic_control_barrier) {

				/* If we find a barrier in nested control flow put this in the
				 * too hard basket. In GLSL this is not possible but it is in
				 * SPIR-V.
				 */
				if (is_nested_cf) {
					*tessfactors_are_def_in_all_invocs = false;
					return;
				}

				/* The following case must be prevented:
				 *    gl_TessLevelInner = ...;
				 *    barrier();
				 *    if (gl_InvocationID == 1)
				 *       gl_TessLevelInner = ...;
				 *
				 * If you consider disjoint code segments separated by barriers, each
				 * such segment that writes tess factor channels should write the same
				 * channels in all codepaths within that segment.
				 */
				if (upper_block_tf_writemask || cond_block_tf_writemask) {
					/* Accumulate the result: */
					*tessfactors_are_def_in_all_invocs &=
						!(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

					/* Analyze the next code segment from scratch. */
					*upper_block_tf_writemask = 0;
					*cond_block_tf_writemask = 0;
				}
			} else
				*upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
		}

		break;
	}
	case nir_cf_node_if: {
		unsigned then_tessfactor_writemask = 0;
		unsigned else_tessfactor_writemask = 0;

		nir_if *if_stmt = nir_cf_node_as_if(cf_node);
		foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
			scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
			scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		if (then_tessfactor_writemask || else_tessfactor_writemask) {
			/* If both statements write the same tess factor channels,
			 * we can say that the upper block writes them too.
			 */
			*upper_block_tf_writemask |= then_tessfactor_writemask &
				else_tessfactor_writemask;
			*cond_block_tf_writemask |= then_tessfactor_writemask |
				else_tessfactor_writemask;
		}

		break;
	}
	case nir_cf_node_loop: {
		nir_loop *loop = nir_cf_node_as_loop(cf_node);
		foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
			scan_tess_ctrl(nested_node, cond_block_tf_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		break;
	}
	default:
		unreachable("unknown cf node type");
	}
}
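
/* For example (illustrative GLSL, in the spirit of the comments above):
 *
 *    if (gl_InvocationID == 0)
 *       gl_TessLevelOuter[0] = a;
 *    else
 *       gl_TessLevelOuter[0] = b;
 *
 * Both branches write the same channel, so it is promoted into
 * upper_block_tf_writemask, while a channel written by only one branch stays
 * in cond_block_tf_writemask and will later count against
 * tessfactors_are_def_in_all_invocs.
 */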
bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
	assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

	/* The pass works as follows:
	 * If all codepaths write tess factors, we can say that all
	 * invocations define tess factors.
	 *
	 * Each tess factor channel is tracked separately.
	 */
	unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
	unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

	/* Initial value = true. Here the pass will accumulate results from
	 * multiple segments surrounded by barriers. If tess factors aren't
	 * written at all, it's a shader bug and we don't care if this will be
	 * true.
	 */
	bool tessfactors_are_def_in_all_invocs = true;

	nir_foreach_function(function, nir) {
		if (function->impl) {
			foreach_list_typed(nir_cf_node, node, node,
					   &function->impl->body) {
				scan_tess_ctrl(node, &main_block_tf_writemask,
					       &cond_block_tf_writemask,
					       &tessfactors_are_def_in_all_invocs,
					       false);
			}
		}
	}

	/* Accumulate the result for the last code segment separated by a
	 * barrier.
	 */
	if (main_block_tf_writemask || cond_block_tf_writemask) {
		tessfactors_are_def_in_all_invocs &=
			!(cond_block_tf_writemask & ~main_block_tf_writemask);
	}

	return tessfactors_are_def_in_all_invocs;
}
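
/* As a concrete example (illustrative): a TCS that writes the factors from a
 * single invocation only,
 *
 *    if (gl_InvocationID == 0) {
 *       gl_TessLevelInner[0] = 1.0;
 *       gl_TessLevelOuter[0] = 1.0;
 *    }
 *
 * makes this function return false, whereas writing the same channels
 * unconditionally (or in every codepath between barriers) makes it return
 * true.
 */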