/*
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <llvm/Config/llvm-config.h>

#include "ac_nir_to_llvm.h"
#include "ac_llvm_build.h"
#include "ac_llvm_util.h"
#include "ac_binary.h"
#include "sid.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "util/bitscan.h"
#include "util/u_math.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
struct ac_nir_context {
	struct ac_llvm_context ac;
	struct ac_shader_abi *abi;

	gl_shader_stage stage;
	shader_info *info;

	LLVMValueRef *ssa_defs;

	LLVMValueRef constant_data;

	struct hash_table *defs;
	struct hash_table *phis;
	struct hash_table *vars;

	LLVMValueRef main_function;
	LLVMBasicBlockRef continue_block;
	LLVMBasicBlockRef break_block;
};
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     bool image, bool write);
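
/* Store each element of `vec` to values[i * value_stride]; the stride lets
 * callers scatter a vector across non-contiguous pointers. */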
static void
build_store_values_extended(struct ac_llvm_context *ac,
			    LLVMValueRef *values,
			    unsigned value_count,
			    unsigned value_stride,
			    LLVMValueRef vec)
{
	LLVMBuilderRef builder = ac->builder;
	unsigned i;

	for (i = 0; i < value_count; i++) {
		LLVMValueRef ptr = values[i * value_stride];
		LLVMValueRef index = LLVMConstInt(ac->i32, i, false);
		LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
		LLVMBuildStore(builder, value, ptr);
	}
}
static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
                                const nir_ssa_def *def)
{
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
	if (def->num_components > 1) {
		type = LLVMVectorType(type, def->num_components);
	}
	return type;
}
static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
{
	return nir->ssa_defs[src.ssa->index];
}
static LLVMValueRef
get_memory_ptr(struct ac_nir_context *ctx, nir_src src)
{
	LLVMValueRef ptr = get_src(ctx, src);
	ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ac.lds, &ptr, 1, "");
	int addr_space = LLVMGetPointerAddressSpace(LLVMTypeOf(ptr));

	return LLVMBuildBitCast(ctx->ac.builder, ptr,
				LLVMPointerType(ctx->ac.i32, addr_space), "");
}
static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
                                   const struct nir_block *b)
{
	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
	return (LLVMBasicBlockRef)entry->data;
}
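
/* Fetch an ALU source and apply its NIR swizzle: extract one element when
 * narrowing a vector to a scalar, splat when broadcasting a scalar to a
 * vector, and shufflevector for a general swizzle. */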
static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
                                nir_alu_src src,
                                unsigned num_components)
{
	LLVMValueRef value = get_src(ctx, src.src);
	bool need_swizzle = false;

	unsigned src_components = ac_get_llvm_num_components(value);
	for (unsigned i = 0; i < num_components; ++i) {
		assert(src.swizzle[i] < src_components);
		if (src.swizzle[i] != i)
			need_swizzle = true;
	}

	if (need_swizzle || num_components != src_components) {
		LLVMValueRef masks[] = {
		    LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
		    LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
		    LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
		    LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};

		if (src_components > 1 && num_components == 1) {
			value = LLVMBuildExtractElement(ctx->ac.builder, value,
			                                masks[0], "");
		} else if (src_components == 1 && num_components > 1) {
			LLVMValueRef values[] = {value, value, value, value};
			value = ac_build_gather_values(&ctx->ac, values, num_components);
		} else {
			LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
			value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
			                               swizzle, "");
		}
	}
	return value;
}
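
/* Booleans reaching this backend are 32-bit, so comparisons are lowered to
 * a select between all-ones (0xFFFFFFFF, true) and 0 (false). */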
static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
                                 LLVMIntPredicate pred, LLVMValueRef src0,
                                 LLVMValueRef src1)
{
	LLVMValueRef result = LLVMBuildICmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
	                       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
	                       ctx->i32_0, "");
}
static LLVMValueRef emit_float_cmp(struct ac_llvm_context *ctx,
                                   LLVMRealPredicate pred, LLVMValueRef src0,
                                   LLVMValueRef src1)
{
	LLVMValueRef result;
	src0 = ac_to_float(ctx, src0);
	src1 = ac_to_float(ctx, src1);
	result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
	return LLVMBuildSelect(ctx->builder, result,
	                       LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
	                       ctx->i32_0, "");
}
static LLVMValueRef emit_intrin_1f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
	                                     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_2f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
	                                     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_3f_param(struct ac_llvm_context *ctx,
                                         const char *intrin,
                                         LLVMTypeRef result_type,
                                         LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	char name[64];
	LLVMValueRef params[] = {
		ac_to_float(ctx, src0),
		ac_to_float(ctx, src1),
		ac_to_float(ctx, src2),
	};

	ASSERTED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
	                                     ac_get_elem_bits(ctx, result_type));
	assert(length < sizeof(name));
	return ac_build_intrinsic(ctx, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_bcsel(struct ac_llvm_context *ctx,
                               LLVMValueRef src0, LLVMValueRef src1, LLVMValueRef src2)
{
	assert(LLVMGetTypeKind(LLVMTypeOf(src0)) != LLVMVectorTypeKind);

	LLVMValueRef v = LLVMBuildICmp(ctx->builder, LLVMIntNE, src0,
	                               ctx->i32_0, "");
	return LLVMBuildSelect(ctx->builder, v,
	                       ac_to_integer_or_pointer(ctx, src1),
	                       ac_to_integer_or_pointer(ctx, src2), "");
}
static LLVMValueRef emit_iabs(struct ac_llvm_context *ctx,
                              LLVMValueRef src0)
{
	return ac_build_imax(ctx, src0, LLVMBuildNeg(ctx->builder, src0, ""));
}
static LLVMValueRef emit_uint_carry(struct ac_llvm_context *ctx,
                                    const char *intrin,
                                    LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMTypeRef ret_type;
	LLVMTypeRef types[] = { ctx->i32, ctx->i1 };
	LLVMValueRef res;
	LLVMValueRef params[] = { src0, src1 };
	ret_type = LLVMStructTypeInContext(ctx->context, types,
	                                   2, false);

	res = ac_build_intrinsic(ctx, intrin, ret_type,
	                         params, 2, AC_FUNC_ATTR_READNONE);

	res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
	res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
	return res;
}
static LLVMValueRef emit_b2f(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0,
	                                   LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""),
	                                   "");
	result = LLVMBuildBitCast(ctx->builder, result, ctx->f32, "");

	switch (bitsize) {
	case 16:
		return LLVMBuildFPTrunc(ctx->builder, result, ctx->f16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildFPExt(ctx->builder, result, ctx->f64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
	src0 = ac_to_float(ctx, src0);
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
	                     LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, zero, ""),
	                     ctx->i32, "");
}
static LLVMValueRef emit_b2i(struct ac_llvm_context *ctx,
                             LLVMValueRef src0,
                             unsigned bitsize)
{
	LLVMValueRef result = LLVMBuildAnd(ctx->builder, src0, ctx->i32_1, "");

	switch (bitsize) {
	case 8:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i8, "");
	case 16:
		return LLVMBuildTrunc(ctx->builder, result, ctx->i16, "");
	case 32:
		return result;
	case 64:
		return LLVMBuildZExt(ctx->builder, result, ctx->i64, "");
	default:
		unreachable("Unsupported bit size.");
	}
}
static LLVMValueRef emit_i2b(struct ac_llvm_context *ctx,
                             LLVMValueRef src0)
{
	LLVMValueRef zero = LLVMConstNull(LLVMTypeOf(src0));
	return LLVMBuildSExt(ctx->builder,
	                     LLVMBuildICmp(ctx->builder, LLVMIntNE, src0, zero, ""),
	                     ctx->i32, "");
}
static LLVMValueRef emit_f2f16(struct ac_llvm_context *ctx,
                               LLVMValueRef src0)
{
	LLVMValueRef result;
	LLVMValueRef cond = NULL;

	src0 = ac_to_float(ctx, src0);
	result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");

	if (ctx->chip_class >= GFX8) {
		LLVMValueRef args[2];
		/* Check if the result is a denormal - and flush to 0 if so. */
		args[0] = result;
		args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
		cond = ac_build_intrinsic(ctx, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
	}

	/* need to convert back up to f32 */
	result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");

	if (ctx->chip_class >= GFX8)
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	else {
		/* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
		 * so compare the result and flush to 0 if it's smaller.
		 */
		LLVMValueRef temp, cond2;
		temp = emit_intrin_1f_param(ctx, "llvm.fabs", ctx->f32, result);
		cond = LLVMBuildFCmp(ctx->builder, LLVMRealUGT,
				     LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
				     temp, "");
		cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
				      temp, ctx->f32_0, "");
		cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
		result = LLVMBuildSelect(ctx->builder, cond, ctx->f32_0, result, "");
	}
	return result;
}
static LLVMValueRef emit_umul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildZExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildZExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildLShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}
static LLVMValueRef emit_imul_high(struct ac_llvm_context *ctx,
                                   LLVMValueRef src0, LLVMValueRef src1)
{
	LLVMValueRef dst64, result;
	src0 = LLVMBuildSExt(ctx->builder, src0, ctx->i64, "");
	src1 = LLVMBuildSExt(ctx->builder, src1, ctx->i64, "");

	dst64 = LLVMBuildMul(ctx->builder, src0, src1, "");
	dst64 = LLVMBuildAShr(ctx->builder, dst64, LLVMConstInt(ctx->i64, 32, false), "");
	result = LLVMBuildTrunc(ctx->builder, dst64, ctx->i32, "");
	return result;
}
static LLVMValueRef emit_bfm(struct ac_llvm_context *ctx,
                             LLVMValueRef bits, LLVMValueRef offset)
{
	/* mask = ((1 << bits) - 1) << offset */
	return LLVMBuildShl(ctx->builder,
	                    LLVMBuildSub(ctx->builder,
	                                 LLVMBuildShl(ctx->builder,
	                                              ctx->i32_1,
	                                              bits, ""),
	                                 ctx->i32_1, ""),
	                    offset, "");
}
static LLVMValueRef emit_bitfield_select(struct ac_llvm_context *ctx,
                                         LLVMValueRef mask, LLVMValueRef insert,
                                         LLVMValueRef base)
{
	/* Calculate:
	 *   (mask & insert) | (~mask & base) = base ^ (mask & (insert ^ base))
	 * Use the right-hand side, which the LLVM backend can convert to V_BFI.
	 */
	return LLVMBuildXor(ctx->builder, base,
	                    LLVMBuildAnd(ctx->builder, mask,
	                                 LLVMBuildXor(ctx->builder, insert, base, ""), ""), "");
}
static LLVMValueRef emit_pack_2x16(struct ac_llvm_context *ctx,
				   LLVMValueRef src0,
				   LLVMValueRef (*pack)(struct ac_llvm_context *ctx,
							LLVMValueRef args[2]))
{
	LLVMValueRef comp[2];

	src0 = ac_to_float(ctx, src0);
	comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
	comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");

	return LLVMBuildBitCast(ctx->builder, pack(ctx, comp), ctx->i32, "");
}
static LLVMValueRef emit_unpack_half_2x16(struct ac_llvm_context *ctx,
                                          LLVMValueRef src0)
{
	LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
	LLVMValueRef temps[2], val;
	int i;

	for (i = 0; i < 2; i++) {
		val = i == 1 ? LLVMBuildLShr(ctx->builder, src0, const16, "") : src0;
		val = LLVMBuildTrunc(ctx->builder, val, ctx->i16, "");
		val = LLVMBuildBitCast(ctx->builder, val, ctx->f16, "");
		temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
	}
	return ac_build_gather_values(ctx, temps, 2);
}
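
/* Screen-space derivatives: `mask` selects which quad lane supplies the base
 * value and `idx` picks the neighbouring pixel in X (1) or Y (2); the actual
 * lane swizzles within the 2x2 pixel quad happen in ac_build_ddxy. */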
static LLVMValueRef emit_ddxy(struct ac_nir_context *ctx,
                              nir_op op,
                              LLVMValueRef src0)
{
	unsigned mask;
	int idx;
	LLVMValueRef result;

	if (op == nir_op_fddx_fine)
		mask = AC_TID_MASK_LEFT;
	else if (op == nir_op_fddy_fine)
		mask = AC_TID_MASK_TOP;
	else
		mask = AC_TID_MASK_TOP_LEFT;

	/* for DDX we want the next X pixel, for DDY the next Y pixel. */
	if (op == nir_op_fddx_fine ||
	    op == nir_op_fddx_coarse ||
	    op == nir_op_fddx)
		idx = 1;
	else
		idx = 2;

	result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
	return result;
}
static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
{
	LLVMValueRef src[4], result = NULL;
	unsigned num_components = instr->dest.dest.ssa.num_components;
	unsigned src_components;
	LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);

	assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
	switch (instr->op) {
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		src_components = 1;
		break;
	case nir_op_pack_half_2x16:
	case nir_op_pack_snorm_2x16:
	case nir_op_pack_unorm_2x16:
		src_components = 2;
		break;
	case nir_op_unpack_half_2x16:
		src_components = 1;
		break;
	case nir_op_cube_face_coord:
	case nir_op_cube_face_index:
		src_components = 3;
		break;
	default:
		src_components = num_components;
		break;
	}
	for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
		src[i] = get_alu_src(ctx, instr->src[i], src_components);

	switch (instr->op) {
	case nir_op_mov:
		result = src[0];
		break;
	case nir_op_fneg:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
		break;
	case nir_op_ineg:
		result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
		break;
	case nir_op_inot:
		result = LLVMBuildNot(ctx->ac.builder, src[0], "");
		break;
	case nir_op_iadd:
		result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fadd:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fsub:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_isub:
		result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imul:
		result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_imod:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_umod:
		result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_irem:
		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_idiv:
		result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_udiv:
		result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_fmul:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_frcp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(src[0]), 1.0), src[0]);
		break;
	case nir_op_iand:
		result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ior:
		result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ixor:
		result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishl:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildShl(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ishr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildAShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ushr:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) < ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildZExt(ctx->ac.builder, src[1],
					       LLVMTypeOf(src[0]), "");
		else if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[1])) > ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])))
			src[1] = LLVMBuildTrunc(ctx->ac.builder, src[1],
						LLVMTypeOf(src[0]), "");
		result = LLVMBuildLShr(ctx->ac.builder, src[0], src[1], "");
		break;
	case nir_op_ilt32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
		break;
	case nir_op_ine32:
		result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
		break;
	case nir_op_ieq32:
		result = emit_int_cmp(&ctx->ac, LLVMIntEQ, src[0], src[1]);
		break;
	case nir_op_ige32:
		result = emit_int_cmp(&ctx->ac, LLVMIntSGE, src[0], src[1]);
		break;
	case nir_op_ult32:
		result = emit_int_cmp(&ctx->ac, LLVMIntULT, src[0], src[1]);
		break;
	case nir_op_uge32:
		result = emit_int_cmp(&ctx->ac, LLVMIntUGE, src[0], src[1]);
		break;
	case nir_op_feq32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
		break;
	case nir_op_fne32:
		result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
		break;
	case nir_op_flt32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOLT, src[0], src[1]);
		break;
	case nir_op_fge32:
		result = emit_float_cmp(&ctx->ac, LLVMRealOGE, src[0], src[1]);
		break;
	case nir_op_fabs:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_iabs:
		result = emit_iabs(&ctx->ac, src[0]);
		break;
	case nir_op_imax:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imin:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umax:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_umin:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_isign:
		result = ac_build_isign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsign:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fsign(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_ffloor:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ftrunc:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fceil:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fround_even:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_ffract:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_fract(&ctx->ac, src[0],
					instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fsin:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fcos:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fsqrt:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_fexp2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_flog2:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		break;
	case nir_op_frsq:
		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
		                              ac_to_float_type(&ctx->ac, def_type), src[0]);
		result = ac_build_fdiv(&ctx->ac, LLVMConstReal(LLVMTypeOf(result), 1.0), result);
		break;
	case nir_op_frexp_exp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_exp(&ctx->ac, src[0],
					    ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])));
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) == 16)
			result = LLVMBuildSExt(ctx->ac.builder, result,
					       ctx->ac.i32, "");
		break;
	case nir_op_frexp_sig:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = ac_build_frexp_mant(&ctx->ac, src[0],
					     instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_fpow:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		break;
	case nir_op_fmax:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
						      ac_to_float_type(&ctx->ac, def_type),
						      result);
		}
		break;
	case nir_op_fmin:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		if (ctx->ac.chip_class < GFX9 &&
		    instr->dest.dest.ssa.bit_size == 32) {
			/* Only pre-GFX9 chips do not flush denorms. */
			result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
						      ac_to_float_type(&ctx->ac, def_type),
						      result);
		}
		break;
	case nir_op_ffma:
		/* FMA is better on GFX10, because it has FMA units instead of MUL-ADD units. */
		result = emit_intrin_3f_param(&ctx->ac, ctx->ac.chip_class >= GFX10 ? "llvm.fma" : "llvm.fmuladd",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
		break;
	case nir_op_ldexp:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, def_type) == 32)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f32", ctx->ac.f32, src, 2, AC_FUNC_ATTR_READNONE);
		else if (ac_get_elem_bits(&ctx->ac, def_type) == 16)
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f16", ctx->ac.f16, src, 2, AC_FUNC_ATTR_READNONE);
		else
			result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ldexp.f64", ctx->ac.f64, src, 2, AC_FUNC_ATTR_READNONE);
		break;
	case nir_op_bfm:
		result = emit_bfm(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_bitfield_select:
		result = emit_bitfield_select(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_ubfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], false);
		break;
	case nir_op_ibfe:
		result = ac_build_bfe(&ctx->ac, src[0], src[1], src[2], true);
		break;
	case nir_op_bitfield_reverse:
		result = ac_build_bitfield_reverse(&ctx->ac, src[0]);
		break;
	case nir_op_bit_count:
		result = ac_build_bit_count(&ctx->ac, src[0]);
		break;
	case nir_op_vec2:
	case nir_op_vec3:
	case nir_op_vec4:
		for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
			src[i] = ac_to_integer(&ctx->ac, src[i]);
		result = ac_build_gather_values(&ctx->ac, src, num_components);
		break;
	case nir_op_f2i8:
	case nir_op_f2i16:
	case nir_op_f2i32:
	case nir_op_f2i64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_f2u8:
	case nir_op_f2u16:
	case nir_op_f2u32:
	case nir_op_f2u64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2f16:
	case nir_op_i2f32:
	case nir_op_i2f64:
		result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2f16:
	case nir_op_u2f32:
	case nir_op_u2f64:
		result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_f2f16_rtz:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (LLVMTypeOf(src[0]) == ctx->ac.f64)
			src[0] = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ctx->ac.f32, "");
		LLVMValueRef param[2] = { src[0], ctx->ac.f32_0 };
		result = ac_build_cvt_pkrtz_f16(&ctx->ac, param);
		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
		break;
	case nir_op_f2f16_rtne:
	case nir_op_f2f16:
	case nir_op_f2f32:
	case nir_op_f2f64:
		src[0] = ac_to_float(&ctx->ac, src[0]);
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		else
			result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
		break;
	case nir_op_u2u8:
	case nir_op_u2u16:
	case nir_op_u2u32:
	case nir_op_u2u64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_i2i8:
	case nir_op_i2i16:
	case nir_op_i2i32:
	case nir_op_i2i64:
		if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < ac_get_elem_bits(&ctx->ac, def_type))
			result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
		else
			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
		break;
	case nir_op_b32csel:
		result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
		break;
	case nir_op_find_lsb:
		result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
		break;
	case nir_op_ufind_msb:
		result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_ifind_msb:
		result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
		break;
	case nir_op_uadd_carry:
		result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_usub_borrow:
		result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
		break;
	case nir_op_b2f16:
	case nir_op_b2f32:
	case nir_op_b2f64:
		result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_f2b32:
		result = emit_f2b(&ctx->ac, src[0]);
		break;
	case nir_op_b2i8:
	case nir_op_b2i16:
	case nir_op_b2i32:
	case nir_op_b2i64:
		result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
		break;
	case nir_op_i2b32:
		result = emit_i2b(&ctx->ac, src[0]);
		break;
	case nir_op_fquantize2f16:
		result = emit_f2f16(&ctx->ac, src[0]);
		break;
	case nir_op_umul_high:
		result = emit_umul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_imul_high:
		result = emit_imul_high(&ctx->ac, src[0], src[1]);
		break;
	case nir_op_pack_half_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pkrtz_f16);
		break;
	case nir_op_pack_snorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_i16);
		break;
	case nir_op_pack_unorm_2x16:
		result = emit_pack_2x16(&ctx->ac, src[0], ac_build_cvt_pknorm_u16);
		break;
	case nir_op_unpack_half_2x16:
		result = emit_unpack_half_2x16(&ctx->ac, src[0]);
		break;
	case nir_op_fddx:
	case nir_op_fddy:
	case nir_op_fddx_fine:
	case nir_op_fddy_fine:
	case nir_op_fddx_coarse:
	case nir_op_fddy_coarse:
		result = emit_ddxy(ctx, instr->op, src[0]);
		break;

	case nir_op_unpack_64_2x32_split_x: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}

	case nir_op_unpack_64_2x32_split_y: {
		assert(ac_get_llvm_num_components(src[0]) == 1);
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i32,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}

	case nir_op_pack_64_2x32_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i64, "");
		break;
	}

	case nir_op_pack_32_2x16_split: {
		LLVMValueRef tmp = ac_build_gather_values(&ctx->ac, src, 2);
		result = LLVMBuildBitCast(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}

	case nir_op_unpack_32_2x16_split_x: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_0, "");
		break;
	}

	case nir_op_unpack_32_2x16_split_y: {
		LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
						    ctx->ac.v2i16,
						    "");
		result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
						 ctx->ac.i32_1, "");
		break;
	}

	case nir_op_cube_face_coord: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef results[2];
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
						ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		LLVMValueRef ma = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubema",
						     ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		results[0] = ac_build_fdiv(&ctx->ac, results[0], ma);
		results[1] = ac_build_fdiv(&ctx->ac, results[1], ma);
		LLVMValueRef offset = LLVMConstReal(ctx->ac.f32, 0.5);
		results[0] = LLVMBuildFAdd(ctx->ac.builder, results[0], offset, "");
		results[1] = LLVMBuildFAdd(ctx->ac.builder, results[1], offset, "");
		result = ac_build_gather_values(&ctx->ac, results, 2);
		break;
	}

	case nir_op_cube_face_index: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		LLVMValueRef in[3];
		for (unsigned chan = 0; chan < 3; chan++)
			in[chan] = ac_llvm_extract_elem(&ctx->ac, src[0], chan);
		result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
					    ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
		break;
	}

	case nir_op_fmin3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
		                              ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umin3:
		result = ac_build_umin(&ctx->ac, src[0], src[1]);
		result = ac_build_umin(&ctx->ac, result, src[2]);
		break;
	case nir_op_imin3:
		result = ac_build_imin(&ctx->ac, src[0], src[1]);
		result = ac_build_imin(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmax3:
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
		                              ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
		                              ac_to_float_type(&ctx->ac, def_type), result, src[2]);
		break;
	case nir_op_umax3:
		result = ac_build_umax(&ctx->ac, src[0], src[1]);
		result = ac_build_umax(&ctx->ac, result, src[2]);
		break;
	case nir_op_imax3:
		result = ac_build_imax(&ctx->ac, src[0], src[1]);
		result = ac_build_imax(&ctx->ac, result, src[2]);
		break;
	case nir_op_fmed3: {
		src[0] = ac_to_float(&ctx->ac, src[0]);
		src[1] = ac_to_float(&ctx->ac, src[1]);
		src[2] = ac_to_float(&ctx->ac, src[2]);
		result = ac_build_fmed3(&ctx->ac, src[0], src[1], src[2],
					instr->dest.dest.ssa.bit_size);
		break;
	}
	case nir_op_imed3: {
		LLVMValueRef tmp1 = ac_build_imin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_imax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_imin(&ctx->ac, tmp2, src[2]);
		result = ac_build_imax(&ctx->ac, tmp1, tmp2);
		break;
	}
	case nir_op_umed3: {
		LLVMValueRef tmp1 = ac_build_umin(&ctx->ac, src[0], src[1]);
		LLVMValueRef tmp2 = ac_build_umax(&ctx->ac, src[0], src[1]);
		tmp2 = ac_build_umin(&ctx->ac, tmp2, src[2]);
		result = ac_build_umax(&ctx->ac, tmp1, tmp2);
		break;
	}

	default:
		fprintf(stderr, "Unknown NIR alu instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}

	if (result) {
		assert(instr->dest.dest.is_ssa);
		result = ac_to_integer_or_pointer(&ctx->ac, result);
		ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
	}
}
static void visit_load_const(struct ac_nir_context *ctx,
                             const nir_load_const_instr *instr)
{
	LLVMValueRef values[4], value = NULL;
	LLVMTypeRef element_type =
	    LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);

	for (unsigned i = 0; i < instr->def.num_components; ++i) {
		switch (instr->def.bit_size) {
		case 8:
			values[i] = LLVMConstInt(element_type,
			                         instr->value[i].u8, false);
			break;
		case 16:
			values[i] = LLVMConstInt(element_type,
			                         instr->value[i].u16, false);
			break;
		case 32:
			values[i] = LLVMConstInt(element_type,
			                         instr->value[i].u32, false);
			break;
		case 64:
			values[i] = LLVMConstInt(element_type,
			                         instr->value[i].u64, false);
			break;
		default:
			fprintf(stderr,
			        "unsupported nir load_const bit_size: %d\n",
			        instr->def.bit_size);
			abort();
		}
	}
	if (instr->def.num_components > 1) {
		value = LLVMConstVector(values, instr->def.num_components);
	} else
		value = values[0];

	ctx->ssa_defs[instr->def.index] = value;
}
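
/* The size lives in dword 2 of the buffer descriptor; the GFX8 path below
 * converts it from bytes to elements for TXQ by dividing by the stride
 * packed into dword 1. */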
static LLVMValueRef
get_buffer_size(struct ac_nir_context *ctx, LLVMValueRef descriptor, bool in_elements)
{
	LLVMValueRef size =
		LLVMBuildExtractElement(ctx->ac.builder, descriptor,
					LLVMConstInt(ctx->ac.i32, 2, false), "");

	if (ctx->ac.chip_class == GFX8 && in_elements) {
		/* On GFX8, the descriptor contains the size in bytes,
		 * but TXQ must return the size in elements.
		 * The stride is always non-zero for resources using TXQ.
		 */
		LLVMValueRef stride =
			LLVMBuildExtractElement(ctx->ac.builder, descriptor,
						ctx->ac.i32_1, "");
		stride = LLVMBuildLShr(ctx->ac.builder, stride,
				       LLVMConstInt(ctx->ac.i32, 16, false), "");
		stride = LLVMBuildAnd(ctx->ac.builder, stride,
				      LLVMConstInt(ctx->ac.i32, 0x3fff, false), "");

		size = LLVMBuildUDiv(ctx->ac.builder, size, stride, "");
	}
	return size;
}
/* Gather4 should follow the same rules as bilinear filtering, but the hardware
 * incorrectly forces nearest filtering if the texture format is integer.
 * The only effect it has on Gather4, which always returns 4 texels for
 * bilinear filtering, is that the final coordinates are off by 0.5 of
 * the texel size.
 *
 * The workaround is to subtract 0.5 from the unnormalized coordinates,
 * or (0.5 / size) from the normalized coordinates.
 *
 * However, cube textures with 8_8_8_8 data formats require a different
 * workaround of overriding the num format to USCALED/SSCALED. This would lose
 * precision in 32-bit data formats, so it needs to be applied dynamically at
 * runtime. In this case, return an i1 value that indicates whether the
 * descriptor was overridden (and hence a fixup of the sampler result is needed).
 */
static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx,
					  nir_variable *var,
					  struct ac_image_args *args,
					  const nir_tex_instr *instr)
{
	const struct glsl_type *type = glsl_without_array(var->type);
	enum glsl_base_type stype = glsl_get_sampler_result_type(type);
	LLVMValueRef wa_8888 = NULL;
	LLVMValueRef half_texel[2];
	LLVMValueRef result;

	assert(stype == GLSL_TYPE_INT || stype == GLSL_TYPE_UINT);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef formats;
		LLVMValueRef data_format;
		LLVMValueRef wa_formats;

		formats = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32_1, "");

		data_format = LLVMBuildLShr(ctx->builder, formats,
					    LLVMConstInt(ctx->i32, 20, false), "");
		data_format = LLVMBuildAnd(ctx->builder, data_format,
					   LLVMConstInt(ctx->i32, (1u << 6) - 1, false), "");
		wa_8888 = LLVMBuildICmp(
			ctx->builder, LLVMIntEQ, data_format,
			LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false),
			"");

		uint32_t wa_num_format =
			stype == GLSL_TYPE_UINT ?
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
			S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
		wa_formats = LLVMBuildAnd(ctx->builder, formats,
					  LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
					  "");
		wa_formats = LLVMBuildOr(ctx->builder, wa_formats,
					 LLVMConstInt(ctx->i32, wa_num_format, false), "");

		formats = LLVMBuildSelect(ctx->builder, wa_8888, wa_formats, formats, "");
		args->resource = LLVMBuildInsertElement(
			ctx->builder, args->resource, formats, ctx->i32_1, "");
	}

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
		half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
	} else {
		struct ac_image_args resinfo = {};
		LLVMBasicBlockRef bbs[2];

		LLVMValueRef unnorm = NULL;
		LLVMValueRef default_offset = ctx->f32_0;
		if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D &&
		    !instr->is_array) {
			/* In vulkan, whether the sampler uses unnormalized
			 * coordinates or not is a dynamic property of the
			 * sampler. Hence, to figure out whether or not we
			 * need to divide by the texture size, we need to test
			 * the sampler at runtime. This tests the bit set by
			 * radv_init_sampler().
			 */
			LLVMValueRef sampler0 =
				LLVMBuildExtractElement(ctx->builder, args->sampler, ctx->i32_0, "");
			sampler0 = LLVMBuildLShr(ctx->builder, sampler0,
						 LLVMConstInt(ctx->i32, 15, false), "");
			sampler0 = LLVMBuildAnd(ctx->builder, sampler0, ctx->i32_1, "");
			unnorm = LLVMBuildICmp(ctx->builder, LLVMIntEQ, sampler0, ctx->i32_1, "");
			default_offset = LLVMConstReal(ctx->f32, -0.5);
		}

		bbs[0] = LLVMGetInsertBlock(ctx->builder);
		if (wa_8888 || unnorm) {
			assert(!(wa_8888 && unnorm));
			LLVMValueRef not_needed = wa_8888 ? wa_8888 : unnorm;
			/* Skip the texture size query entirely if we don't need it. */
			ac_build_ifcc(ctx, LLVMBuildNot(ctx->builder, not_needed, ""), 2000);
			bbs[1] = LLVMGetInsertBlock(ctx->builder);
		}

		/* Query the texture size. */
		resinfo.dim = ac_get_sampler_dim(ctx->chip_class, instr->sampler_dim, instr->is_array);
		resinfo.opcode = ac_image_get_resinfo;
		resinfo.dmask = 0xf;
		resinfo.lod = ctx->i32_0;
		resinfo.resource = args->resource;
		resinfo.attributes = AC_FUNC_ATTR_READNONE;
		LLVMValueRef size = ac_build_image_opcode(ctx, &resinfo);

		/* Compute -0.5 / size. */
		for (unsigned c = 0; c < 2; c++) {
			half_texel[c] =
				LLVMBuildExtractElement(ctx->builder, size,
							LLVMConstInt(ctx->i32, c, 0), "");
			half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
			half_texel[c] = ac_build_fdiv(ctx, ctx->f32_1, half_texel[c]);
			half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
						      LLVMConstReal(ctx->f32, -0.5), "");
		}

		if (wa_8888 || unnorm) {
			ac_build_endif(ctx, 2000);

			for (unsigned c = 0; c < 2; c++) {
				LLVMValueRef values[2] = { default_offset, half_texel[c] };
				half_texel[c] = ac_build_phi(ctx, ctx->f32, 2,
							     values, bbs);
			}
		}
	}

	for (unsigned c = 0; c < 2; c++) {
		LLVMValueRef tmp;
		tmp = LLVMBuildBitCast(ctx->builder, args->coords[c], ctx->f32, "");
		args->coords[c] = LLVMBuildFAdd(ctx->builder, tmp, half_texel[c], "");
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	result = ac_build_image_opcode(ctx, args);

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
		LLVMValueRef tmp, tmp2;

		/* if the cube workaround is in place, f2i the result. */
		for (unsigned c = 0; c < 4; c++) {
			tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
			if (stype == GLSL_TYPE_UINT)
				tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
			else
				tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
			tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
			tmp = LLVMBuildSelect(ctx->builder, wa_8888, tmp2, tmp, "");
			tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
			result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
		}
	}
	return result;
}
static nir_deref_instr *get_tex_texture_deref(const nir_tex_instr *instr)
{
	nir_deref_instr *texture_deref_instr = NULL;

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		default:
			break;
		}
	}
	return texture_deref_instr;
}
static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx,
					const nir_tex_instr *instr,
					struct ac_image_args *args)
{
	if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);

		return ac_build_buffer_load_format(&ctx->ac,
						   args->resource,
						   args->coords[0],
						   ctx->ac.i32_0,
						   util_last_bit(mask),
						   0, true);
	}

	args->opcode = ac_image_sample;

	switch (instr->op) {
	case nir_texop_txf:
	case nir_texop_txf_ms:
	case nir_texop_samples_identical:
		args->opcode = args->level_zero ||
			       instr->sampler_dim == GLSL_SAMPLER_DIM_MS ?
					ac_image_load : ac_image_load_mip;
		args->level_zero = false;
		break;
	case nir_texop_txs:
	case nir_texop_query_levels:
		args->opcode = ac_image_get_resinfo;
		if (!args->lod)
			args->lod = ctx->ac.i32_0;
		args->level_zero = false;
		break;
	case nir_texop_tex:
		if (ctx->stage != MESA_SHADER_FRAGMENT) {
			assert(!args->lod);
			args->level_zero = true;
		}
		break;
	case nir_texop_tg4:
		args->opcode = ac_image_gather4;
		args->level_zero = true;
		break;
	case nir_texop_lod:
		args->opcode = ac_image_get_lod;
		break;
	default:
		break;
	}

	if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= GFX8) {
		nir_deref_instr *texture_deref_instr = get_tex_texture_deref(instr);
		nir_variable *var = nir_deref_instr_get_variable(texture_deref_instr);
		const struct glsl_type *type = glsl_without_array(var->type);
		enum glsl_base_type stype = glsl_get_sampler_result_type(type);
		if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
			return lower_gather4_integer(&ctx->ac, var, args, instr);
		}
	}

	/* Fixup for GFX9 which allocates 1D textures as 2D. */
	if (instr->op == nir_texop_lod && ctx->ac.chip_class == GFX9) {
		if ((args->dim == ac_image_2darray ||
		     args->dim == ac_image_2d) && !args->coords[1]) {
			args->coords[1] = ctx->ac.i32_0;
		}
	}

	args->attributes = AC_FUNC_ATTR_READNONE;
	bool cs_derivs = ctx->stage == MESA_SHADER_COMPUTE &&
			 ctx->info->cs.derivative_group != DERIVATIVE_GROUP_NONE;
	if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
		/* Prevent texture instructions with implicit derivatives from being
		 * sunk into branches. */
		switch (instr->op) {
		case nir_texop_tex:
		case nir_texop_lod:
			args->attributes |= AC_FUNC_ATTR_CONVERGENT;
			break;
		default:
			break;
		}
	}

	return ac_build_image_opcode(&ctx->ac, args);
}
static LLVMValueRef visit_vulkan_resource_reindex(struct ac_nir_context *ctx,
                                                  nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr = get_src(ctx, instr->src[0]);
	LLVMValueRef index = get_src(ctx, instr->src[1]);

	LLVMValueRef result = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
	LLVMSetMetadata(result, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
	return result;
}
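
/* Push constants promoted to user SGPRs by the ABI can be returned directly
 * from inline_push_consts; otherwise fall back to a memory load. The 8-bit
 * and 16-bit paths below load whole dwords and realign the bytes in
 * registers (alignbyte / shuffles) since the load itself is dword-based. */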
static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx,
                                             nir_intrinsic_instr *instr)
{
	LLVMValueRef ptr, addr;
	LLVMValueRef src0 = get_src(ctx, instr->src[0]);
	unsigned index = nir_intrinsic_base(instr);

	addr = LLVMConstInt(ctx->ac.i32, index, 0);
	addr = LLVMBuildAdd(ctx->ac.builder, addr, src0, "");

	/* Load constant values from user SGPRS when possible, otherwise
	 * fallback to the default path that loads directly from memory.
	 */
	if (LLVMIsConstant(src0) &&
	    instr->dest.ssa.bit_size == 32) {
		unsigned count = instr->dest.ssa.num_components;
		unsigned offset = index;

		offset += LLVMConstIntGetZExtValue(src0);
		offset /= 4;

		offset -= ctx->abi->base_inline_push_consts;

		if (offset + count <= ctx->abi->num_inline_push_consts) {
			return ac_build_gather_values(&ctx->ac,
						      ctx->abi->inline_push_consts + offset,
						      count);
		}
	}

	ptr = LLVMBuildGEP(ctx->ac.builder, ctx->abi->push_constants, &addr, 1, "");

	if (instr->dest.ssa.bit_size == 8) {
		unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
		LLVMTypeRef vec_type = LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), 4 * load_dwords);
		ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
		LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");

		LLVMValueRef params[3];
		if (load_dwords > 1) {
			LLVMValueRef res_vec = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(ctx->ac.i32, 2), "");
			params[0] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 1, false), "");
			params[1] = LLVMBuildExtractElement(ctx->ac.builder, res_vec, LLVMConstInt(ctx->ac.i32, 0, false), "");
		} else {
			res = LLVMBuildBitCast(ctx->ac.builder, res, ctx->ac.i32, "");
			params[0] = ctx->ac.i32_0;
			params[1] = res;
		}
		params[2] = addr;
		res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.alignbyte", ctx->ac.i32, params, 3, 0);

		res = LLVMBuildTrunc(ctx->ac.builder, res, LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
		if (instr->dest.ssa.num_components > 1)
			res = LLVMBuildBitCast(ctx->ac.builder, res, LLVMVectorType(LLVMInt8TypeInContext(ctx->ac.context), instr->dest.ssa.num_components), "");
		return res;
	} else if (instr->dest.ssa.bit_size == 16) {
		unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
		LLVMTypeRef vec_type = LLVMVectorType(LLVMInt16TypeInContext(ctx->ac.context), 2 * load_dwords);
		ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
		LLVMValueRef res = LLVMBuildLoad(ctx->ac.builder, ptr, "");
		res = LLVMBuildBitCast(ctx->ac.builder, res, vec_type, "");
		LLVMValueRef cond = LLVMBuildLShr(ctx->ac.builder, addr, ctx->ac.i32_1, "");
		cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->ac.i1, "");
		LLVMValueRef mask[] = { LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
					LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
					LLVMConstInt(ctx->ac.i32, 4, false)};
		LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
		LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
		LLVMValueRef shuffle_aligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
		LLVMValueRef shuffle_unaligned = LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
		res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
		return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
	}

	ptr = ac_cast_ptr(&ctx->ac, ptr, get_def_type(ctx, &instr->dest.ssa));

	return LLVMBuildLoad(ctx->ac.builder, ptr, "");
}
static LLVMValueRef visit_get_buffer_size(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
	LLVMValueRef index = get_src(ctx, instr->src[0]);

	return get_buffer_size(ctx, ctx->abi->load_ssbo(ctx->abi, index, false), false);
}
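
/* Expand each set bit of a writemask to `multiplier` bits, e.g.
 * widen_mask(0b0101, 2) == 0b00110011 -- used when components are split
 * into several smaller ones. */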
static uint32_t widen_mask(uint32_t mask, unsigned multiplier)
{
	uint32_t new_mask = 0;
	for(unsigned i = 0; i < 32 && (1u << i) <= mask; ++i)
		if (mask & (1u << i))
			new_mask |= ((1u << multiplier) - 1u) << (i * multiplier);
	return new_mask;
}
static LLVMValueRef extract_vector_range(struct ac_llvm_context *ctx, LLVMValueRef src,
                                         unsigned start, unsigned count)
{
	LLVMValueRef mask[] = {
	ctx->i32_0, ctx->i32_1,
	LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false) };

	unsigned src_elements = ac_get_llvm_num_components(src);

	if (count == src_elements) {
		assert(start == 0);
		return src;
	} else if (count == 1) {
		assert(start < src_elements);
		return LLVMBuildExtractElement(ctx->builder, src, mask[start], "");
	} else {
		assert(start + count <= src_elements);
		LLVMValueRef swizzle = LLVMConstVector(&mask[start], count);
		return LLVMBuildShuffleVector(ctx->builder, src, src, swizzle, "");
	}
}
static unsigned get_cache_policy(struct ac_nir_context *ctx,
				 enum gl_access_qualifier access,
				 bool may_store_unaligned,
				 bool writeonly_memory)
{
	unsigned cache_policy = 0;

	/* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores. All
	 * store opcodes not aligned to a dword are affected. The only way to
	 * get unaligned stores is through shader images.
	 */
	if (((may_store_unaligned && ctx->ac.chip_class == GFX6) ||
	     /* If this is write-only, don't keep data in L1 to prevent
	      * evicting L1 cache lines that may be needed by other
	      * instructions.
	      */
	     writeonly_memory ||
	     access & (ACCESS_COHERENT | ACCESS_VOLATILE))) {
		cache_policy |= ac_glc;
	}

	if (access & ACCESS_STREAM_CACHE_POLICY)
		cache_policy |= ac_slc;

	return cache_policy;
}
static void visit_store_ssbo(struct ac_nir_context *ctx,
                             nir_intrinsic_instr *instr)
{
	LLVMValueRef src_data = get_src(ctx, instr->src[0]);
	int elem_size_bytes = ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 8;
	unsigned writemask = nir_intrinsic_write_mask(instr);
	enum gl_access_qualifier access = nir_intrinsic_access(instr);
	bool writeonly_memory = access & ACCESS_NON_READABLE;
	unsigned cache_policy = get_cache_policy(ctx, access, false, writeonly_memory);

	LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
					        get_src(ctx, instr->src[1]), true);
	LLVMValueRef base_data = src_data;
	base_data = ac_trim_vector(&ctx->ac, base_data, instr->num_components);
	LLVMValueRef base_offset = get_src(ctx, instr->src[2]);

	while (writemask) {
		int start, count;
		LLVMValueRef data, offset;
		LLVMTypeRef data_type;

		u_bit_scan_consecutive_range(&writemask, &start, &count);

		/* Due to an LLVM limitation with LLVM < 9, split 3-element
		 * writes into a 2-element and a 1-element write. */
		if (count == 3 &&
		    (elem_size_bytes != 4 || !ac_has_vec3_support(ctx->ac.chip_class, false))) {
			writemask |= 1 << (start + 2);
			count = 2;
		}
		int num_bytes = count * elem_size_bytes; /* count in bytes */

		/* we can only store 4 DWords at the same time.
		 * can only happen for 64 Bit vectors. */
		if (num_bytes > 16) {
			writemask |= ((1u << (count - 2)) - 1u) << (start + 2);
			count = 2;
			num_bytes = 16;
		}

		/* check alignment of 16 Bit stores */
		if (elem_size_bytes == 2 && num_bytes > 2 && (start % 2) == 1) {
			writemask |= ((1u << (count - 1)) - 1u) << (start + 1);
			count = 1;
			num_bytes = 2;
		}
		data = extract_vector_range(&ctx->ac, base_data, start, count);

		offset = LLVMBuildAdd(ctx->ac.builder, base_offset,
				      LLVMConstInt(ctx->ac.i32, start * elem_size_bytes, false), "");

		if (num_bytes == 1) {
			ac_build_tbuffer_store_byte(&ctx->ac, rsrc, data,
						    offset, ctx->ac.i32_0,
						    cache_policy);
		} else if (num_bytes == 2) {
			ac_build_tbuffer_store_short(&ctx->ac, rsrc, data,
						     offset, ctx->ac.i32_0,
						     cache_policy);
		} else {
			int num_channels = num_bytes / 4;

			switch (num_bytes) {
			case 16: /* v4f32 */
				data_type = ctx->ac.v4f32;
				break;
			case 12: /* v3f32 */
				data_type = ctx->ac.v3f32;
				break;
			case 8: /* v2f32 */
				data_type = ctx->ac.v2f32;
				break;
			case 4: /* f32 */
				data_type = ctx->ac.f32;
				break;
			default:
				unreachable("Malformed vector store.");
			}
			data = LLVMBuildBitCast(ctx->ac.builder, data, data_type, "");

			ac_build_buffer_store_dword(&ctx->ac, rsrc, data,
						    num_channels, offset,
						    ctx->ac.i32_0, 0,
						    cache_policy, false);
		}
	}
}
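
/* Emulate a 64-bit SSBO compare-and-swap: build a global pointer from the
 * descriptor's 48-bit base address plus the offset and emit a plain atomic
 * cmpxchg. With robust buffer access the operation is predicated on
 * offset < buffer size, and the out-of-bounds result is 0 via the phi at
 * the end. */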
static LLVMValueRef emit_ssbo_comp_swap_64(struct ac_nir_context *ctx,
					   LLVMValueRef descriptor,
					   LLVMValueRef offset,
					   LLVMValueRef compare,
					   LLVMValueRef exchange)
{
	LLVMBasicBlockRef start_block = NULL, then_block = NULL;
	if (ctx->abi->robust_buffer_access) {
		LLVMValueRef size = ac_llvm_extract_elem(&ctx->ac, descriptor, 2);

		LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, offset, size, "");
		start_block = LLVMGetInsertBlock(ctx->ac.builder);

		ac_build_ifcc(&ctx->ac, cond, -1);

		then_block = LLVMGetInsertBlock(ctx->ac.builder);
	}

	LLVMValueRef ptr_parts[2] = {
		ac_llvm_extract_elem(&ctx->ac, descriptor, 0),
		LLVMBuildAnd(ctx->ac.builder,
			     ac_llvm_extract_elem(&ctx->ac, descriptor, 1),
			     LLVMConstInt(ctx->ac.i32, 65535, 0), "")
	};

	ptr_parts[1] = LLVMBuildTrunc(ctx->ac.builder, ptr_parts[1], ctx->ac.i16, "");
	ptr_parts[1] = LLVMBuildSExt(ctx->ac.builder, ptr_parts[1], ctx->ac.i32, "");

	offset = LLVMBuildZExt(ctx->ac.builder, offset, ctx->ac.i64, "");

	LLVMValueRef ptr = ac_build_gather_values(&ctx->ac, ptr_parts, 2);
	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr, ctx->ac.i64, "");
	ptr = LLVMBuildAdd(ctx->ac.builder, ptr, offset, "");
	ptr = LLVMBuildIntToPtr(ctx->ac.builder, ptr, LLVMPointerType(ctx->ac.i64, AC_ADDR_SPACE_GLOBAL), "");

	LLVMValueRef result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, compare, exchange, "singlethread-one-as");
	result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");

	if (ctx->abi->robust_buffer_access) {
		ac_build_endif(&ctx->ac, -1);

		LLVMBasicBlockRef incoming_blocks[2] = {
			start_block,
			then_block,
		};

		LLVMValueRef incoming_values[2] = {
			LLVMConstInt(ctx->ac.i64, 0, 0),
			result,
		};

		LLVMValueRef ret = LLVMBuildPhi(ctx->ac.builder, ctx->ac.i64, "");
		LLVMAddIncoming(ret, incoming_values, incoming_blocks, 2);
		return ret;
	} else {
		return result;
	}
}
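
/* SSBO atomics map onto llvm.amdgcn.raw.buffer.atomic.* (LLVM 9+) or the
 * legacy llvm.amdgcn.buffer.atomic.* intrinsics; the 64-bit compare-swap
 * is handled separately above. */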
static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx,
                                      const nir_intrinsic_instr *instr)
{
	LLVMTypeRef return_type = LLVMTypeOf(get_src(ctx, instr->src[2]));
	const char *op;
	char name[64], type[8];
	LLVMValueRef params[6], descriptor;
	int arg_count = 0;

	switch (instr->intrinsic) {
	case nir_intrinsic_ssbo_atomic_add:
		op = "add";
		break;
	case nir_intrinsic_ssbo_atomic_imin:
		op = "smin";
		break;
	case nir_intrinsic_ssbo_atomic_umin:
		op = "umin";
		break;
	case nir_intrinsic_ssbo_atomic_imax:
		op = "smax";
		break;
	case nir_intrinsic_ssbo_atomic_umax:
		op = "umax";
		break;
	case nir_intrinsic_ssbo_atomic_and:
		op = "and";
		break;
	case nir_intrinsic_ssbo_atomic_or:
		op = "or";
		break;
	case nir_intrinsic_ssbo_atomic_xor:
		op = "xor";
		break;
	case nir_intrinsic_ssbo_atomic_exchange:
		op = "swap";
		break;
	case nir_intrinsic_ssbo_atomic_comp_swap:
		op = "cmpswap";
		break;
	default:
		abort();
	}

	descriptor = ctx->abi->load_ssbo(ctx->abi,
	                                 get_src(ctx, instr->src[0]),
	                                 true);

	if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap &&
	    return_type == ctx->ac.i64) {
		return emit_ssbo_comp_swap_64(ctx, descriptor,
					      get_src(ctx, instr->src[1]),
					      get_src(ctx, instr->src[2]),
					      get_src(ctx, instr->src[3]));
	}
	if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
		params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[3]), 0);
	}
	params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
	params[arg_count++] = descriptor;

	if (LLVM_VERSION_MAJOR >= 9) {
		/* XXX: The new raw/struct atomic intrinsics are buggy with
		 * LLVM 8, see r358579.
		 */
		params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
		params[arg_count++] = ctx->ac.i32_0; /* soffset */
		params[arg_count++] = ctx->ac.i32_0; /* slc */

		ac_build_type_name_for_intr(return_type, type, sizeof(type));
		snprintf(name, sizeof(name),
		         "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
	} else {
		params[arg_count++] = ctx->ac.i32_0; /* vindex */
		params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
		params[arg_count++] = ctx->ac.i1false; /* slc */

		assert(return_type == ctx->ac.i32);
		snprintf(name, sizeof(name),
		         "llvm.amdgcn.buffer.atomic.%s", op);
	}

	return ac_build_intrinsic(&ctx->ac, name, return_type, params,
	                          arg_count, 0);
}
static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx,
                                      const nir_intrinsic_instr *instr)
{
	int elem_size_bytes = instr->dest.ssa.bit_size / 8;
	int num_components = instr->num_components;
	enum gl_access_qualifier access = nir_intrinsic_access(instr);
	unsigned cache_policy = get_cache_policy(ctx, access, false, false);

	LLVMValueRef offset = get_src(ctx, instr->src[1]);
	LLVMValueRef rsrc = ctx->abi->load_ssbo(ctx->abi,
						get_src(ctx, instr->src[0]), false);
	LLVMValueRef vindex = ctx->ac.i32_0;

	LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
	LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;

	LLVMValueRef results[4];
	for (int i = 0; i < num_components;) {
		int num_elems = num_components - i;
		if (elem_size_bytes < 4 && nir_intrinsic_align(instr) % 4 != 0)
			num_elems = 1;
		if (num_elems * elem_size_bytes > 16)
			num_elems = 16 / elem_size_bytes;
		int load_bytes = num_elems * elem_size_bytes;

		LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32, i * elem_size_bytes, false);

		LLVMValueRef ret;

		if (load_bytes == 1) {
			ret = ac_build_tbuffer_load_byte(&ctx->ac,
							 rsrc,
							 offset,
							 ctx->ac.i32_0,
							 immoffset,
							 cache_policy);
		} else if (load_bytes == 2) {
			ret = ac_build_tbuffer_load_short(&ctx->ac,
							  rsrc,
							  offset,
							  ctx->ac.i32_0,
							  immoffset,
							  cache_policy);
		} else {
			int num_channels = util_next_power_of_two(load_bytes) / 4;
			bool can_speculate = access & ACCESS_CAN_REORDER;

			ret = ac_build_buffer_load(&ctx->ac, rsrc, num_channels,
						   vindex, offset, immoffset, 0,
						   cache_policy, can_speculate, false);
		}

		LLVMTypeRef byte_vec = LLVMVectorType(ctx->ac.i8, ac_get_type_size(LLVMTypeOf(ret)));
		ret = LLVMBuildBitCast(ctx->ac.builder, ret, byte_vec, "");
		ret = ac_trim_vector(&ctx->ac, ret, load_bytes);

		LLVMTypeRef ret_type = LLVMVectorType(def_elem_type, num_elems);
		ret = LLVMBuildBitCast(ctx->ac.builder, ret, ret_type, "");

		for (unsigned j = 0; j < num_elems; j++) {
			results[i + j] = LLVMBuildExtractElement(ctx->ac.builder, ret, LLVMConstInt(ctx->ac.i32, j, false), "");
		}
		i += num_elems;
	}

	return ac_build_gather_values(&ctx->ac, results, num_components);
}
static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx,
                                          const nir_intrinsic_instr *instr)
{
	LLVMValueRef ret;
	LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
	LLVMValueRef offset = get_src(ctx, instr->src[1]);
	int num_components = instr->num_components;

	if (ctx->abi->load_ubo)
		rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);

	if (instr->dest.ssa.bit_size == 64)
		num_components *= 2;

	if (instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 8) {
		unsigned load_bytes = instr->dest.ssa.bit_size / 8;
		LLVMValueRef results[num_components];
		for (unsigned i = 0; i < num_components; ++i) {
			LLVMValueRef immoffset = LLVMConstInt(ctx->ac.i32,
							      load_bytes * i, 0);

			if (load_bytes == 1) {
				results[i] = ac_build_tbuffer_load_byte(&ctx->ac,
									rsrc,
									offset,
									ctx->ac.i32_0,
									immoffset,
									0);
			} else {
				assert(load_bytes == 2);
				results[i] = ac_build_tbuffer_load_short(&ctx->ac,
									 rsrc,
									 offset,
									 ctx->ac.i32_0,
									 immoffset,
									 0);
			}
		}
		ret = ac_build_gather_values(&ctx->ac, results, num_components);
	} else {
		ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset,
					   NULL, 0, 0, true, true);

		ret = ac_trim_vector(&ctx->ac, ret, num_components);
	}

	return LLVMBuildBitCast(ctx->ac.builder, ret,
	                        get_def_type(ctx, &instr->dest.ssa), "");
}
static void
get_deref_offset(struct ac_nir_context *ctx, nir_deref_instr *instr,
		 bool vs_in, unsigned *vertex_index_out,
		 LLVMValueRef *vertex_index_ref,
		 unsigned *const_out, LLVMValueRef *indir_out)
{
	nir_variable *var = nir_deref_instr_get_variable(instr);
	nir_deref_path path;
	unsigned idx_lvl = 1;

	nir_deref_path_init(&path, instr, NULL);

	if (vertex_index_out != NULL || vertex_index_ref != NULL) {
		if (vertex_index_ref) {
			*vertex_index_ref = get_src(ctx, path.path[idx_lvl]->arr.index);
			if (vertex_index_out)
				*vertex_index_out = 0;
		} else {
			*vertex_index_out = nir_src_as_uint(path.path[idx_lvl]->arr.index);
		}
		++idx_lvl;
	}

	uint32_t const_offset = 0;
	LLVMValueRef offset = NULL;

	if (var->data.compact) {
		assert(instr->deref_type == nir_deref_type_array);
		const_offset = nir_src_as_uint(instr->arr.index);
		goto out;
	}

	for (; path.path[idx_lvl]; ++idx_lvl) {
		const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
		if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
			unsigned index = path.path[idx_lvl]->strct.index;

			for (unsigned i = 0; i < index; i++) {
				const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
				const_offset += glsl_count_attribute_slots(ft, vs_in);
			}
		} else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
			unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, vs_in);
			if (nir_src_is_const(path.path[idx_lvl]->arr.index)) {
				const_offset += size *
					nir_src_as_uint(path.path[idx_lvl]->arr.index);
			} else {
				LLVMValueRef array_off = LLVMBuildMul(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, size, 0),
								      get_src(ctx, path.path[idx_lvl]->arr.index), "");
				if (offset)
					offset = LLVMBuildAdd(ctx->ac.builder, offset, array_off, "");
				else
					offset = array_off;
			}
		} else
			unreachable("Unhandled deref type in get_deref_instr_offset");
	}

out:
	nir_deref_path_finish(&path);

	if (const_offset && offset)
		offset = LLVMBuildAdd(ctx->ac.builder, offset,
				      LLVMConstInt(ctx->ac.i32, const_offset, 0),
				      "");

	*const_out = const_offset;
	*indir_out = offset;
}
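/* Worked example (a sketch, assuming the usual one-slot-per-vec4 packing):
 * for "in vec4 attrs[3][2]" accessed as attrs[i][1], the outer array level
 * has element type vec4[2] = 2 slots, so the dynamic index contributes
 * offset = i * 2; the inner level has element type vec4 = 1 slot with
 * constant index 1, so const_out = 1 and indir_out = 2 * i.
 */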
static LLVMValueRef load_tess_varyings(struct ac_nir_context *ctx,
				       nir_intrinsic_instr *instr,
				       bool load_inputs)
{
	LLVMValueRef result;
	LLVMValueRef vertex_index = NULL;
	LLVMValueRef indir_index = NULL;
	unsigned const_index = 0;

	nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

	unsigned location = var->data.location;
	unsigned driver_location = var->data.driver_location;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;

	get_deref_offset(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
			 false, NULL, is_patch ? NULL : &vertex_index,
			 &const_index, &indir_index);

	LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);

	LLVMTypeRef src_component_type;
	if (LLVMGetTypeKind(dest_type) == LLVMVectorTypeKind)
		src_component_type = LLVMGetElementType(dest_type);
	else
		src_component_type = dest_type;

	result = ctx->abi->load_tess_varyings(ctx->abi, src_component_type,
					      vertex_index, indir_index,
					      const_index, location, driver_location,
					      var->data.location_frac,
					      instr->num_components,
					      is_patch, is_compact, load_inputs);
	if (instr->dest.ssa.bit_size == 16) {
		result = ac_to_integer(&ctx->ac, result);
		result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
	}
	return LLVMBuildBitCast(ctx->ac.builder, result, dest_type, "");
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
	assert(glsl_type_is_vector_or_scalar(type) ||
	       glsl_type_is_matrix(type));
	return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
				   nir_intrinsic_instr *instr)
{
	nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
	nir_variable *var = nir_deref_instr_get_variable(deref);

	LLVMValueRef values[8];
	int idx = 0;
	int ve = instr->dest.ssa.num_components;
	unsigned comp = 0;
	LLVMValueRef indir_index;
	LLVMValueRef ret;
	unsigned const_index;
	unsigned stride = 4;
	int mode = deref->mode;

	if (var) {
		bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
			     var->data.mode == nir_var_shader_in;
		idx = var->data.driver_location;
		comp = var->data.location_frac;
		mode = var->data.mode;

		get_deref_offset(ctx, deref, vs_in, NULL, NULL,
				 &const_index, &indir_index);

		if (var->data.compact) {
			stride = 1;
			const_index += comp;
			comp = 0;
		}
	}

	if (instr->dest.ssa.bit_size == 64 &&
	    (deref->mode == nir_var_shader_in ||
	     deref->mode == nir_var_shader_out ||
	     deref->mode == nir_var_function_temp))
		ve *= 2;

	switch (mode) {
	case nir_var_shader_in:
		if (ctx->stage == MESA_SHADER_TESS_CTRL ||
		    ctx->stage == MESA_SHADER_TESS_EVAL) {
			return load_tess_varyings(ctx, instr, true);
		}

		if (ctx->stage == MESA_SHADER_GEOMETRY) {
			LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
			LLVMValueRef indir_index;
			unsigned const_index, vertex_index;
			get_deref_offset(ctx, deref, false, &vertex_index, NULL,
					 &const_index, &indir_index);
			assert(indir_index == NULL);

			return ctx->abi->load_inputs(ctx->abi, var->data.location,
						     var->data.driver_location,
						     var->data.location_frac,
						     instr->num_components, vertex_index, const_index, type);
		}

		for (unsigned chan = comp; chan < ve + comp; chan++) {
			if (indir_index) {
				unsigned count = glsl_count_attribute_slots(
						var->type,
						ctx->stage == MESA_SHADER_VERTEX);
				count -= chan / 4;
				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
						&ctx->ac, ctx->abi->inputs + idx + chan, count,
						stride, false, true);

				values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
								       tmp_vec,
								       indir_index, "");
			} else
				values[chan] = ctx->abi->inputs[idx + chan + const_index * stride];
		}
		break;
	case nir_var_function_temp:
		for (unsigned chan = 0; chan < ve; chan++) {
			if (indir_index) {
				unsigned count = glsl_count_attribute_slots(
						var->type, false);
				count -= chan / 4;
				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
						&ctx->ac, ctx->locals + idx + chan, count,
						stride, true, true);

				values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
								       tmp_vec,
								       indir_index, "");
			} else {
				values[chan] = LLVMBuildLoad(ctx->ac.builder, ctx->locals[idx + chan + const_index * stride], "");
			}
		}
		break;
	case nir_var_mem_shared: {
		LLVMValueRef address = get_src(ctx, instr->src[0]);
		LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
		return LLVMBuildBitCast(ctx->ac.builder, val,
					get_def_type(ctx, &instr->dest.ssa),
					"");
	}
	case nir_var_shader_out:
		if (ctx->stage == MESA_SHADER_TESS_CTRL) {
			return load_tess_varyings(ctx, instr, false);
		}

		if (ctx->stage == MESA_SHADER_FRAGMENT &&
		    var->data.fb_fetch_output &&
		    ctx->abi->emit_fbfetch)
			return ctx->abi->emit_fbfetch(ctx->abi);

		for (unsigned chan = comp; chan < ve + comp; chan++) {
			if (indir_index) {
				unsigned count = glsl_count_attribute_slots(
						var->type, false);
				count -= chan / 4;
				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
						&ctx->ac, ctx->abi->outputs + idx + chan, count,
						stride, true, true);

				values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
								       tmp_vec,
								       indir_index, "");
			} else {
				values[chan] = LLVMBuildLoad(ctx->ac.builder,
							     ctx->abi->outputs[idx + chan + const_index * stride],
							     "");
			}
		}
		break;
	case nir_var_mem_global: {
		LLVMValueRef address = get_src(ctx, instr->src[0]);
		unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
		unsigned natural_stride = type_scalar_size_bytes(deref->type);
		unsigned stride = explicit_stride ? explicit_stride : natural_stride;

		LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
		if (stride != natural_stride) {
			LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(result_type),
							       LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
			address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

			for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
				LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, i * stride / natural_stride, 0);
				values[i] = LLVMBuildLoad(ctx->ac.builder,
							  ac_build_gep_ptr(&ctx->ac, address, offset), "");
			}
			return ac_build_gather_values(&ctx->ac, values, instr->dest.ssa.num_components);
		} else {
			LLVMTypeRef ptr_type = LLVMPointerType(result_type,
							       LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
			address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
			LLVMValueRef val = LLVMBuildLoad(ctx->ac.builder, address, "");
			return val;
		}
	}
	default:
		unreachable("unhandled variable mode");
	}
	ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
	return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
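/* Indirectly indexed inputs/outputs/locals are handled above by gathering
 * every candidate slot into one LLVM vector and extracting with the dynamic
 * index, because ctx->abi->inputs, ctx->abi->outputs and ctx->locals are
 * flat C arrays of per-channel LLVM values that cannot themselves be
 * indexed by a runtime value.
 */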
static void
visit_store_var(struct ac_nir_context *ctx,
		nir_intrinsic_instr *instr)
{
	nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
	nir_variable *var = nir_deref_instr_get_variable(deref);

	LLVMValueRef temp_ptr, value;
	int idx = 0;
	unsigned comp = 0;
	LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[1]));
	int writemask = instr->const_index[0];
	LLVMValueRef indir_index;
	unsigned const_index;

	if (var) {
		get_deref_offset(ctx, deref, false,
				 NULL, NULL, &const_index, &indir_index);
		idx = var->data.driver_location;
		comp = var->data.location_frac;

		if (var->data.compact) {
			const_index += comp;
			comp = 0;
		}
	}

	if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64 &&
	    (deref->mode == nir_var_shader_out ||
	     deref->mode == nir_var_function_temp)) {

		src = LLVMBuildBitCast(ctx->ac.builder, src,
				       LLVMVectorType(ctx->ac.f32, ac_get_llvm_num_components(src) * 2),
				       "");

		writemask = widen_mask(writemask, 2);
	}

	writemask = writemask << comp;

	switch (deref->mode) {
	case nir_var_shader_out:

		if (ctx->stage == MESA_SHADER_TESS_CTRL) {
			LLVMValueRef vertex_index = NULL;
			LLVMValueRef indir_index = NULL;
			unsigned const_index = 0;
			const bool is_patch = var->data.patch;

			get_deref_offset(ctx, deref, false, NULL,
					 is_patch ? NULL : &vertex_index,
					 &const_index, &indir_index);

			ctx->abi->store_tcs_outputs(ctx->abi, var,
						    vertex_index, indir_index,
						    const_index, src, writemask);
			return;
		}

		for (unsigned chan = 0; chan < 8; chan++) {
			int stride = 4;
			if (!(writemask & (1 << chan)))
				continue;

			value = ac_llvm_extract_elem(&ctx->ac, src, chan - comp);

			if (var->data.compact)
				stride = 1;
			if (indir_index) {
				unsigned count = glsl_count_attribute_slots(
						var->type, false);
				count -= chan / 4;
				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
						&ctx->ac, ctx->abi->outputs + idx + chan, count,
						stride, true, true);

				tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
								 value, indir_index, "");
				build_store_values_extended(&ctx->ac, ctx->abi->outputs + idx + chan,
							    count, stride, tmp_vec);

			} else {
				temp_ptr = ctx->abi->outputs[idx + chan + const_index * stride];

				LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
			}
		}
		break;
	case nir_var_function_temp:
		for (unsigned chan = 0; chan < 8; chan++) {
			if (!(writemask & (1 << chan)))
				continue;

			value = ac_llvm_extract_elem(&ctx->ac, src, chan);
			if (indir_index) {
				unsigned count = glsl_count_attribute_slots(
						var->type, false);
				count -= chan / 4;
				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
						&ctx->ac, ctx->locals + idx + chan, count,
						4, true, true);

				tmp_vec = LLVMBuildInsertElement(ctx->ac.builder, tmp_vec,
								 value, indir_index, "");
				build_store_values_extended(&ctx->ac, ctx->locals + idx + chan,
							    count, 4, tmp_vec);
			} else {
				temp_ptr = ctx->locals[idx + chan + const_index * 4];

				LLVMBuildStore(ctx->ac.builder, value, temp_ptr);
			}
		}
		break;

	case nir_var_mem_global:
	case nir_var_mem_shared: {
		int writemask = instr->const_index[0];
		LLVMValueRef address = get_src(ctx, instr->src[0]);
		LLVMValueRef val = get_src(ctx, instr->src[1]);

		unsigned explicit_stride = glsl_get_explicit_stride(deref->type);
		unsigned natural_stride = type_scalar_size_bytes(deref->type);
		unsigned stride = explicit_stride ? explicit_stride : natural_stride;

		LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
						       LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
		address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

		if (writemask == (1u << ac_get_llvm_num_components(val)) - 1 &&
		    stride == natural_stride) {
			LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(val),
							       LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
			address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");

			val = LLVMBuildBitCast(ctx->ac.builder, val,
					       LLVMGetElementType(LLVMTypeOf(address)), "");
			LLVMBuildStore(ctx->ac.builder, val, address);
		} else {
			LLVMTypeRef ptr_type = LLVMPointerType(LLVMGetElementType(LLVMTypeOf(val)),
							       LLVMGetPointerAddressSpace(LLVMTypeOf(address)));
			address = LLVMBuildBitCast(ctx->ac.builder, address, ptr_type, "");
			for (unsigned chan = 0; chan < 4; chan++) {
				if (!(writemask & (1 << chan)))
					continue;

				LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, chan * stride / natural_stride, 0);

				LLVMValueRef ptr = ac_build_gep_ptr(&ctx->ac, address, offset);
				LLVMValueRef src = ac_llvm_extract_elem(&ctx->ac, val,
									chan);
				src = LLVMBuildBitCast(ctx->ac.builder, src,
						       LLVMGetElementType(LLVMTypeOf(ptr)), "");
				LLVMBuildStore(ctx->ac.builder, src, ptr);
			}
		}
		break;
	}
	default:
		break;
	}
}
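/* Example of the writemask handling above: a vec2 store to components .yz
 * (writemask 0x3, comp = 1) becomes writemask 0x6 after the shift, so
 * channels 1 and 2 are written and ac_llvm_extract_elem() pulls source
 * elements chan - comp = 0 and 1.
 */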
static int image_type_to_components_count(enum glsl_sampler_dim dim, bool array)
{
	switch (dim) {
	case GLSL_SAMPLER_DIM_BUF:
		return 1;
	case GLSL_SAMPLER_DIM_1D:
		return array ? 2 : 1;
	case GLSL_SAMPLER_DIM_2D:
		return array ? 3 : 2;
	case GLSL_SAMPLER_DIM_MS:
		return array ? 4 : 3;
	case GLSL_SAMPLER_DIM_3D:
	case GLSL_SAMPLER_DIM_CUBE:
		return 3;
	case GLSL_SAMPLER_DIM_RECT:
	case GLSL_SAMPLER_DIM_SUBPASS:
		return 2;
	case GLSL_SAMPLER_DIM_SUBPASS_MS:
		return 3;
	default:
		break;
	}
	return 0;
}
static LLVMValueRef adjust_sample_index_using_fmask(struct ac_llvm_context *ctx,
						    LLVMValueRef coord_x, LLVMValueRef coord_y,
						    LLVMValueRef coord_z,
						    LLVMValueRef sample_index,
						    LLVMValueRef fmask_desc_ptr)
{
	unsigned sample_chan = coord_z ? 3 : 2;
	LLVMValueRef addr[4] = {coord_x, coord_y, coord_z};
	addr[sample_chan] = sample_index;

	ac_apply_fmask_to_sample(ctx, fmask_desc_ptr, addr, coord_z != NULL);
	return addr[sample_chan];
}
static nir_deref_instr *get_image_deref(const nir_intrinsic_instr *instr)
{
	assert(instr->src[0].is_ssa);
	return nir_instr_as_deref(instr->src[0].ssa->parent_instr);
}
static LLVMValueRef get_image_descriptor(struct ac_nir_context *ctx,
					 const nir_intrinsic_instr *instr,
					 enum ac_descriptor_type desc_type,
					 bool write)
{
	nir_deref_instr *deref_instr =
		instr->src[0].ssa->parent_instr->type == nir_instr_type_deref ?
		nir_instr_as_deref(instr->src[0].ssa->parent_instr) : NULL;

	return get_sampler_desc(ctx, deref_instr, desc_type, &instr->instr, true, write);
}
static void get_image_coords(struct ac_nir_context *ctx,
			     const nir_intrinsic_instr *instr,
			     struct ac_image_args *args,
			     enum glsl_sampler_dim dim,
			     bool is_array)
{
	LLVMValueRef src0 = get_src(ctx, instr->src[1]);
	LLVMValueRef masks[] = {
		LLVMConstInt(ctx->ac.i32, 0, false), LLVMConstInt(ctx->ac.i32, 1, false),
		LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
	};
	LLVMValueRef sample_index = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);

	int count;
	ASSERTED bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
				      dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
	bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
		      dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
	bool gfx9_1d = ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D;
	assert(!add_frag_pos && "Input attachments should be lowered by this point.");
	count = image_type_to_components_count(dim, is_array);

	if (is_ms && (instr->intrinsic == nir_intrinsic_image_deref_load ||
		      instr->intrinsic == nir_intrinsic_bindless_image_load)) {
		LLVMValueRef fmask_load_address[3];

		fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
		fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
		if (is_array)
			fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
		else
			fmask_load_address[2] = NULL;

		sample_index = adjust_sample_index_using_fmask(&ctx->ac,
							       fmask_load_address[0],
							       fmask_load_address[1],
							       fmask_load_address[2],
							       sample_index,
							       get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr),
										AC_DESC_FMASK, &instr->instr, true, false));
	}
	if (count == 1 && !gfx9_1d) {
		if (instr->src[1].ssa->num_components)
			args->coords[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
		else
			args->coords[0] = src0;
	} else {
		int chan;
		if (is_ms)
			count--;
		for (chan = 0; chan < count; ++chan) {
			args->coords[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
		}

		if (gfx9_1d) {
			if (is_array) {
				args->coords[2] = args->coords[1];
				args->coords[1] = ctx->ac.i32_0;
			} else
				args->coords[1] = ctx->ac.i32_0;
			count++;
		}
		if (ctx->ac.chip_class == GFX9 &&
		    dim == GLSL_SAMPLER_DIM_2D &&
		    !is_array) {
			/* The hw can't bind a slice of a 3D image as a 2D
			 * image, because it ignores BASE_ARRAY if the target
			 * is 3D. The workaround is to read BASE_ARRAY and set
			 * it as the 3rd address operand for all 2D images.
			 */
			LLVMValueRef first_layer, const5, mask;

			const5 = LLVMConstInt(ctx->ac.i32, 5, 0);
			mask = LLVMConstInt(ctx->ac.i32, S_008F24_BASE_ARRAY(~0), 0);
			first_layer = LLVMBuildExtractElement(ctx->ac.builder, args->resource, const5, "");
			first_layer = LLVMBuildAnd(ctx->ac.builder, first_layer, mask, "");

			args->coords[count] = first_layer;
			count++;
		}

		if (is_ms) {
			args->coords[count] = sample_index;
			count++;
		}
	}
}
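/* E.g. for a GFX9 1D array image, coordinates (x, layer) are rewritten
 * above to (x, 0, layer): GFX9 samples 1D as 2D, so a zero Y coordinate is
 * inserted and the layer moves to the third channel.
 */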
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
						const nir_intrinsic_instr *instr,
						bool write, bool atomic)
{
	LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
	if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
		LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
		LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
		stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");

		LLVMValueRef new_elem_count = LLVMBuildSelect(ctx->ac.builder,
							      LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
							      elem_count, stride, "");

		rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
					      LLVMConstInt(ctx->ac.i32, 2, 0), "");
	}
	return rsrc;
}
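/* Our reading of the workaround above (not an upstream comment): dword 2 of
 * the buffer descriptor holds NUM_RECORDS and the high half of dword 1 the
 * stride, and the pre-raw/struct LLVM 8 buffer atomics are kept in range on
 * GFX9 by widening NUM_RECORDS to whichever of the two values is larger.
 */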
static LLVMValueRef visit_image_load(struct ac_nir_context *ctx,
				     const nir_intrinsic_instr *instr,
				     bool bindless)
{
	LLVMValueRef res;

	enum glsl_sampler_dim dim;
	enum gl_access_qualifier access;
	bool is_array;
	if (bindless) {
		dim = nir_intrinsic_image_dim(instr);
		access = nir_intrinsic_access(instr);
		is_array = nir_intrinsic_image_array(instr);
	} else {
		const nir_deref_instr *image_deref = get_image_deref(instr);
		const struct glsl_type *type = image_deref->type;
		const nir_variable *var = nir_deref_instr_get_variable(image_deref);
		dim = glsl_get_sampler_dim(type);
		access = var->data.image.access;
		is_array = glsl_sampler_type_is_array(type);
	}

	struct ac_image_args args = {};

	args.cache_policy = get_cache_policy(ctx, access, false, false);

	if (dim == GLSL_SAMPLER_DIM_BUF) {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
		unsigned num_channels = util_last_bit(mask);
		LLVMValueRef rsrc, vindex;

		rsrc = get_image_buffer_descriptor(ctx, instr, false, false);
		vindex = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
						 ctx->ac.i32_0, "");

		bool can_speculate = access & ACCESS_CAN_REORDER;
		res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex,
						  ctx->ac.i32_0, num_channels,
						  args.cache_policy,
						  can_speculate);
		res = ac_build_expand_to_vec4(&ctx->ac, res, num_channels);

		res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
		res = ac_to_integer(&ctx->ac, res);
	} else {
		args.opcode = ac_image_load;
		args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
		get_image_coords(ctx, instr, &args, dim, is_array);
		args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);
		if (!(args.cache_policy & ac_glc))
			args.attributes = AC_FUNC_ATTR_READONLY;

		res = ac_build_image_opcode(&ctx->ac, &args);
	}
	return res;
}
static void visit_image_store(struct ac_nir_context *ctx,
			      nir_intrinsic_instr *instr,
			      bool bindless)
{
	enum glsl_sampler_dim dim;
	enum gl_access_qualifier access;
	bool is_array;
	if (bindless) {
		dim = nir_intrinsic_image_dim(instr);
		access = nir_intrinsic_access(instr);
		is_array = nir_intrinsic_image_array(instr);
	} else {
		const nir_deref_instr *image_deref = get_image_deref(instr);
		const struct glsl_type *type = image_deref->type;
		const nir_variable *var = nir_deref_instr_get_variable(image_deref);
		dim = glsl_get_sampler_dim(type);
		access = var->data.image.access;
		is_array = glsl_sampler_type_is_array(type);
	}

	bool writeonly_memory = access & ACCESS_NON_READABLE;
	struct ac_image_args args = {};

	args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);

	if (dim == GLSL_SAMPLER_DIM_BUF) {
		LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, true, false);
		LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
		unsigned src_channels = ac_get_llvm_num_components(src);
		LLVMValueRef vindex;

		if (src_channels == 3)
			src = ac_build_expand_to_vec4(&ctx->ac, src, 3);

		vindex = LLVMBuildExtractElement(ctx->ac.builder,
						 get_src(ctx, instr->src[1]),
						 ctx->ac.i32_0, "");

		ac_build_buffer_store_format(&ctx->ac, rsrc, src, vindex,
					     ctx->ac.i32_0, src_channels,
					     args.cache_policy);
	} else {
		args.opcode = ac_image_store;
		args.data[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[3]));
		args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
		get_image_coords(ctx, instr, &args, dim, is_array);
		args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

		ac_build_image_opcode(&ctx->ac, &args);
	}
}
static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
				       const nir_intrinsic_instr *instr,
				       bool bindless)
{
	LLVMValueRef params[7];
	int param_count = 0;

	bool cmpswap = instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap ||
		       instr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap;
	const char *atomic_name;
	char intrinsic_name[64];
	enum ac_atomic_op atomic_subop;
	ASSERTED int length;

	enum glsl_sampler_dim dim;
	bool is_array;
	if (bindless) {
		if (instr->intrinsic == nir_intrinsic_bindless_image_atomic_imin ||
		    instr->intrinsic == nir_intrinsic_bindless_image_atomic_umin ||
		    instr->intrinsic == nir_intrinsic_bindless_image_atomic_imax ||
		    instr->intrinsic == nir_intrinsic_bindless_image_atomic_umax) {
			const GLenum format = nir_intrinsic_format(instr);
			assert(format == GL_R32UI || format == GL_R32I);
		}
		dim = nir_intrinsic_image_dim(instr);
		is_array = nir_intrinsic_image_array(instr);
	} else {
		const struct glsl_type *type = get_image_deref(instr)->type;
		dim = glsl_get_sampler_dim(type);
		is_array = glsl_sampler_type_is_array(type);
	}

	switch (instr->intrinsic) {
	case nir_intrinsic_bindless_image_atomic_add:
	case nir_intrinsic_image_deref_atomic_add:
		atomic_name = "add";
		atomic_subop = ac_atomic_add;
		break;
	case nir_intrinsic_bindless_image_atomic_imin:
	case nir_intrinsic_image_deref_atomic_imin:
		atomic_name = "smin";
		atomic_subop = ac_atomic_smin;
		break;
	case nir_intrinsic_bindless_image_atomic_umin:
	case nir_intrinsic_image_deref_atomic_umin:
		atomic_name = "umin";
		atomic_subop = ac_atomic_umin;
		break;
	case nir_intrinsic_bindless_image_atomic_imax:
	case nir_intrinsic_image_deref_atomic_imax:
		atomic_name = "smax";
		atomic_subop = ac_atomic_smax;
		break;
	case nir_intrinsic_bindless_image_atomic_umax:
	case nir_intrinsic_image_deref_atomic_umax:
		atomic_name = "umax";
		atomic_subop = ac_atomic_umax;
		break;
	case nir_intrinsic_bindless_image_atomic_and:
	case nir_intrinsic_image_deref_atomic_and:
		atomic_name = "and";
		atomic_subop = ac_atomic_and;
		break;
	case nir_intrinsic_bindless_image_atomic_or:
	case nir_intrinsic_image_deref_atomic_or:
		atomic_name = "or";
		atomic_subop = ac_atomic_or;
		break;
	case nir_intrinsic_bindless_image_atomic_xor:
	case nir_intrinsic_image_deref_atomic_xor:
		atomic_name = "xor";
		atomic_subop = ac_atomic_xor;
		break;
	case nir_intrinsic_bindless_image_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_exchange:
		atomic_name = "swap";
		atomic_subop = ac_atomic_swap;
		break;
	case nir_intrinsic_bindless_image_atomic_comp_swap:
	case nir_intrinsic_image_deref_atomic_comp_swap:
		atomic_name = "cmpswap";
		atomic_subop = 0; /* not used */
		break;
	case nir_intrinsic_bindless_image_atomic_inc_wrap:
	case nir_intrinsic_image_deref_atomic_inc_wrap: {
		atomic_name = "inc";
		atomic_subop = ac_atomic_inc_wrap;
		/* ATOMIC_INC instruction does:
		 *      value = (value + 1) % (data + 1)
		 * but we want:
		 *      value = (value + 1) % data
		 * So replace 'data' by 'data - 1'.
		 */
		ctx->ssa_defs[instr->src[3].ssa->index] =
			LLVMBuildSub(ctx->ac.builder,
				     ctx->ssa_defs[instr->src[3].ssa->index],
				     ctx->ac.i32_1, "");
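		/* Worked example: for data = 4 we emit data - 1 = 3, and the
		 * hardware then computes (value + 1) % (3 + 1), i.e. the
		 * desired (value + 1) % 4.
		 */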
		break;
	}
	case nir_intrinsic_bindless_image_atomic_dec_wrap:
	case nir_intrinsic_image_deref_atomic_dec_wrap:
		atomic_name = "dec";
		atomic_subop = ac_atomic_dec_wrap;
		break;
	default:
		abort();
	}

	if (cmpswap)
		params[param_count++] = get_src(ctx, instr->src[4]);
	params[param_count++] = get_src(ctx, instr->src[3]);

	if (dim == GLSL_SAMPLER_DIM_BUF) {
		params[param_count++] = get_image_buffer_descriptor(ctx, instr, true, true);
		params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
								ctx->ac.i32_0, ""); /* vindex */
		params[param_count++] = ctx->ac.i32_0; /* voffset */
		if (LLVM_VERSION_MAJOR >= 9) {
			/* XXX: The new raw/struct atomic intrinsics are buggy
			 * with LLVM 8, see r358579.
			 */
			params[param_count++] = ctx->ac.i32_0; /* soffset */
			params[param_count++] = ctx->ac.i32_0; /* slc */

			length = snprintf(intrinsic_name, sizeof(intrinsic_name),
					  "llvm.amdgcn.struct.buffer.atomic.%s.i32", atomic_name);
		} else {
			params[param_count++] = ctx->ac.i1false; /* slc */

			length = snprintf(intrinsic_name, sizeof(intrinsic_name),
					  "llvm.amdgcn.buffer.atomic.%s", atomic_name);
		}

		assert(length < sizeof(intrinsic_name));
		return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32,
					  params, param_count, 0);
	} else {
		struct ac_image_args args = {};
		args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
		args.atomic = atomic_subop;
		args.data[0] = params[0];
		if (cmpswap)
			args.data[1] = params[1];
		args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, true);
		get_image_coords(ctx, instr, &args, dim, is_array);
		args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

		return ac_build_image_opcode(&ctx->ac, &args);
	}
}
static LLVMValueRef visit_image_samples(struct ac_nir_context *ctx,
					const nir_intrinsic_instr *instr)
{
	LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);

	return ac_build_image_get_sample_count(&ctx->ac, rsrc);
}
static LLVMValueRef visit_image_size(struct ac_nir_context *ctx,
				     const nir_intrinsic_instr *instr,
				     bool bindless)
{
	LLVMValueRef res;

	enum glsl_sampler_dim dim;
	bool is_array;
	if (bindless) {
		dim = nir_intrinsic_image_dim(instr);
		is_array = nir_intrinsic_image_array(instr);
	} else {
		const struct glsl_type *type = get_image_deref(instr)->type;
		dim = glsl_get_sampler_dim(type);
		is_array = glsl_sampler_type_is_array(type);
	}

	if (dim == GLSL_SAMPLER_DIM_BUF)
		return get_buffer_size(ctx, get_image_descriptor(ctx, instr, AC_DESC_BUFFER, false), true);

	struct ac_image_args args = { 0 };

	args.dim = ac_get_image_dim(ctx->ac.chip_class, dim, is_array);

	args.resource = get_image_descriptor(ctx, instr, AC_DESC_IMAGE, false);
	args.opcode = ac_image_get_resinfo;
	args.lod = ctx->ac.i32_0;
	args.attributes = AC_FUNC_ATTR_READNONE;

	res = ac_build_image_opcode(&ctx->ac, &args);

	LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);

	if (dim == GLSL_SAMPLER_DIM_CUBE && is_array) {
		LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
		LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
		z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
		res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
	}
	if (ctx->ac.chip_class == GFX9 && dim == GLSL_SAMPLER_DIM_1D && is_array) {
		LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
		res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
					     ctx->ac.i32_1, "");
	}
	return res;
}
static void emit_membar(struct ac_llvm_context *ac,
			const nir_intrinsic_instr *instr)
{
	unsigned wait_flags = 0;

	switch (instr->intrinsic) {
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
		wait_flags = AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE;
		break;
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
		wait_flags = AC_WAIT_VLOAD | AC_WAIT_VSTORE;
		break;
	case nir_intrinsic_memory_barrier_shared:
		wait_flags = AC_WAIT_LGKM;
		break;
	default:
		break;
	}

	ac_build_waitcnt(ac, wait_flags);
}
void ac_emit_barrier(struct ac_llvm_context *ac, gl_shader_stage stage)
{
	/* GFX6 only (thanks to a hw bug workaround):
	 * The real barrier instruction isn't needed, because an entire patch
	 * always fits into a single wave.
	 */
	if (ac->chip_class == GFX6 && stage == MESA_SHADER_TESS_CTRL) {
		ac_build_waitcnt(ac, AC_WAIT_LGKM | AC_WAIT_VLOAD | AC_WAIT_VSTORE);
		return;
	}
	ac_build_s_barrier(ac);
}
static void emit_discard(struct ac_nir_context *ctx,
			 const nir_intrinsic_instr *instr)
{
	LLVMValueRef cond;

	if (instr->intrinsic == nir_intrinsic_discard_if) {
		cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
				     get_src(ctx, instr->src[0]),
				     ctx->ac.i32_0, "");
	} else {
		assert(instr->intrinsic == nir_intrinsic_discard);
		cond = ctx->ac.i1false;
	}

	ctx->abi->emit_kill(ctx->abi, cond);
}
static LLVMValueRef
visit_load_local_invocation_index(struct ac_nir_context *ctx)
{
	LLVMValueRef result;
	LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
	result = LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
			      LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");

	return LLVMBuildAdd(ctx->ac.builder, result, thread_id, "");
}
static LLVMValueRef
visit_load_subgroup_id(struct ac_nir_context *ctx)
{
	if (ctx->stage == MESA_SHADER_COMPUTE) {
		LLVMValueRef result;
		result = LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
				      LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
		return LLVMBuildLShr(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 6, false), "");
	} else {
		return LLVMConstInt(ctx->ac.i32, 0, false);
	}
}
static LLVMValueRef
visit_load_num_subgroups(struct ac_nir_context *ctx)
{
	if (ctx->stage == MESA_SHADER_COMPUTE) {
		return LLVMBuildAnd(ctx->ac.builder, ctx->abi->tg_size,
				    LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
	} else {
		return LLVMConstInt(ctx->ac.i32, 1, false);
	}
}
static LLVMValueRef
visit_first_invocation(struct ac_nir_context *ctx)
{
	LLVMValueRef active_set = ac_build_ballot(&ctx->ac, ctx->ac.i32_1);
	const char *intr = ctx->ac.wave_size == 32 ? "llvm.cttz.i32" : "llvm.cttz.i64";

	/* The second argument is whether cttz(0) should be defined, but we do not care. */
	LLVMValueRef args[] = {active_set, ctx->ac.i1false};
	LLVMValueRef result = ac_build_intrinsic(&ctx->ac, intr,
						 ctx->ac.iN_wavemask, args, 2,
						 AC_FUNC_ATTR_NOUNWIND |
						 AC_FUNC_ATTR_READNONE);

	return LLVMBuildTrunc(ctx->ac.builder, result, ctx->ac.i32, "");
}
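/* ac_build_ballot() of a non-zero value yields the mask of active lanes, so
 * cttz of that mask is the lowest active lane id, i.e. the first
 * invocation.
 */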
static LLVMValueRef
visit_load_shared(struct ac_nir_context *ctx,
		  const nir_intrinsic_instr *instr)
{
	LLVMValueRef values[4], derived_ptr, index, ret;

	LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0]);

	for (int chan = 0; chan < instr->num_components; chan++) {
		index = LLVMConstInt(ctx->ac.i32, chan, 0);
		derived_ptr = LLVMBuildGEP(ctx->ac.builder, ptr, &index, 1, "");
		values[chan] = LLVMBuildLoad(ctx->ac.builder, derived_ptr, "");
	}

	ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
	return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
static void
visit_store_shared(struct ac_nir_context *ctx,
		   const nir_intrinsic_instr *instr)
{
	LLVMValueRef derived_ptr, data, index;
	LLVMBuilderRef builder = ctx->ac.builder;

	LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[1]);
	LLVMValueRef src = get_src(ctx, instr->src[0]);

	int writemask = nir_intrinsic_write_mask(instr);
	for (int chan = 0; chan < 4; chan++) {
		if (!(writemask & (1 << chan))) {
			continue;
		}
		data = ac_llvm_extract_elem(&ctx->ac, src, chan);
		index = LLVMConstInt(ctx->ac.i32, chan, 0);
		derived_ptr = LLVMBuildGEP(builder, ptr, &index, 1, "");
		LLVMBuildStore(builder, data, derived_ptr);
	}
}
static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx,
				     const nir_intrinsic_instr *instr,
				     LLVMValueRef ptr, int src_idx)
{
	LLVMValueRef result;
	LLVMValueRef src = get_src(ctx, instr->src[src_idx]);

	const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";

	if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap ||
	    instr->intrinsic == nir_intrinsic_deref_atomic_comp_swap) {
		LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);
		result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, src, src1, sync_scope);
		result = LLVMBuildExtractValue(ctx->ac.builder, result, 0, "");
	} else {
		LLVMAtomicRMWBinOp op;
		switch (instr->intrinsic) {
		case nir_intrinsic_shared_atomic_add:
		case nir_intrinsic_deref_atomic_add:
			op = LLVMAtomicRMWBinOpAdd;
			break;
		case nir_intrinsic_shared_atomic_umin:
		case nir_intrinsic_deref_atomic_umin:
			op = LLVMAtomicRMWBinOpUMin;
			break;
		case nir_intrinsic_shared_atomic_umax:
		case nir_intrinsic_deref_atomic_umax:
			op = LLVMAtomicRMWBinOpUMax;
			break;
		case nir_intrinsic_shared_atomic_imin:
		case nir_intrinsic_deref_atomic_imin:
			op = LLVMAtomicRMWBinOpMin;
			break;
		case nir_intrinsic_shared_atomic_imax:
		case nir_intrinsic_deref_atomic_imax:
			op = LLVMAtomicRMWBinOpMax;
			break;
		case nir_intrinsic_shared_atomic_and:
		case nir_intrinsic_deref_atomic_and:
			op = LLVMAtomicRMWBinOpAnd;
			break;
		case nir_intrinsic_shared_atomic_or:
		case nir_intrinsic_deref_atomic_or:
			op = LLVMAtomicRMWBinOpOr;
			break;
		case nir_intrinsic_shared_atomic_xor:
		case nir_intrinsic_deref_atomic_xor:
			op = LLVMAtomicRMWBinOpXor;
			break;
		case nir_intrinsic_shared_atomic_exchange:
		case nir_intrinsic_deref_atomic_exchange:
			op = LLVMAtomicRMWBinOpXchg;
			break;
		default:
			return NULL;
		}

		result = ac_build_atomic_rmw(&ctx->ac, op, ptr, ac_to_integer(&ctx->ac, src), sync_scope);
	}
	return result;
}
static LLVMValueRef load_sample_pos(struct ac_nir_context *ctx)
{
	LLVMValueRef values[2];
	LLVMValueRef pos[2];

	pos[0] = ac_to_float(&ctx->ac, ctx->abi->frag_pos[0]);
	pos[1] = ac_to_float(&ctx->ac, ctx->abi->frag_pos[1]);

	values[0] = ac_build_fract(&ctx->ac, pos[0], 32);
	values[1] = ac_build_fract(&ctx->ac, pos[1], 32);
	return ac_build_gather_values(&ctx->ac, values, 2);
}
static LLVMValueRef lookup_interp_param(struct ac_nir_context *ctx,
					enum glsl_interp_mode interp, unsigned location)
{
	switch (interp) {
	case INTERP_MODE_FLAT:
	default:
		return NULL;
	case INTERP_MODE_SMOOTH:
	case INTERP_MODE_NONE:
		if (location == INTERP_CENTER)
			return ctx->abi->persp_center;
		else if (location == INTERP_CENTROID)
			return ctx->abi->persp_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->abi->persp_sample;
		break;
	case INTERP_MODE_NOPERSPECTIVE:
		if (location == INTERP_CENTER)
			return ctx->abi->linear_center;
		else if (location == INTERP_CENTROID)
			return ctx->abi->linear_centroid;
		else if (location == INTERP_SAMPLE)
			return ctx->abi->linear_sample;
		break;
	}
	return NULL;
}
static LLVMValueRef barycentric_center(struct ac_nir_context *ctx,
				       unsigned mode)
{
	LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
	return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_offset(struct ac_nir_context *ctx,
				       unsigned mode,
				       LLVMValueRef offset)
{
	LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTER);
	LLVMValueRef src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_0, ""));
	LLVMValueRef src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->ac.builder, offset, ctx->ac.i32_1, ""));

	LLVMValueRef ij_out[2];
	LLVMValueRef ddxy_out = ac_build_ddxy_interp(&ctx->ac, interp_param);

	/*
	 * take the I then J parameters, and the DDX/Y for it, and
	 * calculate the IJ inputs for the interpolator.
	 * temp1 = ddx * offset/sample.x + I;
	 * interp_param.I = ddy * offset/sample.y + temp1;
	 * temp1 = ddx * offset/sample.x + J;
	 * interp_param.J = ddy * offset/sample.y + temp1;
	 */
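	/* In other words this is a first-order Taylor expansion of I/J around
	 * the pixel center: i(x + dx, y + dy) ~= i + dx*di/dx + dy*di/dy,
	 * evaluated below with two fmads per component using the derivatives
	 * from ac_build_ddxy_interp().
	 */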
	for (unsigned i = 0; i < 2; i++) {
		LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
		LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
		LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->ac.builder,
							      ddxy_out, ix_ll, "");
		LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->ac.builder,
							      ddxy_out, iy_ll, "");
		LLVMValueRef interp_el = LLVMBuildExtractElement(ctx->ac.builder,
								 interp_param, ix_ll, "");
		LLVMValueRef temp1, temp2;

		interp_el = LLVMBuildBitCast(ctx->ac.builder, interp_el,
					     ctx->ac.f32, "");

		temp1 = ac_build_fmad(&ctx->ac, ddx_el, src_c0, interp_el);
		temp2 = ac_build_fmad(&ctx->ac, ddy_el, src_c1, temp1);

		ij_out[i] = LLVMBuildBitCast(ctx->ac.builder,
					     temp2, ctx->ac.i32, "");
	}
	interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
	return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_centroid(struct ac_nir_context *ctx,
					 unsigned mode)
{
	LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_CENTROID);
	return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef barycentric_at_sample(struct ac_nir_context *ctx,
					  unsigned mode,
					  LLVMValueRef sample_id)
{
	if (ctx->abi->interp_at_sample_force_center)
		return barycentric_center(ctx, mode);

	LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);

	/* fetch sample ID */
	LLVMValueRef sample_pos = ctx->abi->load_sample_position(ctx->abi, sample_id);

	LLVMValueRef src_c0 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_0, "");
	src_c0 = LLVMBuildFSub(ctx->ac.builder, src_c0, halfval, "");
	LLVMValueRef src_c1 = LLVMBuildExtractElement(ctx->ac.builder, sample_pos, ctx->ac.i32_1, "");
	src_c1 = LLVMBuildFSub(ctx->ac.builder, src_c1, halfval, "");
	LLVMValueRef coords[] = { src_c0, src_c1 };
	LLVMValueRef offset = ac_build_gather_values(&ctx->ac, coords, 2);

	return barycentric_offset(ctx, mode, offset);
}
static LLVMValueRef barycentric_sample(struct ac_nir_context *ctx,
				       unsigned mode)
{
	LLVMValueRef interp_param = lookup_interp_param(ctx, mode, INTERP_SAMPLE);
	return LLVMBuildBitCast(ctx->ac.builder, interp_param, ctx->ac.v2i32, "");
}
static LLVMValueRef load_interpolated_input(struct ac_nir_context *ctx,
					    LLVMValueRef interp_param,
					    unsigned index, unsigned comp_start,
					    unsigned num_components,
					    unsigned bitsize)
{
	LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);

	interp_param = LLVMBuildBitCast(ctx->ac.builder,
					interp_param, ctx->ac.v2f32, "");
	LLVMValueRef i = LLVMBuildExtractElement(
		ctx->ac.builder, interp_param, ctx->ac.i32_0, "");
	LLVMValueRef j = LLVMBuildExtractElement(
		ctx->ac.builder, interp_param, ctx->ac.i32_1, "");

	LLVMValueRef values[4];
	assert(bitsize == 16 || bitsize == 32);
	for (unsigned comp = 0; comp < num_components; comp++) {
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, comp_start + comp, false);
		if (bitsize == 16) {
			values[comp] = ac_build_fs_interp_f16(&ctx->ac, llvm_chan, attr_number,
							      ctx->abi->prim_mask, i, j);
		} else {
			values[comp] = ac_build_fs_interp(&ctx->ac, llvm_chan, attr_number,
							  ctx->abi->prim_mask, i, j);
		}
	}

	return ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, num_components));
}
static LLVMValueRef load_flat_input(struct ac_nir_context *ctx,
				    unsigned index, unsigned comp_start,
				    unsigned num_components,
				    unsigned bit_size)
{
	LLVMValueRef attr_number = LLVMConstInt(ctx->ac.i32, index, false);

	LLVMValueRef values[8];

	/* Each component of a 64-bit value takes up two GL-level channels. */
	unsigned channels =
		bit_size == 64 ? num_components * 2 : num_components;

	for (unsigned chan = 0; chan < channels; chan++) {
		if (comp_start + chan > 4)
			attr_number = LLVMConstInt(ctx->ac.i32, index + 1, false);
		LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, (comp_start + chan) % 4, false);
		values[chan] = ac_build_fs_interp_mov(&ctx->ac,
						      LLVMConstInt(ctx->ac.i32, 2, false),
						      llvm_chan,
						      attr_number,
						      ctx->abi->prim_mask);
		values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
		values[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
						       bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
	}

	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, channels);
	if (bit_size == 64) {
		LLVMTypeRef type = num_components == 1 ? ctx->ac.i64 :
			LLVMVectorType(ctx->ac.i64, num_components);
		result = LLVMBuildBitCast(ctx->ac.builder, result, type, "");
	}
	return result;
}
static void visit_intrinsic(struct ac_nir_context *ctx,
			    nir_intrinsic_instr *instr)
{
	LLVMValueRef result = NULL;

	switch (instr->intrinsic) {
	case nir_intrinsic_ballot:
		result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
		if (ctx->ac.ballot_mask_bits > ctx->ac.wave_size)
			result = LLVMBuildZExt(ctx->ac.builder, result, ctx->ac.iN_ballotmask, "");
		break;
	case nir_intrinsic_read_invocation:
		result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]),
					   get_src(ctx, instr->src[1]));
		break;
	case nir_intrinsic_read_first_invocation:
		result = ac_build_readlane(&ctx->ac, get_src(ctx, instr->src[0]), NULL);
		break;
	case nir_intrinsic_load_subgroup_invocation:
		result = ac_get_thread_id(&ctx->ac);
		break;
	case nir_intrinsic_load_work_group_id: {
		LLVMValueRef values[3];

		for (int i = 0; i < 3; i++) {
			values[i] = ctx->abi->workgroup_ids[i] ?
				    ctx->abi->workgroup_ids[i] : ctx->ac.i32_0;
		}

		result = ac_build_gather_values(&ctx->ac, values, 3);
		break;
	}
	case nir_intrinsic_load_base_vertex:
	case nir_intrinsic_load_first_vertex:
		result = ctx->abi->load_base_vertex(ctx->abi);
		break;
	case nir_intrinsic_load_local_group_size:
		result = ctx->abi->load_local_group_size(ctx->abi);
		break;
	case nir_intrinsic_load_vertex_id:
		result = LLVMBuildAdd(ctx->ac.builder, ctx->abi->vertex_id,
				      ctx->abi->base_vertex, "");
		break;
	case nir_intrinsic_load_vertex_id_zero_base: {
		result = ctx->abi->vertex_id;
		break;
	}
	case nir_intrinsic_load_local_invocation_id: {
		result = ctx->abi->local_invocation_ids;
		break;
	}
	case nir_intrinsic_load_base_instance:
		result = ctx->abi->start_instance;
		break;
	case nir_intrinsic_load_draw_id:
		result = ctx->abi->draw_id;
		break;
	case nir_intrinsic_load_view_index:
		result = ctx->abi->view_index;
		break;
	case nir_intrinsic_load_invocation_id:
		if (ctx->stage == MESA_SHADER_TESS_CTRL) {
			result = ac_unpack_param(&ctx->ac, ctx->abi->tcs_rel_ids, 8, 5);
		} else {
			if (ctx->ac.chip_class >= GFX10) {
				result = LLVMBuildAnd(ctx->ac.builder,
						      ctx->abi->gs_invocation_id,
						      LLVMConstInt(ctx->ac.i32, 127, 0), "");
			} else {
				result = ctx->abi->gs_invocation_id;
			}
		}
		break;
	case nir_intrinsic_load_primitive_id:
		if (ctx->stage == MESA_SHADER_GEOMETRY) {
			result = ctx->abi->gs_prim_id;
		} else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
			result = ctx->abi->tcs_patch_id;
		} else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
			result = ctx->abi->tes_patch_id;
		} else
			fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
		break;
	case nir_intrinsic_load_sample_id:
		result = ac_unpack_param(&ctx->ac, ctx->abi->ancillary, 8, 4);
		break;
	case nir_intrinsic_load_sample_pos:
		result = load_sample_pos(ctx);
		break;
	case nir_intrinsic_load_sample_mask_in:
		result = ctx->abi->load_sample_mask_in(ctx->abi);
		break;
	case nir_intrinsic_load_frag_coord: {
		LLVMValueRef values[4] = {
			ctx->abi->frag_pos[0],
			ctx->abi->frag_pos[1],
			ctx->abi->frag_pos[2],
			ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, ctx->abi->frag_pos[3])
		};
		result = ac_to_integer(&ctx->ac,
				       ac_build_gather_values(&ctx->ac, values, 4));
		break;
	}
	case nir_intrinsic_load_layer_id:
		result = ctx->abi->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
		break;
	case nir_intrinsic_load_front_face:
		result = ctx->abi->front_face;
		break;
	case nir_intrinsic_load_helper_invocation:
		result = ac_build_load_helper_invocation(&ctx->ac);
		break;
	case nir_intrinsic_load_color0:
		result = ctx->abi->color0;
		break;
	case nir_intrinsic_load_color1:
		result = ctx->abi->color1;
		break;
	case nir_intrinsic_load_user_data_amd:
		assert(LLVMTypeOf(ctx->abi->user_data) == ctx->ac.v4i32);
		result = ctx->abi->user_data;
		break;
	case nir_intrinsic_load_instance_id:
		result = ctx->abi->instance_id;
		break;
	case nir_intrinsic_load_num_work_groups:
		result = ctx->abi->num_work_groups;
		break;
	case nir_intrinsic_load_local_invocation_index:
		result = visit_load_local_invocation_index(ctx);
		break;
	case nir_intrinsic_load_subgroup_id:
		result = visit_load_subgroup_id(ctx);
		break;
	case nir_intrinsic_load_num_subgroups:
		result = visit_load_num_subgroups(ctx);
		break;
	case nir_intrinsic_first_invocation:
		result = visit_first_invocation(ctx);
		break;
	case nir_intrinsic_load_push_constant:
		result = visit_load_push_constant(ctx, instr);
		break;
	case nir_intrinsic_vulkan_resource_index: {
		LLVMValueRef index = get_src(ctx, instr->src[0]);
		unsigned desc_set = nir_intrinsic_desc_set(instr);
		unsigned binding = nir_intrinsic_binding(instr);

		result = ctx->abi->load_resource(ctx->abi, index, desc_set,
						 binding);
		break;
	}
	case nir_intrinsic_vulkan_resource_reindex:
		result = visit_vulkan_resource_reindex(ctx, instr);
		break;
	case nir_intrinsic_store_ssbo:
		visit_store_ssbo(ctx, instr);
		break;
	case nir_intrinsic_load_ssbo:
		result = visit_load_buffer(ctx, instr);
		break;
	case nir_intrinsic_ssbo_atomic_add:
	case nir_intrinsic_ssbo_atomic_imin:
	case nir_intrinsic_ssbo_atomic_umin:
	case nir_intrinsic_ssbo_atomic_imax:
	case nir_intrinsic_ssbo_atomic_umax:
	case nir_intrinsic_ssbo_atomic_and:
	case nir_intrinsic_ssbo_atomic_or:
	case nir_intrinsic_ssbo_atomic_xor:
	case nir_intrinsic_ssbo_atomic_exchange:
	case nir_intrinsic_ssbo_atomic_comp_swap:
		result = visit_atomic_ssbo(ctx, instr);
		break;
	case nir_intrinsic_load_ubo:
		result = visit_load_ubo_buffer(ctx, instr);
		break;
	case nir_intrinsic_get_buffer_size:
		result = visit_get_buffer_size(ctx, instr);
		break;
	case nir_intrinsic_load_deref:
		result = visit_load_var(ctx, instr);
		break;
	case nir_intrinsic_store_deref:
		visit_store_var(ctx, instr);
		break;
	case nir_intrinsic_load_shared:
		result = visit_load_shared(ctx, instr);
		break;
	case nir_intrinsic_store_shared:
		visit_store_shared(ctx, instr);
		break;
	case nir_intrinsic_bindless_image_samples:
	case nir_intrinsic_image_deref_samples:
		result = visit_image_samples(ctx, instr);
		break;
	case nir_intrinsic_bindless_image_load:
		result = visit_image_load(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_load:
		result = visit_image_load(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_store:
		visit_image_store(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_store:
		visit_image_store(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_atomic_add:
	case nir_intrinsic_bindless_image_atomic_imin:
	case nir_intrinsic_bindless_image_atomic_umin:
	case nir_intrinsic_bindless_image_atomic_imax:
	case nir_intrinsic_bindless_image_atomic_umax:
	case nir_intrinsic_bindless_image_atomic_and:
	case nir_intrinsic_bindless_image_atomic_or:
	case nir_intrinsic_bindless_image_atomic_xor:
	case nir_intrinsic_bindless_image_atomic_exchange:
	case nir_intrinsic_bindless_image_atomic_comp_swap:
	case nir_intrinsic_bindless_image_atomic_inc_wrap:
	case nir_intrinsic_bindless_image_atomic_dec_wrap:
		result = visit_image_atomic(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_imin:
	case nir_intrinsic_image_deref_atomic_umin:
	case nir_intrinsic_image_deref_atomic_imax:
	case nir_intrinsic_image_deref_atomic_umax:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
	case nir_intrinsic_image_deref_atomic_inc_wrap:
	case nir_intrinsic_image_deref_atomic_dec_wrap:
		result = visit_image_atomic(ctx, instr, false);
		break;
	case nir_intrinsic_bindless_image_size:
		result = visit_image_size(ctx, instr, true);
		break;
	case nir_intrinsic_image_deref_size:
		result = visit_image_size(ctx, instr, false);
		break;
	case nir_intrinsic_shader_clock:
		result = ac_build_shader_clock(&ctx->ac);
		break;
	case nir_intrinsic_discard:
	case nir_intrinsic_discard_if:
		emit_discard(ctx, instr);
		break;
	case nir_intrinsic_memory_barrier:
	case nir_intrinsic_group_memory_barrier:
	case nir_intrinsic_memory_barrier_atomic_counter:
	case nir_intrinsic_memory_barrier_buffer:
	case nir_intrinsic_memory_barrier_image:
	case nir_intrinsic_memory_barrier_shared:
		emit_membar(&ctx->ac, instr);
		break;
	case nir_intrinsic_barrier:
		ac_emit_barrier(&ctx->ac, ctx->stage);
		break;
	case nir_intrinsic_shared_atomic_add:
	case nir_intrinsic_shared_atomic_imin:
	case nir_intrinsic_shared_atomic_umin:
	case nir_intrinsic_shared_atomic_imax:
	case nir_intrinsic_shared_atomic_umax:
	case nir_intrinsic_shared_atomic_and:
	case nir_intrinsic_shared_atomic_or:
	case nir_intrinsic_shared_atomic_xor:
	case nir_intrinsic_shared_atomic_exchange:
	case nir_intrinsic_shared_atomic_comp_swap: {
		LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0]);
		result = visit_var_atomic(ctx, instr, ptr, 1);
		break;
	}
	case nir_intrinsic_deref_atomic_add:
	case nir_intrinsic_deref_atomic_imin:
	case nir_intrinsic_deref_atomic_umin:
	case nir_intrinsic_deref_atomic_imax:
	case nir_intrinsic_deref_atomic_umax:
	case nir_intrinsic_deref_atomic_and:
	case nir_intrinsic_deref_atomic_or:
	case nir_intrinsic_deref_atomic_xor:
	case nir_intrinsic_deref_atomic_exchange:
	case nir_intrinsic_deref_atomic_comp_swap: {
		LLVMValueRef ptr = get_src(ctx, instr->src[0]);
		result = visit_var_atomic(ctx, instr, ptr, 1);
		break;
	}
	case nir_intrinsic_load_barycentric_pixel:
		result = barycentric_center(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_centroid:
		result = barycentric_centroid(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_sample:
		result = barycentric_sample(ctx, nir_intrinsic_interp_mode(instr));
		break;
	case nir_intrinsic_load_barycentric_at_offset: {
		LLVMValueRef offset = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
		result = barycentric_offset(ctx, nir_intrinsic_interp_mode(instr), offset);
		break;
	}
	case nir_intrinsic_load_barycentric_at_sample: {
		LLVMValueRef sample_id = get_src(ctx, instr->src[0]);
		result = barycentric_at_sample(ctx, nir_intrinsic_interp_mode(instr), sample_id);
		break;
	}
	case nir_intrinsic_load_interpolated_input: {
		/* We assume any indirect loads have been lowered away */
		ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[1]);
		assert(offset);
		assert(offset[0].i32 == 0);

		LLVMValueRef interp_param = get_src(ctx, instr->src[0]);
		unsigned index = nir_intrinsic_base(instr);
		unsigned component = nir_intrinsic_component(instr);
		result = load_interpolated_input(ctx, interp_param, index,
						 component,
						 instr->dest.ssa.num_components,
						 instr->dest.ssa.bit_size);
		break;
	}
	case nir_intrinsic_load_input: {
		/* We only lower inputs for fragment shaders ATM */
		ASSERTED nir_const_value *offset = nir_src_as_const_value(instr->src[0]);
		assert(offset);
		assert(offset[0].i32 == 0);

		unsigned index = nir_intrinsic_base(instr);
		unsigned component = nir_intrinsic_component(instr);
		result = load_flat_input(ctx, index, component,
					 instr->dest.ssa.num_components,
					 instr->dest.ssa.bit_size);
		break;
	}
	case nir_intrinsic_emit_vertex:
		ctx->abi->emit_vertex(ctx->abi, nir_intrinsic_stream_id(instr), ctx->abi->outputs);
		break;
	case nir_intrinsic_end_primitive:
		ctx->abi->emit_primitive(ctx->abi, nir_intrinsic_stream_id(instr));
		break;
	case nir_intrinsic_load_tess_coord:
		result = ctx->abi->load_tess_coord(ctx->abi);
		break;
	case nir_intrinsic_load_tess_level_outer:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, false);
		break;
	case nir_intrinsic_load_tess_level_inner:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, false);
		break;
	case nir_intrinsic_load_tess_level_outer_default:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_OUTER, true);
		break;
	case nir_intrinsic_load_tess_level_inner_default:
		result = ctx->abi->load_tess_level(ctx->abi, VARYING_SLOT_TESS_LEVEL_INNER, true);
		break;
	case nir_intrinsic_load_patch_vertices_in:
		result = ctx->abi->load_patch_vertices_in(ctx->abi);
		break;
	case nir_intrinsic_vote_all: {
		LLVMValueRef tmp = ac_build_vote_all(&ctx->ac, get_src(ctx, instr->src[0]));
		result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}
	case nir_intrinsic_vote_any: {
		LLVMValueRef tmp = ac_build_vote_any(&ctx->ac, get_src(ctx, instr->src[0]));
		result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
		break;
	}
	case nir_intrinsic_shuffle:
		result = ac_build_shuffle(&ctx->ac, get_src(ctx, instr->src[0]),
					  get_src(ctx, instr->src[1]));
		break;
	case nir_intrinsic_reduce:
		result = ac_build_reduce(&ctx->ac,
					 get_src(ctx, instr->src[0]),
					 instr->const_index[0],
					 instr->const_index[1]);
		break;
	case nir_intrinsic_inclusive_scan:
		result = ac_build_inclusive_scan(&ctx->ac,
						 get_src(ctx, instr->src[0]),
						 instr->const_index[0]);
		break;
	case nir_intrinsic_exclusive_scan:
		result = ac_build_exclusive_scan(&ctx->ac,
						 get_src(ctx, instr->src[0]),
						 instr->const_index[0]);
		break;
	case nir_intrinsic_quad_broadcast: {
		unsigned lane = nir_src_as_uint(instr->src[1]);
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]),
					       lane, lane, lane, lane);
		break;
	}
	case nir_intrinsic_quad_swap_horizontal:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 1, 0, 3, 2);
		break;
	case nir_intrinsic_quad_swap_vertical:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 2, 3, 0, 1);
		break;
	case nir_intrinsic_quad_swap_diagonal:
		result = ac_build_quad_swizzle(&ctx->ac, get_src(ctx, instr->src[0]), 3, 2, 1, 0);
		break;
3628 case nir_intrinsic_quad_swizzle_amd
: {
3629 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
3630 result
= ac_build_quad_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
3631 mask
& 0x3, (mask
>> 2) & 0x3,
3632 (mask
>> 4) & 0x3, (mask
>> 6) & 0x3);
3635 case nir_intrinsic_masked_swizzle_amd
: {
3636 uint32_t mask
= nir_intrinsic_swizzle_mask(instr
);
3637 result
= ac_build_ds_swizzle(&ctx
->ac
, get_src(ctx
, instr
->src
[0]), mask
);
3640 case nir_intrinsic_write_invocation_amd
:
3641 result
= ac_build_writelane(&ctx
->ac
, get_src(ctx
, instr
->src
[0]),
3642 get_src(ctx
, instr
->src
[1]),
3643 get_src(ctx
, instr
->src
[2]));
3645 case nir_intrinsic_mbcnt_amd
:
3646 result
= ac_build_mbcnt(&ctx
->ac
, get_src(ctx
, instr
->src
[0]));
3648 case nir_intrinsic_load_scratch
: {
3649 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
3650 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
3652 LLVMTypeRef comp_type
=
3653 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
3654 LLVMTypeRef vec_type
=
3655 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
3656 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
3657 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
3658 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
3659 LLVMPointerType(vec_type
, addr_space
), "");
3660 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
3663 case nir_intrinsic_store_scratch
: {
3664 LLVMValueRef offset
= get_src(ctx
, instr
->src
[1]);
3665 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->scratch
,
3667 LLVMTypeRef comp_type
=
3668 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->src
[0].ssa
->bit_size
);
3669 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
3670 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
3671 LLVMPointerType(comp_type
, addr_space
), "");
3672 LLVMValueRef src
= get_src(ctx
, instr
->src
[0]);
3673 unsigned wrmask
= nir_intrinsic_write_mask(instr
);
3676 u_bit_scan_consecutive_range(&wrmask
, &start
, &count
);
3678 LLVMValueRef offset
= LLVMConstInt(ctx
->ac
.i32
, start
, false);
3679 LLVMValueRef offset_ptr
= LLVMBuildGEP(ctx
->ac
.builder
, ptr
, &offset
, 1, "");
3680 LLVMTypeRef vec_type
=
3681 count
== 1 ? comp_type
: LLVMVectorType(comp_type
, count
);
3682 offset_ptr
= LLVMBuildBitCast(ctx
->ac
.builder
,
3684 LLVMPointerType(vec_type
, addr_space
),
3686 LLVMValueRef offset_src
=
3687 ac_extract_components(&ctx
->ac
, src
, start
, count
);
3688 LLVMBuildStore(ctx
->ac
.builder
, offset_src
, offset_ptr
);
3692 case nir_intrinsic_load_constant
: {
3693 LLVMValueRef offset
= get_src(ctx
, instr
->src
[0]);
3694 LLVMValueRef base
= LLVMConstInt(ctx
->ac
.i32
,
3695 nir_intrinsic_base(instr
),
3697 offset
= LLVMBuildAdd(ctx
->ac
.builder
, offset
, base
, "");
3698 LLVMValueRef ptr
= ac_build_gep0(&ctx
->ac
, ctx
->constant_data
,
3700 LLVMTypeRef comp_type
=
3701 LLVMIntTypeInContext(ctx
->ac
.context
, instr
->dest
.ssa
.bit_size
);
3702 LLVMTypeRef vec_type
=
3703 instr
->dest
.ssa
.num_components
== 1 ? comp_type
:
3704 LLVMVectorType(comp_type
, instr
->dest
.ssa
.num_components
);
3705 unsigned addr_space
= LLVMGetPointerAddressSpace(LLVMTypeOf(ptr
));
3706 ptr
= LLVMBuildBitCast(ctx
->ac
.builder
, ptr
,
3707 LLVMPointerType(vec_type
, addr_space
), "");
3708 result
= LLVMBuildLoad(ctx
->ac
.builder
, ptr
, "");
3712 fprintf(stderr
, "Unknown intrinsic: ");
3713 nir_print_instr(&instr
->instr
, stderr
);
3714 fprintf(stderr
, "\n");
3718 ctx
->ssa_defs
[instr
->dest
.ssa
.index
] = result
;
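/* Fetch a bindless handle from the driver's reserved UBO (bound at
 * index 0 via ctx->abi->load_ubo). The arithmetic below implies each
 * handle occupies 8 bytes (64 bits) and that base_index scales by 4;
 * both are driver-side conventions rather than something defined here.
 */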
static LLVMValueRef
get_bindless_index_from_uniform(struct ac_nir_context *ctx,
				unsigned base_index,
				unsigned constant_index,
				LLVMValueRef dynamic_index)
{
	LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, base_index * 4, 0);
	LLVMValueRef index = LLVMBuildAdd(ctx->ac.builder, dynamic_index,
					  LLVMConstInt(ctx->ac.i32, constant_index, 0), "");

	/* Bindless uniforms are 64-bit, so multiply the index by 8. */
	index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i32, 8, 0), "");
	offset = LLVMBuildAdd(ctx->ac.builder, offset, index, "");

	LLVMValueRef ubo_index = ctx->abi->load_ubo(ctx->abi, ctx->ac.i32_0);

	LLVMValueRef ret = ac_build_buffer_load(&ctx->ac, ubo_index, 1, NULL, offset,
						NULL, 0, 0, true, true);

	return LLVMBuildBitCast(ctx->ac.builder, ret, ctx->ac.i32, "");
}
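/* Resolve a texture/image variable to a descriptor. With no deref
 * (bindless handles supplied directly as an instruction source), the
 * handle value itself is the index. With a deref chain, walk from the
 * access back to the variable, folding constant array indices into
 * constant_index and accumulating dynamic ones into an LLVM value,
 * then let the ABI callback perform the descriptor-set lookup.
 */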
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
				     nir_deref_instr *deref_instr,
				     enum ac_descriptor_type desc_type,
				     const nir_instr *instr,
				     bool image, bool write)
{
	LLVMValueRef index = NULL;
	unsigned constant_index = 0;
	unsigned descriptor_set;
	unsigned base_index;
	bool bindless = false;

	if (!deref_instr) {
		descriptor_set = 0;
		if (image) {
			nir_intrinsic_instr *img_instr = nir_instr_as_intrinsic(instr);
			base_index = 0;
			bindless = true;
			index = get_src(ctx, img_instr->src[0]);
		} else {
			nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
			int sampSrcIdx = nir_tex_instr_src_index(tex_instr,
								 nir_tex_src_sampler_handle);
			if (sampSrcIdx != -1) {
				base_index = 0;
				bindless = true;
				index = get_src(ctx, tex_instr->src[sampSrcIdx].src);
			} else {
				assert(tex_instr && !image);
				base_index = tex_instr->sampler_index;
			}
		}
	} else {
		while(deref_instr->deref_type != nir_deref_type_var) {
			if (deref_instr->deref_type == nir_deref_type_array) {
				unsigned array_size = glsl_get_aoa_size(deref_instr->type);
				if (!array_size)
					array_size = 1;

				if (nir_src_is_const(deref_instr->arr.index)) {
					constant_index += array_size * nir_src_as_uint(deref_instr->arr.index);
				} else {
					LLVMValueRef indirect = get_src(ctx, deref_instr->arr.index);

					indirect = LLVMBuildMul(ctx->ac.builder, indirect,
								LLVMConstInt(ctx->ac.i32, array_size, false), "");

					if (!index)
						index = indirect;
					else
						index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
				}

				deref_instr = nir_src_as_deref(deref_instr->parent);
			} else if (deref_instr->deref_type == nir_deref_type_struct) {
				unsigned sidx = deref_instr->strct.index;
				deref_instr = nir_src_as_deref(deref_instr->parent);
				constant_index += glsl_get_struct_location_offset(deref_instr->type, sidx);
			} else {
				unreachable("Unsupported deref type");
			}
		}
		descriptor_set = deref_instr->var->data.descriptor_set;

		if (deref_instr->var->data.bindless) {
			/* For now just assert on unhandled variable types. */
			assert(deref_instr->var->data.mode == nir_var_uniform);

			base_index = deref_instr->var->data.driver_location;
			bindless = true;

			index = index ? index : ctx->ac.i32_0;
			index = get_bindless_index_from_uniform(ctx, base_index,
								constant_index, index);
		} else
			base_index = deref_instr->var->data.binding;
	}

	return ctx->abi->load_sampler_desc(ctx->abi,
					   descriptor_set,
					   base_index,
					   constant_index, index,
					   desc_type, image, write, bindless);
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
 *
 * GFX6-GFX7:
 *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
 *   filtering manually. The driver sets img7 to a mask clearing
 *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
 *     s_and_b32 samp0, samp0, img7
 *
 * GFX8:
 *   The ANISO_OVERRIDE sampler field enables this fix in TA.
 */
static LLVMValueRef sici_fix_sampler_aniso(struct ac_nir_context *ctx,
					   LLVMValueRef res, LLVMValueRef samp)
{
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMValueRef img7, samp0;

	if (ctx->ac.chip_class >= GFX8)
		return samp;

	img7 = LLVMBuildExtractElement(builder, res,
				       LLVMConstInt(ctx->ac.i32, 7, 0), "");
	samp0 = LLVMBuildExtractElement(builder, samp,
					LLVMConstInt(ctx->ac.i32, 0, 0), "");
	samp0 = LLVMBuildAnd(builder, samp0, img7, "");
	return LLVMBuildInsertElement(builder, samp, samp0,
				      LLVMConstInt(ctx->ac.i32, 0, 0), "");
}
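/* Gather the descriptors a texture instruction needs: the resource
 * (image or buffer), the sampler, and the FMASK descriptor for MSAA
 * fetches. A nir_tex_src_plane source selects a per-plane descriptor
 * (AC_DESC_PLANE_0 + plane), which appears to be how multi-planar
 * (e.g. YUV) formats are handled.
 */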
static void tex_fetch_ptrs(struct ac_nir_context *ctx,
			   nir_tex_instr *instr,
			   LLVMValueRef *res_ptr, LLVMValueRef *samp_ptr,
			   LLVMValueRef *fmask_ptr)
{
	nir_deref_instr *texture_deref_instr = NULL;
	nir_deref_instr *sampler_deref_instr = NULL;
	int plane = -1;

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			texture_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		case nir_tex_src_sampler_deref:
			sampler_deref_instr = nir_src_as_deref(instr->src[i].src);
			break;
		case nir_tex_src_plane:
			plane = nir_src_as_int(instr->src[i].src);
			break;
		default:
			break;
		}
	}

	if (!sampler_deref_instr)
		sampler_deref_instr = texture_deref_instr;

	enum ac_descriptor_type main_descriptor = instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE;

	if (plane >= 0) {
		assert(instr->op != nir_texop_txf_ms &&
		       instr->op != nir_texop_samples_identical);
		assert(instr->sampler_dim != GLSL_SAMPLER_DIM_BUF);

		main_descriptor = AC_DESC_PLANE_0 + plane;
	}

	*res_ptr = get_sampler_desc(ctx, texture_deref_instr, main_descriptor, &instr->instr, false, false);

	if (samp_ptr) {
		*samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, AC_DESC_SAMPLER, &instr->instr, false, false);
		if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
			*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
	}
	if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
			  instr->op == nir_texop_samples_identical))
		*fmask_ptr = get_sampler_desc(ctx, texture_deref_instr, AC_DESC_FMASK, &instr->instr, false, false);
}
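/* Array layer selection: GL requires the layer coordinate to be
 * rounded to the nearest integer, so round in float and convert the
 * result back to integer.
 */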
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
				      LLVMValueRef coord)
{
	coord = ac_to_float(ctx, coord);
	coord = ac_build_round(ctx, coord);
	coord = ac_to_integer(ctx, coord);
	return coord;
}
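/* Translate one NIR texture instruction into an AMD image intrinsic.
 * Roughly: scatter the sources into ac_image_args, handle query ops
 * that don't sample (txs on buffers, texture_samples), pack offsets
 * and derivatives, apply per-chip coordinate fixups (cube coords,
 * GFX9 1D quirks, FMASK-based sample-index adjustment), then build
 * the intrinsic and massage the result to match the NIR dest.
 */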
static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
{
	LLVMValueRef result = NULL;
	struct ac_image_args args = { 0 };
	LLVMValueRef fmask_ptr = NULL, sample_index = NULL;
	LLVMValueRef ddx = NULL, ddy = NULL;
	unsigned offset_src = 0;

	tex_fetch_ptrs(ctx, instr, &args.resource, &args.sampler, &fmask_ptr);

	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_coord: {
			LLVMValueRef coord = get_src(ctx, instr->src[i].src);
			for (unsigned chan = 0; chan < instr->coord_components; ++chan)
				args.coords[chan] = ac_llvm_extract_elem(&ctx->ac, coord, chan);
			break;
		}
		case nir_tex_src_projector:
			break;
		case nir_tex_src_comparator:
			if (instr->is_shadow) {
				args.compare = get_src(ctx, instr->src[i].src);
				args.compare = ac_to_float(&ctx->ac, args.compare);
			}
			break;
		case nir_tex_src_offset:
			args.offset = get_src(ctx, instr->src[i].src);
			offset_src = i;
			break;
		case nir_tex_src_bias:
			if (instr->op == nir_texop_txb)
				args.bias = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_lod: {
			if (nir_src_is_const(instr->src[i].src) && nir_src_as_uint(instr->src[i].src) == 0)
				args.level_zero = true;
			else
				args.lod = get_src(ctx, instr->src[i].src);
			break;
		}
		case nir_tex_src_ms_index:
			sample_index = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_ms_mcs:
			break;
		case nir_tex_src_ddx:
			ddx = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_ddy:
			ddy = get_src(ctx, instr->src[i].src);
			break;
		case nir_tex_src_texture_offset:
		case nir_tex_src_sampler_offset:
		case nir_tex_src_plane:
		default:
			break;
		}
	}

	if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
		result = get_buffer_size(ctx, args.resource, true);
		goto write_result;
	}

	if (instr->op == nir_texop_texture_samples) {
		LLVMValueRef res, samples, is_msaa;
		res = LLVMBuildBitCast(ctx->ac.builder, args.resource, ctx->ac.v8i32, "");
		samples = LLVMBuildExtractElement(ctx->ac.builder, res,
						  LLVMConstInt(ctx->ac.i32, 3, false), "");
		is_msaa = LLVMBuildLShr(ctx->ac.builder, samples,
					LLVMConstInt(ctx->ac.i32, 28, false), "");
		is_msaa = LLVMBuildAnd(ctx->ac.builder, is_msaa,
				       LLVMConstInt(ctx->ac.i32, 0xe, false), "");
		is_msaa = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, is_msaa,
					LLVMConstInt(ctx->ac.i32, 0xe, false), "");

		samples = LLVMBuildLShr(ctx->ac.builder, samples,
					LLVMConstInt(ctx->ac.i32, 16, false), "");
		samples = LLVMBuildAnd(ctx->ac.builder, samples,
				       LLVMConstInt(ctx->ac.i32, 0xf, false), "");
		samples = LLVMBuildShl(ctx->ac.builder, ctx->ac.i32_1,
				       samples, "");
		samples = LLVMBuildSelect(ctx->ac.builder, is_msaa, samples,
					  ctx->ac.i32_1, "");
		result = samples;
		goto write_result;
	}

	if (args.offset && instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
		LLVMValueRef offset[3], pack;
		for (unsigned chan = 0; chan < 3; ++chan)
			offset[chan] = ctx->ac.i32_0;

		unsigned num_components = ac_get_llvm_num_components(args.offset);
		for (unsigned chan = 0; chan < num_components; chan++) {
			offset[chan] = ac_llvm_extract_elem(&ctx->ac, args.offset, chan);
			offset[chan] = LLVMBuildAnd(ctx->ac.builder, offset[chan],
						    LLVMConstInt(ctx->ac.i32, 0x3f, false), "");
			if (chan)
				offset[chan] = LLVMBuildShl(ctx->ac.builder, offset[chan],
							    LLVMConstInt(ctx->ac.i32, chan * 8, false), "");
		}
		pack = LLVMBuildOr(ctx->ac.builder, offset[0], offset[1], "");
		pack = LLVMBuildOr(ctx->ac.builder, pack, offset[2], "");
		args.offset = pack;
	}

	/* Section 8.23.1 (Depth Texture Comparison Mode) of the
	 * OpenGL 4.5 spec says:
	 *
	 *    "If the texture’s internal format indicates a fixed-point
	 *     depth texture, then D_t and D_ref are clamped to the
	 *     range [0, 1]; otherwise no clamping is performed."
	 *
	 * TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
	 * so the depth comparison value isn't clamped for Z16 and
	 * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
	 * an explicitly clamped 32-bit float format.
	 */
	if (args.compare &&
	    ctx->ac.chip_class >= GFX8 &&
	    ctx->ac.chip_class <= GFX9 &&
	    ctx->abi->clamp_shadow_reference) {
		LLVMValueRef upgraded, clamped;

		upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
						   LLVMConstInt(ctx->ac.i32, 3, false), "");
		upgraded = LLVMBuildLShr(ctx->ac.builder, upgraded,
					 LLVMConstInt(ctx->ac.i32, 29, false), "");
		upgraded = LLVMBuildTrunc(ctx->ac.builder, upgraded, ctx->ac.i1, "");
		clamped = ac_build_clamp(&ctx->ac, args.compare);
		args.compare = LLVMBuildSelect(ctx->ac.builder, upgraded, clamped,
					       args.compare, "");
	}

	/* Pack derivatives. */
	if (ddx || ddy) {
		int num_src_deriv_channels, num_dest_deriv_channels;
		switch (instr->sampler_dim) {
		case GLSL_SAMPLER_DIM_3D:
		case GLSL_SAMPLER_DIM_CUBE:
			num_src_deriv_channels = 3;
			num_dest_deriv_channels = 3;
			break;
		case GLSL_SAMPLER_DIM_2D:
		default:
			num_src_deriv_channels = 2;
			num_dest_deriv_channels = 2;
			break;
		case GLSL_SAMPLER_DIM_1D:
			num_src_deriv_channels = 1;
			if (ctx->ac.chip_class == GFX9) {
				num_dest_deriv_channels = 2;
			} else {
				num_dest_deriv_channels = 1;
			}
			break;
		}

		for (unsigned i = 0; i < num_src_deriv_channels; i++) {
			args.derivs[i] = ac_to_float(&ctx->ac,
				ac_llvm_extract_elem(&ctx->ac, ddx, i));
			args.derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac,
				ac_llvm_extract_elem(&ctx->ac, ddy, i));
		}
		for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
			args.derivs[i] = ctx->ac.f32_0;
			args.derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
		}
	}

	if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && args.coords[0]) {
		for (unsigned chan = 0; chan < instr->coord_components; chan++)
			args.coords[chan] = ac_to_float(&ctx->ac, args.coords[chan]);
		if (instr->coord_components == 3)
			args.coords[3] = LLVMGetUndef(ctx->ac.f32);
		ac_prepare_cube_coords(&ctx->ac,
				       instr->op == nir_texop_txd, instr->is_array,
				       instr->op == nir_texop_lod, args.coords, args.derivs);
	}

	/* Texture coordinate fixups. */
	if (instr->coord_components > 1 &&
	    instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
	    instr->is_array &&
	    instr->op != nir_texop_txf) {
		args.coords[1] = apply_round_slice(&ctx->ac, args.coords[1]);
	}

	if (instr->coord_components > 2 &&
	    (instr->sampler_dim == GLSL_SAMPLER_DIM_2D ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS) &&
	    instr->is_array &&
	    instr->op != nir_texop_txf && instr->op != nir_texop_txf_ms) {
		args.coords[2] = apply_round_slice(&ctx->ac, args.coords[2]);
	}

	if (ctx->ac.chip_class == GFX9 &&
	    instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
	    instr->op != nir_texop_lod) {
		LLVMValueRef filler;
		if (instr->op == nir_texop_txf)
			filler = ctx->ac.i32_0;
		else
			filler = LLVMConstReal(ctx->ac.f32, 0.5);

		if (instr->is_array)
			args.coords[2] = args.coords[1];
		args.coords[1] = filler;
	}

	/* Pack the sample index. */
	if (instr->op == nir_texop_txf_ms && sample_index)
		args.coords[instr->coord_components] = sample_index;

	if (instr->op == nir_texop_samples_identical) {
		struct ac_image_args txf_args = { 0 };
		memcpy(txf_args.coords, args.coords, sizeof(txf_args.coords));

		txf_args.dmask = 0xf;
		txf_args.resource = fmask_ptr;
		txf_args.dim = instr->is_array ? ac_image_2darray : ac_image_2d;
		result = build_tex_intrinsic(ctx, instr, &txf_args);

		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
		result = emit_int_cmp(&ctx->ac, LLVMIntEQ, result, ctx->ac.i32_0);
		goto write_result;
	}

	if ((instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS ||
	     instr->sampler_dim == GLSL_SAMPLER_DIM_MS) &&
	    instr->op != nir_texop_txs) {
		unsigned sample_chan = instr->is_array ? 3 : 2;
		args.coords[sample_chan] = adjust_sample_index_using_fmask(
			&ctx->ac, args.coords[0], args.coords[1],
			instr->is_array ? args.coords[2] : NULL,
			args.coords[sample_chan], fmask_ptr);
	}

	if (args.offset && (instr->op == nir_texop_txf || instr->op == nir_texop_txf_ms)) {
		int num_offsets = instr->src[offset_src].src.ssa->num_components;
		num_offsets = MIN2(num_offsets, instr->coord_components);
		for (unsigned i = 0; i < num_offsets; ++i) {
			args.coords[i] = LLVMBuildAdd(
				ctx->ac.builder, args.coords[i],
				LLVMConstInt(ctx->ac.i32, nir_src_comp_as_uint(instr->src[offset_src].src, i), false), "");
		}
		args.offset = NULL;
	}

	/* DMASK was repurposed for GATHER4. 4 components are always
	 * returned and DMASK works like a swizzle - it selects
	 * the component to fetch. The only valid DMASK values are
	 * 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
	 * (red,red,red,red) etc.) The ISA document doesn't mention
	 * this.
	 */
	args.dmask = 0xf;
	if (instr->op == nir_texop_tg4) {
		if (instr->is_shadow)
			args.dmask = 1;
		else
			args.dmask = 1 << instr->component;
	}

	if (instr->sampler_dim != GLSL_SAMPLER_DIM_BUF) {
		args.dim = ac_get_sampler_dim(ctx->ac.chip_class, instr->sampler_dim, instr->is_array);
		args.unorm = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;
	}
	result = build_tex_intrinsic(ctx, instr, &args);

	if (instr->op == nir_texop_query_levels)
		result = LLVMBuildExtractElement(ctx->ac.builder, result, LLVMConstInt(ctx->ac.i32, 3, false), "");
	else if (instr->is_shadow && instr->is_new_style_shadow &&
		 instr->op != nir_texop_txs && instr->op != nir_texop_lod &&
		 instr->op != nir_texop_tg4)
		result = LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, "");
	else if (instr->op == nir_texop_txs &&
		 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
		 instr->is_array) {
		LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
		LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
		LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
		z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
		result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
	} else if (ctx->ac.chip_class == GFX9 &&
		   instr->op == nir_texop_txs &&
		   instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
		   instr->is_array) {
		LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
		LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
		result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
						ctx->ac.i32_1, "");
	} else if (instr->dest.ssa.num_components != 4)
		result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);

write_result:
	if (result) {
		assert(instr->dest.is_ssa);
		result = ac_to_integer(&ctx->ac, result);
		ctx->ssa_defs[instr->dest.ssa.index] = result;
	}
}
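/* Phis are handled in two passes: visit_phi creates the LLVM phi with
 * no incoming values (predecessor blocks may not be emitted yet), and
 * phi_post_pass/visit_post_phi add the edges once every block has been
 * translated and recorded in ctx->defs.
 */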
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
	LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
	LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");

	ctx->ssa_defs[instr->dest.ssa.index] = result;
	_mesa_hash_table_insert(ctx->phis, instr, result);
}
static void visit_post_phi(struct ac_nir_context *ctx,
			   nir_phi_instr *instr,
			   LLVMValueRef llvm_phi)
{
	nir_foreach_phi_src(src, instr) {
		LLVMBasicBlockRef block = get_block(ctx, src->pred);
		LLVMValueRef llvm_src = get_src(ctx, src->src);

		LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
	}
}
static void phi_post_pass(struct ac_nir_context *ctx)
{
	hash_table_foreach(ctx->phis, entry) {
		visit_post_phi(ctx, (nir_phi_instr *)entry->key,
			       (LLVMValueRef)entry->data);
	}
}
static void visit_ssa_undef(struct ac_nir_context *ctx,
			    const nir_ssa_undef_instr *instr)
{
	unsigned num_components = instr->def.num_components;
	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
	LLVMValueRef undef;

	if (num_components == 1)
		undef = LLVMGetUndef(type);
	else {
		undef = LLVMGetUndef(LLVMVectorType(type, num_components));
	}
	ctx->ssa_defs[instr->def.index] = undef;
}
static void visit_jump(struct ac_llvm_context *ctx,
		       const nir_jump_instr *instr)
{
	switch (instr->type) {
	case nir_jump_break:
		ac_build_break(ctx);
		break;
	case nir_jump_continue:
		ac_build_continue(ctx);
		break;
	default:
		fprintf(stderr, "Unknown NIR jump instr: ");
		nir_print_instr(&instr->instr, stderr);
		fprintf(stderr, "\n");
		abort();
	}
}
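/* Map GLSL base types onto LLVM scalar types. glsl_to_llvm_type below
 * builds the aggregate cases on top of this; e.g. a vec4 becomes
 * <4 x float> and a mat3 becomes [3 x <3 x float>], since matrices
 * are represented as arrays of column vectors.
 */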
static LLVMTypeRef
glsl_base_to_llvm_type(struct ac_llvm_context *ac,
		       enum glsl_base_type type)
{
	switch (type) {
	case GLSL_TYPE_INT:
	case GLSL_TYPE_UINT:
	case GLSL_TYPE_BOOL:
	case GLSL_TYPE_SUBROUTINE:
		return ac->i32;
	case GLSL_TYPE_INT8:
	case GLSL_TYPE_UINT8:
		return ac->i8;
	case GLSL_TYPE_INT16:
	case GLSL_TYPE_UINT16:
		return ac->i16;
	case GLSL_TYPE_FLOAT:
		return ac->f32;
	case GLSL_TYPE_FLOAT16:
		return ac->f16;
	case GLSL_TYPE_INT64:
	case GLSL_TYPE_UINT64:
		return ac->i64;
	case GLSL_TYPE_DOUBLE:
		return ac->f64;
	default:
		unreachable("unknown GLSL type");
	}
}
static LLVMTypeRef
glsl_to_llvm_type(struct ac_llvm_context *ac,
		  const struct glsl_type *type)
{
	if (glsl_type_is_scalar(type)) {
		return glsl_base_to_llvm_type(ac, glsl_get_base_type(type));
	}

	if (glsl_type_is_vector(type)) {
		return LLVMVectorType(
			glsl_base_to_llvm_type(ac, glsl_get_base_type(type)),
			glsl_get_vector_elements(type));
	}

	if (glsl_type_is_matrix(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_column_type(type)),
			glsl_get_matrix_columns(type));
	}

	if (glsl_type_is_array(type)) {
		return LLVMArrayType(
			glsl_to_llvm_type(ac, glsl_get_array_element(type)),
			glsl_get_length(type));
	}

	assert(glsl_type_is_struct_or_ifc(type));

	LLVMTypeRef member_types[glsl_get_length(type)];

	for (unsigned i = 0; i < glsl_get_length(type); i++) {
		member_types[i] =
			glsl_to_llvm_type(ac,
					  glsl_get_struct_field(type, i));
	}

	return LLVMStructTypeInContext(ac->context, member_types,
				       glsl_get_length(type), false);
}
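/* Derefs only need to be materialized as LLVM pointers for shared
 * (LDS) and global memory. Every other variable mode is accessed
 * through dedicated load/store paths, hence the early return below.
 */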
static void visit_deref(struct ac_nir_context *ctx,
			nir_deref_instr *instr)
{
	if (instr->mode != nir_var_mem_shared &&
	    instr->mode != nir_var_mem_global)
		return;

	LLVMValueRef result = NULL;
	switch(instr->deref_type) {
	case nir_deref_type_var: {
		struct hash_entry *entry = _mesa_hash_table_search(ctx->vars, instr->var);
		result = entry->data;
		break;
	}
	case nir_deref_type_struct:
		if (instr->mode == nir_var_mem_global) {
			nir_deref_instr *parent = nir_deref_instr_parent(instr);
			uint64_t offset = glsl_get_struct_field_offset(parent->type,
								       instr->strct.index);
			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
						  LLVMConstInt(ctx->ac.i32, offset, 0));
		} else {
			result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
					       LLVMConstInt(ctx->ac.i32, instr->strct.index, 0));
		}
		break;
	case nir_deref_type_array:
		if (instr->mode == nir_var_mem_global) {
			nir_deref_instr *parent = nir_deref_instr_parent(instr);
			unsigned stride = glsl_get_explicit_stride(parent->type);

			if ((glsl_type_is_matrix(parent->type) &&
			     glsl_matrix_type_is_row_major(parent->type)) ||
			    (glsl_type_is_vector(parent->type) && stride == 0))
				stride = type_scalar_size_bytes(parent->type);

			LLVMValueRef index = get_src(ctx, instr->arr.index);
			if (LLVMTypeOf(index) != ctx->ac.i64)
				index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

			LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
		} else {
			result = ac_build_gep0(&ctx->ac, get_src(ctx, instr->parent),
					       get_src(ctx, instr->arr.index));
		}
		break;
	case nir_deref_type_ptr_as_array:
		if (instr->mode == nir_var_mem_global) {
			unsigned stride = nir_deref_instr_ptr_as_array_stride(instr);

			LLVMValueRef index = get_src(ctx, instr->arr.index);
			if (LLVMTypeOf(index) != ctx->ac.i64)
				index = LLVMBuildZExt(ctx->ac.builder, index, ctx->ac.i64, "");

			LLVMValueRef offset = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->ac.i64, stride, 0), "");

			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent), offset);
		} else {
			result = ac_build_gep_ptr(&ctx->ac, get_src(ctx, instr->parent),
						  get_src(ctx, instr->arr.index));
		}
		break;
	case nir_deref_type_cast: {
		result = get_src(ctx, instr->parent);

		/* We can't use the structs from LLVM because the shader
		 * specifies its own offsets. */
		LLVMTypeRef pointee_type = ctx->ac.i8;
		if (instr->mode == nir_var_mem_shared)
			pointee_type = glsl_to_llvm_type(&ctx->ac, instr->type);

		unsigned address_space;

		switch(instr->mode) {
		case nir_var_mem_shared:
			address_space = AC_ADDR_SPACE_LDS;
			break;
		case nir_var_mem_global:
			address_space = AC_ADDR_SPACE_GLOBAL;
			break;
		default:
			unreachable("Unhandled address space");
		}

		LLVMTypeRef type = LLVMPointerType(pointee_type, address_space);

		if (LLVMTypeOf(result) != type) {
			if (LLVMGetTypeKind(LLVMTypeOf(result)) == LLVMVectorTypeKind) {
				result = LLVMBuildBitCast(ctx->ac.builder, result,
							  type, "");
			} else {
				result = LLVMBuildIntToPtr(ctx->ac.builder, result,
							   type, "");
			}
		}
		break;
	}
	default:
		unreachable("Unhandled deref_instr deref type");
	}

	ctx->ssa_defs[instr->dest.ssa.index] = result;
}
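/* Control flow is translated by mutual recursion: visit_cf_list walks
 * a list of blocks/ifs/loops and visit_block dispatches on instruction
 * type. The LLVM basic block that finishes a NIR block is recorded in
 * ctx->defs so that phi resolution can find its predecessors.
 */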
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list);

static void visit_block(struct ac_nir_context *ctx, nir_block *block)
{
	nir_foreach_instr(instr, block)
	{
		switch (instr->type) {
		case nir_instr_type_alu:
			visit_alu(ctx, nir_instr_as_alu(instr));
			break;
		case nir_instr_type_load_const:
			visit_load_const(ctx, nir_instr_as_load_const(instr));
			break;
		case nir_instr_type_intrinsic:
			visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
			break;
		case nir_instr_type_tex:
			visit_tex(ctx, nir_instr_as_tex(instr));
			break;
		case nir_instr_type_phi:
			visit_phi(ctx, nir_instr_as_phi(instr));
			break;
		case nir_instr_type_ssa_undef:
			visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
			break;
		case nir_instr_type_jump:
			visit_jump(&ctx->ac, nir_instr_as_jump(instr));
			break;
		case nir_instr_type_deref:
			visit_deref(ctx, nir_instr_as_deref(instr));
			break;
		default:
			fprintf(stderr, "Unknown NIR instr type: ");
			nir_print_instr(instr, stderr);
			fprintf(stderr, "\n");
			abort();
		}
	}

	_mesa_hash_table_insert(ctx->defs, block,
				LLVMGetInsertBlock(ctx->ac.builder));
}
static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
{
	LLVMValueRef value = get_src(ctx, if_stmt->condition);

	nir_block *then_block =
		(nir_block *) exec_list_get_head(&if_stmt->then_list);

	ac_build_uif(&ctx->ac, value, then_block->index);

	visit_cf_list(ctx, &if_stmt->then_list);

	if (!exec_list_is_empty(&if_stmt->else_list)) {
		nir_block *else_block =
			(nir_block *) exec_list_get_head(&if_stmt->else_list);

		ac_build_else(&ctx->ac, else_block->index);
		visit_cf_list(ctx, &if_stmt->else_list);
	}

	ac_build_endif(&ctx->ac, then_block->index);
}
static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
{
	nir_block *first_loop_block =
		(nir_block *) exec_list_get_head(&loop->body);

	ac_build_bgnloop(&ctx->ac, first_loop_block->index);

	visit_cf_list(ctx, &loop->body);

	ac_build_endloop(&ctx->ac, first_loop_block->index);
}
static void visit_cf_list(struct ac_nir_context *ctx,
			  struct exec_list *list)
{
	foreach_list_typed(nir_cf_node, node, node, list)
	{
		switch (node->type) {
		case nir_cf_node_block:
			visit_block(ctx, nir_cf_node_as_block(node));
			break;

		case nir_cf_node_if:
			visit_if(ctx, nir_cf_node_as_if(node));
			break;

		case nir_cf_node_loop:
			visit_loop(ctx, nir_cf_node_as_loop(node));
			break;

		default:
			assert(0);
		}
	}
}
void
ac_handle_shader_output_decl(struct ac_llvm_context *ctx,
			     struct ac_shader_abi *abi,
			     struct nir_shader *nir,
			     struct nir_variable *variable,
			     gl_shader_stage stage)
{
	unsigned output_loc = variable->data.driver_location / 4;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);

	/* Tess ctrl has its own load/store paths for outputs. */
	if (stage == MESA_SHADER_TESS_CTRL)
		return;

	if (stage == MESA_SHADER_VERTEX ||
	    stage == MESA_SHADER_TESS_EVAL ||
	    stage == MESA_SHADER_GEOMETRY) {
		int idx = variable->data.location + variable->data.index;
		if (idx == VARYING_SLOT_CLIP_DIST0) {
			int length = nir->info.clip_distance_array_size +
				     nir->info.cull_distance_array_size;

			if (length > 4)
				attrib_count = 2;
			else
				attrib_count = 1;
		}
	}

	bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
	LLVMTypeRef type = is_16bit ? ctx->f16 : ctx->f32;
	for (unsigned i = 0; i < attrib_count; ++i) {
		for (unsigned chan = 0; chan < 4; chan++) {
			abi->outputs[ac_llvm_reg_index_soa(output_loc + i, chan)] =
				ac_build_alloca_undef(ctx, type, "");
		}
	}
}
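/* Indexable temporaries are stored SoA: four f32 allocas per attribute
 * slot, so ctx->locals holds 4 * num_locals entries. The array is
 * malloc'ed here and freed at the end of ac_nir_translate.
 */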
static void
setup_locals(struct ac_nir_context *ctx,
	     struct nir_function *func)
{
	int i, j;
	ctx->num_locals = 0;
	nir_foreach_variable(variable, &func->impl->locals) {
		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
		variable->data.driver_location = ctx->num_locals * 4;
		variable->data.location_frac = 0;
		ctx->num_locals += attrib_count;
	}
	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
	if (!ctx->locals)
		return;

	for (i = 0; i < ctx->num_locals; i++) {
		for (j = 0; j < 4; j++) {
			ctx->locals[i * 4 + j] =
				ac_build_alloca_undef(&ctx->ac, ctx->ac.f32, "temp");
		}
	}
}
static void
setup_scratch(struct ac_nir_context *ctx,
	      struct nir_shader *shader)
{
	if (shader->scratch_size == 0)
		return;

	ctx->scratch = ac_build_alloca_undef(&ctx->ac,
					     LLVMArrayType(ctx->ac.i8, shader->scratch_size),
					     "scratch");
}
static void
setup_constant_data(struct ac_nir_context *ctx,
		    struct nir_shader *shader)
{
	if (!shader->constant_data)
		return;

	LLVMValueRef data =
		LLVMConstStringInContext(ctx->ac.context,
					 shader->constant_data,
					 shader->constant_data_size,
					 true);
	LLVMTypeRef type = LLVMArrayType(ctx->ac.i8, shader->constant_data_size);

	/* We want to put the constant data in the CONST address space so that
	 * we can use scalar loads. However, LLVM versions before 10 put these
	 * variables in the same section as the code, which is unacceptable
	 * for RadeonSI as it needs to relocate all the data sections after
	 * the code sections. See https://reviews.llvm.org/D65813.
	 */
	unsigned address_space =
		LLVM_VERSION_MAJOR < 10 ? AC_ADDR_SPACE_GLOBAL : AC_ADDR_SPACE_CONST;

	LLVMValueRef global =
		LLVMAddGlobalInAddressSpace(ctx->ac.module, type,
					    "const_data",
					    address_space);

	LLVMSetInitializer(global, data);
	LLVMSetGlobalConstant(global, true);
	LLVMSetVisibility(global, LLVMHiddenVisibility);
	ctx->constant_data = global;
}
static void
setup_shared(struct ac_nir_context *ctx,
	     struct nir_shader *nir)
{
	nir_foreach_variable(variable, &nir->shared) {
		LLVMValueRef shared =
			LLVMAddGlobalInAddressSpace(
				ctx->ac.module, glsl_to_llvm_type(&ctx->ac, variable->type),
				variable->name ? variable->name : "",
				AC_ADDR_SPACE_LDS);
		_mesa_hash_table_insert(ctx->vars, variable, shared);
	}
}
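/* Entry point: translate a whole NIR shader into the current LLVM
 * function. Set up the per-shader tables (SSA defs, blocks, phis,
 * variables), declare outputs/locals/scratch/constant data (plus LDS
 * variables for compute), walk the control-flow list, resolve phis,
 * and finally let the ABI emit outputs for non-compute stages.
 */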
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
		      struct nir_shader *nir)
{
	struct ac_nir_context ctx = {};
	struct nir_function *func;

	ctx.ac = *ac;
	ctx.abi = abi;

	ctx.stage = nir->info.stage;
	ctx.info = &nir->info;

	ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));

	nir_foreach_variable(variable, &nir->outputs)
		ac_handle_shader_output_decl(&ctx.ac, ctx.abi, nir, variable,
					     ctx.stage);

	ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);
	ctx.vars = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
					   _mesa_key_pointer_equal);

	func = (struct nir_function *)exec_list_get_head(&nir->functions);

	nir_index_ssa_defs(func->impl);
	ctx.ssa_defs = calloc(func->impl->ssa_alloc, sizeof(LLVMValueRef));

	setup_locals(&ctx, func);
	setup_scratch(&ctx, nir);
	setup_constant_data(&ctx, nir);

	if (gl_shader_stage_is_compute(nir->info.stage))
		setup_shared(&ctx, nir);

	visit_cf_list(&ctx, &func->impl->body);
	phi_post_pass(&ctx);

	if (!gl_shader_stage_is_compute(nir->info.stage))
		ctx.abi->emit_outputs(ctx.abi, AC_LLVM_MAX_OUTPUTS,
				      ctx.abi->outputs);

	free(ctx.locals);
	free(ctx.ssa_defs);
	ralloc_free(ctx.defs);
	ralloc_free(ctx.phis);
	ralloc_free(ctx.vars);
}
void
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
	/* Lower large variables to scratch first so that we won't bloat the
	 * shader by generating large if ladders for them. We later lower
	 * scratch to alloca's, assuming LLVM won't generate VGPR indexing.
	 */
	NIR_PASS_V(nir, nir_lower_vars_to_scratch,
		   nir_var_function_temp,
		   256,
		   glsl_get_natural_size_align_bytes);

	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
	 */
	bool llvm_has_working_vgpr_indexing = chip_class != GFX9;

	/* TODO: Indirect indexing of GS inputs is unimplemented.
	 *
	 * TCS and TES load inputs directly from LDS or offchip memory, so
	 * indirect indexing is trivial.
	 */
	nir_variable_mode indirect_mask = 0;
	if (nir->info.stage == MESA_SHADER_GEOMETRY ||
	    (nir->info.stage != MESA_SHADER_TESS_CTRL &&
	     nir->info.stage != MESA_SHADER_TESS_EVAL &&
	     !llvm_has_working_vgpr_indexing)) {
		indirect_mask |= nir_var_shader_in;
	}
	if (!llvm_has_working_vgpr_indexing &&
	    nir->info.stage != MESA_SHADER_TESS_CTRL)
		indirect_mask |= nir_var_shader_out;

	/* TODO: We shouldn't need to do this, however LLVM isn't currently
	 * smart enough to handle indirects without causing excess spilling,
	 * which causes the GPU to hang.
	 *
	 * See the following thread for more details of the problem:
	 * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
	 */
	indirect_mask |= nir_var_function_temp;

	nir_lower_indirect_derefs(nir, indirect_mask);
}
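/* Helper for ac_are_tessfactors_def_in_all_invocs below. The writemask
 * convention packs the inner tess-level channels in the low bits and
 * the outer tess-level channels shifted left by 4.
 */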
static unsigned
get_inst_tessfactor_writemask(nir_intrinsic_instr *intrin)
{
	if (intrin->intrinsic != nir_intrinsic_store_deref)
		return 0;

	nir_variable *var =
		nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

	if (var->data.mode != nir_var_shader_out)
		return 0;

	unsigned writemask = 0;
	const int location = var->data.location;
	unsigned first_component = var->data.location_frac;
	unsigned num_comps = intrin->dest.ssa.num_components;

	if (location == VARYING_SLOT_TESS_LEVEL_INNER)
		writemask = ((1 << (num_comps + 1)) - 1) << first_component;
	else if (location == VARYING_SLOT_TESS_LEVEL_OUTER)
		writemask = (((1 << (num_comps + 1)) - 1) << first_component) << 4;

	return writemask;
}
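/* Recursive control-flow scan. upper_block_tf_writemask accumulates
 * tess-factor channels written unconditionally in the current
 * barrier-delimited segment; cond_block_tf_writemask accumulates
 * channels written anywhere inside conditional code. At a barrier the
 * pass requires that nothing written conditionally was missing from
 * the unconditional set.
 */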
static void scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writemask,
			   unsigned *cond_block_tf_writemask,
			   bool *tessfactors_are_def_in_all_invocs, bool is_nested_cf)
{
	switch (cf_node->type) {
	case nir_cf_node_block: {
		nir_block *block = nir_cf_node_as_block(cf_node);
		nir_foreach_instr(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
			if (intrin->intrinsic == nir_intrinsic_barrier) {

				/* If we find a barrier in nested control flow put this in the
				 * too hard basket. In GLSL this is not possible but it is in
				 * SPIR-V.
				 */
				if (is_nested_cf) {
					*tessfactors_are_def_in_all_invocs = false;
					return;
				}

				/* The following case must be prevented:
				 *    gl_TessLevelInner = ...;
				 *    barrier();
				 *    if (gl_InvocationID == 1)
				 *       gl_TessLevelInner = ...;
				 *
				 * If you consider disjoint code segments separated by barriers, each
				 * such segment that writes tess factor channels should write the same
				 * channels in all codepaths within that segment.
				 */
				if (upper_block_tf_writemask || cond_block_tf_writemask) {
					/* Accumulate the result: */
					*tessfactors_are_def_in_all_invocs &=
						!(*cond_block_tf_writemask & ~(*upper_block_tf_writemask));

					/* Analyze the next code segment from scratch. */
					*upper_block_tf_writemask = 0;
					*cond_block_tf_writemask = 0;
				}
			} else
				*upper_block_tf_writemask |= get_inst_tessfactor_writemask(intrin);
		}

		break;
	}
	case nir_cf_node_if: {
		unsigned then_tessfactor_writemask = 0;
		unsigned else_tessfactor_writemask = 0;

		nir_if *if_stmt = nir_cf_node_as_if(cf_node);
		foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list) {
			scan_tess_ctrl(nested_node, &then_tessfactor_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list) {
			scan_tess_ctrl(nested_node, &else_tessfactor_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		if (then_tessfactor_writemask || else_tessfactor_writemask) {
			/* If both statements write the same tess factor channels,
			 * we can say that the upper block writes them too.
			 */
			*upper_block_tf_writemask |= then_tessfactor_writemask &
				else_tessfactor_writemask;
			*cond_block_tf_writemask |= then_tessfactor_writemask |
				else_tessfactor_writemask;
		}

		break;
	}
	case nir_cf_node_loop: {
		nir_loop *loop = nir_cf_node_as_loop(cf_node);
		foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
			scan_tess_ctrl(nested_node, cond_block_tf_writemask,
				       cond_block_tf_writemask,
				       tessfactors_are_def_in_all_invocs, true);
		}

		break;
	}
	default:
		unreachable("unknown cf node type");
	}
}
bool
ac_are_tessfactors_def_in_all_invocs(const struct nir_shader *nir)
{
	assert(nir->info.stage == MESA_SHADER_TESS_CTRL);

	/* The pass works as follows:
	 * If all codepaths write tess factors, we can say that all
	 * invocations define tess factors.
	 *
	 * Each tess factor channel is tracked separately.
	 */
	unsigned main_block_tf_writemask = 0; /* if main block writes tess factors */
	unsigned cond_block_tf_writemask = 0; /* if cond block writes tess factors */

	/* Initial value = true. Here the pass will accumulate results from
	 * multiple segments surrounded by barriers. If tess factors aren't
	 * written at all, it's a shader bug and we don't care if this will be
	 * true.
	 */
	bool tessfactors_are_def_in_all_invocs = true;

	nir_foreach_function(function, nir) {
		if (function->impl) {
			foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
				scan_tess_ctrl(node, &main_block_tf_writemask,
					       &cond_block_tf_writemask,
					       &tessfactors_are_def_in_all_invocs,
					       false);
			}
		}
	}

	/* Accumulate the result for the last code segment separated by a
	 * barrier.
	 */
	if (main_block_tf_writemask || cond_block_tf_writemask) {
		tessfactors_are_def_in_all_invocs &=
			!(cond_block_tf_writemask & ~main_block_tf_writemask);
	}

	return tessfactors_are_def_in_all_invocs;
}