bool write, bool atomic)
{
LLVMValueRef rsrc = get_image_descriptor(ctx, instr, AC_DESC_BUFFER, write);
- if (ctx->abi->gfx9_stride_size_workaround_for_atomic && atomic) {
+ if (ctx->ac.chip_class == GFX9 && HAVE_LLVM < 0x900 && atomic) {
LLVMValueRef elem_count = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
LLVMValueRef stride = LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");
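For reference, the descriptor patching that this guard protects continues past the lines shown in this hunk. Below is a minimal sketch of how the remainder of such a workaround is typically written, assuming the usual approach of clamping num_records (dword 2 of the descriptor) to at least the stride pulled out of dword 1 above; the variable names are illustrative and not taken from this patch:

	/* Sketch (assumption): clamp num_records so the size check still covers
	 * a full element when GFX9 ignores the stride (the IDXEN=0 case the
	 * removed comment in the next hunk describes), then write the clamped
	 * value back into dword 2 of the descriptor. */
	LLVMValueRef count_is_larger =
		LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, "");
	LLVMValueRef new_elem_count =
		LLVMBuildSelect(ctx->ac.builder, count_is_larger, elem_count, stride, "");
	rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
				      LLVMConstInt(ctx->ac.i32, 2, 0), "");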
bool clamp_shadow_reference;
bool interp_at_sample_force_center;
- /* Whether to workaround GFX9 ignoring the stride for the buffer size if IDXEN=0
- * and LLVM optimizes an indexed load with constant index to IDXEN=0. */
- bool gfx9_stride_size_workaround_for_atomic;
-
/* Whether bounds checks are required */
bool robust_buffer_access;
};
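The HAVE_LLVM < 0x900 guard added in the first hunk (previously expressed through the abi flag this hunk removes) compares Mesa's packed LLVM version macro against LLVM 9.0. A hypothetical, self-contained illustration of that packing, assuming the conventional (major << 8) | minor encoding; PACK_LLVM_VERSION is a made-up name used only for illustration:

	#include <assert.h>

	/* Assumption for illustration: the build packs the LLVM version as
	 * (major << 8) | minor, so LLVM 8.0 is 0x800 and LLVM 9.0 is 0x900.
	 * "HAVE_LLVM < 0x900" therefore means "built against an LLVM older
	 * than 9.0", which is when the old atomic intrinsics and this
	 * stride/size workaround are still needed. */
	#define PACK_LLVM_VERSION(major, minor) (((major) << 8) | (minor))

	int main(void)
	{
		assert(PACK_LLVM_VERSION(8, 0) == 0x800); /* workaround applies */
		assert(PACK_LLVM_VERSION(9, 0) == 0x900); /* guard is false */
		return 0;
	}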
ctx.abi.clamp_shadow_reference = false;
ctx.abi.robust_buffer_access = options->robust_buffer_access;
- /* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
- * we fallback to the old intrinsics for atomic buffer image operations
- * and thus we need to apply the indexing workaround...
- */
- ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;
-
bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs_common_out.as_ngg;
if (shader_count >= 2 || is_ngg)
ac_init_exec_full_mask(&ctx.ac);
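When two shader stages are merged into one wave or the NGG path is used, the hardware does not guarantee a full EXEC mask at wave start, so the entry code first forces EXEC to all ones. A minimal sketch of what that initialization amounts to at the LLVM-C level; the helper name and the pre-declared init_exec_fn handle for the llvm.amdgcn.init.exec intrinsic are assumptions, not this patch's code:

	#include <llvm-c/Core.h>

	/* Sketch (assumptions: "builder" and an i64 type are in scope, and
	 * "init_exec_fn" is a previously declared void(i64) function wrapping
	 * the llvm.amdgcn.init.exec intrinsic). Passing an all-ones mask makes
	 * every lane of the wave active before the merged parts run. */
	static void emit_init_full_exec(LLVMBuilderRef builder, LLVMTypeRef i64,
					LLVMValueRef init_exec_fn)
	{
		LLVMValueRef all_ones = LLVMConstInt(i64, ~0ull, 0);
		LLVMBuildCall(builder, init_exec_fn, &all_ones, 1, "");
	}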