LLVMBuildExtractElement(builder, descriptor,
LLVMConstInt(ctx->i32, 2, 0), "");
- if (ctx->screen->info.chip_class == VI) {
- /* On VI, the descriptor contains the size in bytes,
+ if (ctx->screen->info.chip_class == GFX8) {
+ /* On GFX8, the descriptor contains the size in bytes,
* but TXQ must return the size in elements.
* The stride is always non-zero for resources using TXQ.
*/
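	/* A minimal sketch of that fixup (descriptor field layout assumed
	 * from the buffer resource words; not part of this patch):
	 *
	 *   stride       = (word1 >> 16) & 0x3fff;
	 *   num_elements = size_in_bytes / stride;
	 */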
switch (target) {
case TGSI_TEXTURE_1D:
case TGSI_TEXTURE_SHADOW1D:
- if (screen->info.chip_class >= GFX9)
+ if (screen->info.chip_class == GFX9)
return ac_image_2d;
return ac_image_1d;
	case TGSI_TEXTURE_2D:
		return ac_image_2d;
	case TGSI_TEXTURE_CUBE:
		return ac_image_cube;
case TGSI_TEXTURE_1D_ARRAY:
case TGSI_TEXTURE_SHADOW1D_ARRAY:
- if (screen->info.chip_class >= GFX9)
+ if (screen->info.chip_class == GFX9)
return ac_image_2darray;
return ac_image_1darray;
case TGSI_TEXTURE_2D_ARRAY:
/* Match the resource type set in the descriptor. */
if (dim == ac_image_cube ||
- (screen->info.chip_class <= VI && dim == ac_image_3d))
+ (screen->info.chip_class <= GFX8 && dim == ac_image_3d))
dim = ac_image_2darray;
- else if (target == TGSI_TEXTURE_2D && screen->info.chip_class >= GFX9) {
+ else if (target == TGSI_TEXTURE_2D && screen->info.chip_class == GFX9) {
/* When a single layer of a 3D texture is bound, the shader
* will refer to a 2D target, but the descriptor has a 3D type.
* Since the HW ignores BASE_ARRAY in this case, we need to
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx,
LLVMValueRef rsrc)
{
- if (ctx->screen->info.chip_class <= CIK) {
+ if (ctx->screen->info.chip_class <= GFX7) {
return rsrc;
} else {
LLVMValueRef i32_6 = LLVMConstInt(ctx->i32, 6, 0);
LLVMValueRef si_load_image_desc(struct si_shader_context *ctx,
LLVMValueRef list, LLVMValueRef index,
- enum ac_descriptor_type desc_type, bool dcc_off)
+ enum ac_descriptor_type desc_type,
+ bool uses_store, bool bindless)
{
LLVMBuilderRef builder = ctx->ac.builder;
LLVMValueRef rsrc;
if (desc_type == AC_DESC_BUFFER) {
- index = LLVMBuildMul(builder, index,
- LLVMConstInt(ctx->i32, 2, 0), "");
- index = LLVMBuildAdd(builder, index,
- ctx->i32_1, "");
+ index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->i32, 2, 0),
+ ctx->i32_1);
list = LLVMBuildPointerCast(builder, list,
ac_array_in_const32_addr_space(ctx->v4i32), "");
} else {
assert(desc_type == AC_DESC_IMAGE);
}
- rsrc = ac_build_load_to_sgpr(&ctx->ac, list, index);
- if (desc_type == AC_DESC_IMAGE && dcc_off)
+ if (bindless)
+ rsrc = ac_build_load_to_sgpr_uint_wraparound(&ctx->ac, list, index);
+ else
+ rsrc = ac_build_load_to_sgpr(&ctx->ac, list, index);
+
+ if (ctx->ac.chip_class <= GFX9 &&
+ desc_type == AC_DESC_IMAGE && uses_store)
rsrc = force_dcc_off(ctx, rsrc);
return rsrc;
}
LLVMValueRef rsrc_ptr = LLVMGetParam(ctx->main_fn,
ctx->param_samplers_and_images);
LLVMValueRef index;
- bool dcc_off = is_store;
if (!image->Register.Indirect) {
- const struct tgsi_shader_info *info = bld_base->info;
- unsigned images_writemask = info->images_store |
- info->images_atomic;
-
index = LLVMConstInt(ctx->i32,
si_get_image_slot(image->Register.Index), 0);
-
- if (images_writemask & (1 << image->Register.Index))
- dcc_off = true;
} else {
/* From the GL_ARB_shader_image_load_store extension spec:
*
index, "");
}
+ bool bindless = false;
+
if (image->Register.File != TGSI_FILE_IMAGE) {
/* Bindless descriptors are accessible from a different pair of
* user SGPR indices.
*/
index = LLVMBuildMul(ctx->ac.builder, index,
LLVMConstInt(ctx->i32, 2, 0), "");
+ bindless = true;
}
*rsrc = si_load_image_desc(ctx, rsrc_ptr, index,
target == TGSI_TEXTURE_BUFFER ? AC_DESC_BUFFER : AC_DESC_IMAGE,
- dcc_off);
+ is_store, bindless);
}
static void image_fetch_coords(
coords[chan] = tmp;
}
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.chip_class == GFX9) {
/* 1D textures are allocated and used as 2D on GFX9. */
if (target == TGSI_TEXTURE_1D) {
coords[1] = ctx->i32_0;
}
}
-/**
- * Append the resource and indexing arguments for buffer intrinsics.
- *
- * \param rsrc the v4i32 buffer resource
- * \param index index into the buffer (stride-based)
- * \param offset byte offset into the buffer
- */
-static void buffer_append_args(
- struct si_shader_context *ctx,
- struct lp_build_emit_data *emit_data,
- LLVMValueRef rsrc,
- LLVMValueRef index,
- LLVMValueRef offset,
- bool atomic,
- bool force_glc)
+static unsigned get_cache_policy(struct si_shader_context *ctx,
+ const struct tgsi_full_instruction *inst,
+ bool atomic, bool may_store_unaligned,
+ bool writeonly_memory)
{
- const struct tgsi_full_instruction *inst = emit_data->inst;
- LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
- LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
-
- emit_data->args[emit_data->arg_count++] = rsrc;
- emit_data->args[emit_data->arg_count++] = index; /* vindex */
- emit_data->args[emit_data->arg_count++] = offset; /* voffset */
- if (!atomic) {
- emit_data->args[emit_data->arg_count++] =
- force_glc ||
- inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE) ?
- i1true : i1false; /* glc */
+ unsigned cache_policy = 0;
+
+ if (!atomic &&
+ /* GFX6 has a TC L1 bug causing corruption of 8bit/16bit stores.
+ * All store opcodes not aligned to a dword are affected.
+ * The only way to get unaligned stores in radeonsi is through
+ * shader images. */
+ ((may_store_unaligned && ctx->screen->info.chip_class == GFX6) ||
+ /* If this is write-only, don't keep data in L1 to prevent
+ * evicting L1 cache lines that may be needed by other
+ * instructions. */
+ writeonly_memory ||
+ inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE))) {
+ cache_policy |= ac_glc;
}
- emit_data->args[emit_data->arg_count++] = i1false; /* slc */
-}
-static void load_emit_buffer(struct si_shader_context *ctx,
- struct lp_build_emit_data *emit_data,
- bool can_speculate, bool allow_smem)
-{
- const struct tgsi_full_instruction *inst = emit_data->inst;
- uint writemask = inst->Dst[0].Register.WriteMask;
- uint count = util_last_bit(writemask);
- LLVMValueRef *args = emit_data->args;
-
- /* Don't use SMEM for shader buffer loads, because LLVM doesn't
- * select SMEM for SI.load.const with a non-constant offset, and
- * constant offsets practically don't exist with shader buffers.
- *
- * Also, SI.load.const doesn't use inst_offset when it's lowered
- * to VMEM, so we just end up with more VALU instructions in the end
- * and no benefit.
- *
- * TODO: Remove this line once LLVM can select SMEM with a non-constant
- * offset, and can derive inst_offset when VMEM is selected.
- * After that, si_memory_barrier should invalidate sL1 for shader
- * buffers.
- */
+ if (inst->Memory.Qualifier & TGSI_MEMORY_STREAM_CACHE_POLICY)
+ cache_policy |= ac_slc;
- assert(LLVMConstIntGetZExtValue(args[1]) == 0); /* vindex */
- emit_data->output[emit_data->chan] =
- ac_build_buffer_load(&ctx->ac, args[0], count, NULL,
- args[2], NULL, 0,
- LLVMConstIntGetZExtValue(args[3]),
- LLVMConstIntGetZExtValue(args[4]),
- can_speculate, allow_smem);
+ return cache_policy;
}
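/* Usage sketch (mirroring the call sites below): loads pass atomic=false
 * and may_store_unaligned=false, while image stores pass
 * may_store_unaligned=true so that the GFX6 TC L1 workaround applies:
 *
 *   args.cache_policy = get_cache_policy(ctx, inst, false, is_image,
 *                                        writeonly_memory);
 */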
static LLVMValueRef get_memory_ptr(struct si_shader_context *ctx,
* For LOAD, set this to (store | atomic) slot usage in the shader.
* For STORE, set this to (load | atomic) slot usage in the shader.
* \param images_reverse_access_mask Same as above, but for images.
+ * \param bindless_buffer_reverse_access_mask Same as above, but for bindless image buffers.
+ * \param bindless_image_reverse_access_mask Same as above, but for bindless images.
*/
static bool is_oneway_access_only(const struct tgsi_full_instruction *inst,
const struct tgsi_shader_info *info,
unsigned shader_buffers_reverse_access_mask,
- unsigned images_reverse_access_mask)
+ unsigned images_reverse_access_mask,
+ bool bindless_buffer_reverse_access_mask,
+ bool bindless_image_reverse_access_mask)
{
+ enum tgsi_file_type resource_file;
+ unsigned resource_index;
+ bool resource_indirect;
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_STORE) {
+ resource_file = inst->Dst[0].Register.File;
+ resource_index = inst->Dst[0].Register.Index;
+ resource_indirect = inst->Dst[0].Register.Indirect;
+ } else {
+ resource_file = inst->Src[0].Register.File;
+ resource_index = inst->Src[0].Register.Index;
+ resource_indirect = inst->Src[0].Register.Indirect;
+ }
+
+ assert(resource_file == TGSI_FILE_BUFFER ||
+ resource_file == TGSI_FILE_IMAGE ||
+ /* bindless image */
+ resource_file == TGSI_FILE_INPUT ||
+ resource_file == TGSI_FILE_OUTPUT ||
+ resource_file == TGSI_FILE_CONSTANT ||
+ resource_file == TGSI_FILE_TEMPORARY ||
+ resource_file == TGSI_FILE_IMMEDIATE);
+
+ assert(resource_file != TGSI_FILE_BUFFER ||
+ inst->Memory.Texture == TGSI_TEXTURE_BUFFER);
+
+ bool bindless = resource_file != TGSI_FILE_BUFFER &&
+ resource_file != TGSI_FILE_IMAGE;
+
/* RESTRICT means NOALIAS.
* If there are no writes, we can assume the accessed memory is read-only.
* If there are no reads, we can assume the accessed memory is write-only.
*/
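	/* For example (GLSL, illustrative only):
	 *
	 *   layout(rgba8) restrict writeonly uniform image2D img;
	 *
	 * No other binding may alias img and the shader never reads it,
	 * so stores to img can be treated as write-only accesses.
	 */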
- if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT) {
+ if (inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT && !bindless) {
unsigned reverse_access_mask;
- if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
+ if (resource_file == TGSI_FILE_BUFFER) {
reverse_access_mask = shader_buffers_reverse_access_mask;
} else if (inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
reverse_access_mask = info->images_buffers &
images_reverse_access_mask;
}
- if (inst->Src[0].Register.Indirect) {
+ if (resource_indirect) {
if (!reverse_access_mask)
return true;
} else {
if (!(reverse_access_mask &
- (1u << inst->Src[0].Register.Index)))
+ (1u << resource_index)))
return true;
}
}
* Same for the case when there are no writes/reads for non-buffer
* images.
*/
- if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
- (inst->Memory.Texture == TGSI_TEXTURE_BUFFER &&
- (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
- tgsi_is_bindless_image_file(inst->Src[0].Register.File)))) {
+ if (resource_file == TGSI_FILE_BUFFER ||
+ inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
if (!shader_buffers_reverse_access_mask &&
- !(info->images_buffers & images_reverse_access_mask))
+ !(info->images_buffers & images_reverse_access_mask) &&
+ !bindless_buffer_reverse_access_mask)
return true;
} else {
- if (!(~info->images_buffers & images_reverse_access_mask))
+ if (!(~info->images_buffers & images_reverse_access_mask) &&
+ !bindless_image_reverse_access_mask)
return true;
}
return false;
const struct tgsi_full_instruction * inst = emit_data->inst;
const struct tgsi_shader_info *info = &ctx->shader->selector->info;
bool can_speculate = false;
+ LLVMValueRef vindex = ctx->i32_0;
+ LLVMValueRef voffset = ctx->i32_0;
+ struct ac_image_args args = {};
if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
load_emit_memory(ctx, emit_data);
if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
- LLVMValueRef offset, tmp, rsrc;
-
bool ubo = inst->Src[0].Register.File == TGSI_FILE_CONSTBUF;
- rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);
-
- tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
- offset = ac_to_integer(&ctx->ac, tmp);
-
- buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
- offset, false, false);
- } else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
- tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
- LLVMValueRef rsrc;
+ args.resource = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], ubo);
+ voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 1, 0));
+ } else {
unsigned target = inst->Memory.Texture;
- image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &rsrc);
- image_fetch_coords(bld_base, inst, 1, rsrc, &emit_data->args[1]);
-
- if (target == TGSI_TEXTURE_BUFFER) {
- buffer_append_args(ctx, emit_data, rsrc, emit_data->args[1],
- ctx->i32_0, false, false);
- } else {
- emit_data->args[0] = rsrc;
- }
+ image_fetch_rsrc(bld_base, &inst->Src[0], false, target, &args.resource);
+ image_fetch_coords(bld_base, inst, 1, args.resource, args.coords);
+ vindex = args.coords[0]; /* for buffers only */
}
if (inst->Src[0].Register.File == TGSI_FILE_CONSTBUF) {
- load_emit_buffer(ctx, emit_data, true, true);
+ emit_data->output[emit_data->chan] =
+ ac_build_buffer_load(&ctx->ac, args.resource,
+ util_last_bit(inst->Dst[0].Register.WriteMask),
+ NULL, voffset, NULL, 0, 0, true, true);
return;
}
if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
- ac_build_waitcnt(&ctx->ac, VM_CNT);
+ ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
can_speculate = !(inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE) &&
is_oneway_access_only(inst, info,
info->shader_buffers_store |
info->shader_buffers_atomic,
info->images_store |
- info->images_atomic);
+ info->images_atomic,
+ info->uses_bindless_buffer_store |
+ info->uses_bindless_buffer_atomic,
+ info->uses_bindless_image_store |
+ info->uses_bindless_image_atomic);
+ args.cache_policy = get_cache_policy(ctx, inst, false, false, false);
if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
- load_emit_buffer(ctx, emit_data, can_speculate, false);
+ /* Don't use SMEM for shader buffer loads, because LLVM doesn't
+ * select SMEM for SI.load.const with a non-constant offset, and
+ * constant offsets practically don't exist with shader buffers.
+ *
+ * Also, SI.load.const doesn't use inst_offset when it's lowered
+ * to VMEM, so we just end up with more VALU instructions in the end
+ * and no benefit.
+ *
+ * TODO: Remove this line once LLVM can select SMEM with a non-constant
+ * offset, and can derive inst_offset when VMEM is selected.
+ * After that, si_memory_barrier should invalidate sL1 for shader
+ * buffers.
+ */
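+	/* Concretely (illustrative): SMEM here stands for
+	 * s_buffer_load_dword through the scalar cache, VMEM for
+	 * buffer_load_dword through the vector caches. */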
+ emit_data->output[emit_data->chan] =
+ ac_build_buffer_load(&ctx->ac, args.resource,
+ util_last_bit(inst->Dst[0].Register.WriteMask),
+ NULL, voffset, NULL, 0,
+ args.cache_policy, can_speculate, false);
return;
}
unsigned num_channels = util_last_bit(inst->Dst[0].Register.WriteMask);
LLVMValueRef result =
ac_build_buffer_load_format(&ctx->ac,
- emit_data->args[0],
- emit_data->args[1],
- emit_data->args[2],
+ args.resource,
+ vindex,
+ ctx->i32_0,
num_channels,
- LLVMConstIntGetZExtValue(emit_data->args[3]),
+ args.cache_policy,
can_speculate);
emit_data->output[emit_data->chan] =
ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
} else {
- struct ac_image_args args = {};
args.opcode = ac_image_load;
- args.resource = emit_data->args[0];
- memcpy(args.coords, &emit_data->args[1], sizeof(args.coords));
args.dim = ac_image_dim_from_tgsi_target(ctx->screen, inst->Memory.Texture);
- if (inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE))
- args.cache_policy = ac_glc;
args.attributes = ac_get_load_intr_attribs(can_speculate);
args.dmask = 0xf;
}
}
-static void store_emit_buffer(
- struct si_shader_context *ctx,
- struct lp_build_emit_data *emit_data,
- bool writeonly_memory)
+static void store_emit_buffer(struct si_shader_context *ctx,
+ LLVMValueRef resource,
+ unsigned writemask,
+ LLVMValueRef value,
+ LLVMValueRef voffset,
+ unsigned cache_policy,
+ bool writeonly_memory)
{
- const struct tgsi_full_instruction *inst = emit_data->inst;
LLVMBuilderRef builder = ctx->ac.builder;
- LLVMValueRef base_data = emit_data->args[0];
- LLVMValueRef base_offset = emit_data->args[3];
- unsigned writemask = inst->Dst[0].Register.WriteMask;
-
- /* If this is write-only, don't keep data in L1 to prevent
- * evicting L1 cache lines that may be needed by other
- * instructions.
- */
- if (writeonly_memory)
- emit_data->args[4] = LLVMConstInt(ctx->i1, 1, 0); /* GLC = 1 */
+ LLVMValueRef base_data = value;
+ LLVMValueRef base_offset = voffset;
while (writemask) {
int start, count;
- const char *intrinsic_name;
- LLVMValueRef data;
- LLVMValueRef offset;
- LLVMValueRef tmp;
+ LLVMValueRef data, voff;
u_bit_scan_consecutive_range(&writemask, &start, &count);
- /* Due to an LLVM limitation, split 3-element writes
- * into a 2-element and a 1-element write. */
- if (count == 3) {
- writemask |= 1 << (start + 2);
- count = 2;
- }
-
- if (count == 4) {
+ if (count == 3 && ac_has_vec3_support(ctx->ac.chip_class, false)) {
+ LLVMValueRef values[3] = {
+ LLVMBuildExtractElement(builder, base_data,
+ LLVMConstInt(ctx->i32, start, 0), ""),
+ LLVMBuildExtractElement(builder, base_data,
+ LLVMConstInt(ctx->i32, start + 1, 0), ""),
+ LLVMBuildExtractElement(builder, base_data,
+ LLVMConstInt(ctx->i32, start + 2, 0), ""),
+ };
+ data = ac_build_gather_values(&ctx->ac, values, 3);
+ } else if (count >= 3) {
data = base_data;
- intrinsic_name = "llvm.amdgcn.buffer.store.v4f32";
} else if (count == 2) {
- LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);
-
- tmp = LLVMBuildExtractElement(
- builder, base_data,
- LLVMConstInt(ctx->i32, start, 0), "");
- data = LLVMBuildInsertElement(
- builder, LLVMGetUndef(v2f32), tmp,
- ctx->i32_0, "");
-
- tmp = LLVMBuildExtractElement(
- builder, base_data,
- LLVMConstInt(ctx->i32, start + 1, 0), "");
- data = LLVMBuildInsertElement(
- builder, data, tmp, ctx->i32_1, "");
-
- intrinsic_name = "llvm.amdgcn.buffer.store.v2f32";
+ LLVMValueRef values[2] = {
+ LLVMBuildExtractElement(builder, base_data,
+ LLVMConstInt(ctx->i32, start, 0), ""),
+ LLVMBuildExtractElement(builder, base_data,
+ LLVMConstInt(ctx->i32, start + 1, 0), ""),
+ };
+
+ data = ac_build_gather_values(&ctx->ac, values, 2);
} else {
assert(count == 1);
data = LLVMBuildExtractElement(
builder, base_data,
LLVMConstInt(ctx->i32, start, 0), "");
- intrinsic_name = "llvm.amdgcn.buffer.store.f32";
}
- offset = base_offset;
+ voff = base_offset;
if (start != 0) {
- offset = LLVMBuildAdd(
- builder, offset,
+ voff = LLVMBuildAdd(
+ builder, voff,
LLVMConstInt(ctx->i32, start * 4, 0), "");
}
- emit_data->args[0] = data;
- emit_data->args[3] = offset;
-
- ac_build_intrinsic(
- &ctx->ac, intrinsic_name, ctx->voidt,
- emit_data->args, emit_data->arg_count,
- ac_get_store_intr_attribs(writeonly_memory));
+ ac_build_buffer_store_dword(&ctx->ac, resource, data, count,
+ voff, ctx->i32_0, 0, cache_policy,
+ false);
}
}
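/* Note: the removed manual split of 3-element writes into a 2-element and
 * a 1-element write is expected to happen inside
 * ac_build_buffer_store_dword now, whenever the target LLVM lacks v3f32
 * support (assumption based on the removed comment above). */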
struct tgsi_full_src_register resource_reg =
tgsi_full_src_register_from_dst(&inst->Dst[0]);
unsigned target = inst->Memory.Texture;
- bool writeonly_memory = false;
- LLVMValueRef chans[4], rsrc;
if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) {
store_emit_memory(ctx, emit_data);
return;
}
+ bool writeonly_memory = is_oneway_access_only(inst, info,
+ info->shader_buffers_load |
+ info->shader_buffers_atomic,
+ info->images_load |
+ info->images_atomic,
+ info->uses_bindless_buffer_load |
+ info->uses_bindless_buffer_atomic,
+ info->uses_bindless_image_load |
+ info->uses_bindless_image_atomic);
+ LLVMValueRef chans[4];
+ LLVMValueRef vindex = ctx->i32_0;
+ LLVMValueRef voffset = ctx->i32_0;
+ struct ac_image_args args = {};
+
for (unsigned chan = 0; chan < 4; ++chan)
chans[chan] = lp_build_emit_fetch(bld_base, inst, 1, chan);
- emit_data->args[emit_data->arg_count++] =
- ac_build_gather_values(&ctx->ac, chans, 4);
-
if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
- LLVMValueRef offset, tmp;
-
- rsrc = shader_buffer_fetch_rsrc(ctx, &resource_reg, false);
-
- tmp = lp_build_emit_fetch(bld_base, inst, 0, 0);
- offset = ac_to_integer(&ctx->ac, tmp);
-
- buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
- offset, false, false);
- } else if (inst->Dst[0].Register.File == TGSI_FILE_IMAGE ||
- tgsi_is_bindless_image_file(inst->Dst[0].Register.File)) {
- /* 8bit/16bit TC L1 write corruption bug on SI.
- * All store opcodes not aligned to a dword are affected.
- *
- * The only way to get unaligned stores in radeonsi is through
- * shader images.
- */
- bool force_glc = ctx->screen->info.chip_class == SI;
-
- image_fetch_rsrc(bld_base, &resource_reg, true, target, &rsrc);
- image_fetch_coords(bld_base, inst, 0, rsrc, &emit_data->args[2]);
-
- if (target == TGSI_TEXTURE_BUFFER) {
- buffer_append_args(ctx, emit_data, rsrc, emit_data->args[2],
- ctx->i32_0, false, force_glc);
- } else {
- emit_data->args[1] = rsrc;
- }
+ args.resource = shader_buffer_fetch_rsrc(ctx, &resource_reg, false);
+ voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 0, 0));
+ } else {
+ image_fetch_rsrc(bld_base, &resource_reg, true, target, &args.resource);
+ image_fetch_coords(bld_base, inst, 0, args.resource, args.coords);
+ vindex = args.coords[0]; /* for buffers only */
}
if (inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
- ac_build_waitcnt(&ctx->ac, VM_CNT);
+ ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
- writeonly_memory = is_oneway_access_only(inst, info,
- info->shader_buffers_load |
- info->shader_buffers_atomic,
- info->images_load |
- info->images_atomic);
+ bool is_image = inst->Dst[0].Register.File != TGSI_FILE_BUFFER;
+ args.cache_policy = get_cache_policy(ctx, inst,
+ false, /* atomic */
+ is_image, /* may_store_unaligned */
+ writeonly_memory);
if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) {
- store_emit_buffer(ctx, emit_data, writeonly_memory);
+ store_emit_buffer(ctx, args.resource, inst->Dst[0].Register.WriteMask,
+ ac_build_gather_values(&ctx->ac, chans, 4),
+ voffset, args.cache_policy, writeonly_memory);
return;
}
if (target == TGSI_TEXTURE_BUFFER) {
- /* If this is write-only, don't keep data in L1 to prevent
- * evicting L1 cache lines that may be needed by other
- * instructions.
- */
- if (writeonly_memory)
- emit_data->args[4] = LLVMConstInt(ctx->i1, 1, 0); /* GLC = 1 */
-
- emit_data->output[emit_data->chan] = ac_build_intrinsic(
- &ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32",
- ctx->voidt, emit_data->args,
- emit_data->arg_count,
- ac_get_store_intr_attribs(writeonly_memory));
+ unsigned num_channels = util_last_bit(inst->Dst[0].Register.WriteMask);
+
+ ac_build_buffer_store_format(&ctx->ac, args.resource,
+ ac_build_gather_values(&ctx->ac, chans, num_channels),
+ vindex, ctx->i32_0 /* voffset */,
+ num_channels,
+ args.cache_policy);
} else {
- struct ac_image_args args = {};
args.opcode = ac_image_store;
- args.data[0] = emit_data->args[0];
- args.resource = emit_data->args[1];
- memcpy(args.coords, &emit_data->args[2], sizeof(args.coords));
+ args.data[0] = ac_build_gather_values(&ctx->ac, chans, 4);
args.dim = ac_image_dim_from_tgsi_target(ctx->screen, inst->Memory.Texture);
- args.attributes = ac_get_store_intr_attribs(writeonly_memory);
+ args.attributes = AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY;
args.dmask = 0xf;
- /* Workaround for 8bit/16bit TC L1 write corruption bug on SI.
- * All store opcodes not aligned to a dword are affected.
- */
- if (ctx->screen->info.chip_class == SI ||
- /* If this is write-only, don't keep data in L1 to prevent
- * evicting L1 cache lines that may be needed by other
- * instructions. */
- writeonly_memory ||
- inst->Memory.Qualifier & (TGSI_MEMORY_COHERENT | TGSI_MEMORY_VOLATILE))
- args.cache_policy = ac_glc;
-
emit_data->output[emit_data->chan] =
ac_build_image_opcode(&ctx->ac, &args);
}
}
-static void atomic_fetch_args(
- struct lp_build_tgsi_context * bld_base,
- struct lp_build_emit_data * emit_data)
-{
- struct si_shader_context *ctx = si_shader_context(bld_base);
- const struct tgsi_full_instruction * inst = emit_data->inst;
- LLVMValueRef data1, data2;
- LLVMValueRef rsrc;
- LLVMValueRef tmp;
-
- emit_data->dst_type = ctx->f32;
-
- tmp = lp_build_emit_fetch(bld_base, inst, 2, 0);
- data1 = ac_to_integer(&ctx->ac, tmp);
-
- if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
- tmp = lp_build_emit_fetch(bld_base, inst, 3, 0);
- data2 = ac_to_integer(&ctx->ac, tmp);
- }
-
- /* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
- * of arguments, which is reversed relative to TGSI (and GLSL)
- */
- if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
- emit_data->args[emit_data->arg_count++] = data2;
- emit_data->args[emit_data->arg_count++] = data1;
-
- if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
- LLVMValueRef offset;
-
- rsrc = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);
-
- tmp = lp_build_emit_fetch(bld_base, inst, 1, 0);
- offset = ac_to_integer(&ctx->ac, tmp);
-
- buffer_append_args(ctx, emit_data, rsrc, ctx->i32_0,
- offset, true, false);
- } else if (inst->Src[0].Register.File == TGSI_FILE_IMAGE ||
- tgsi_is_bindless_image_file(inst->Src[0].Register.File)) {
- unsigned target = inst->Memory.Texture;
-
- image_fetch_rsrc(bld_base, &inst->Src[0], true, target, &rsrc);
- image_fetch_coords(bld_base, inst, 1, rsrc,
- &emit_data->args[emit_data->arg_count + 1]);
-
- if (target == TGSI_TEXTURE_BUFFER) {
- buffer_append_args(ctx, emit_data, rsrc,
- emit_data->args[emit_data->arg_count + 1],
- ctx->i32_0, true, false);
- } else {
- emit_data->args[emit_data->arg_count] = rsrc;
- }
- }
-}
-
static void atomic_emit_memory(struct si_shader_context *ctx,
struct lp_build_emit_data *emit_data) {
LLVMBuilderRef builder = ctx->ac.builder;
const struct tgsi_full_instruction * inst = emit_data->inst;
LLVMValueRef ptr, result, arg;
+ const char *sync_scope = HAVE_LLVM >= 0x0900 ? "workgroup-one-as" : "workgroup";
ptr = get_memory_ptr(ctx, inst, ctx->i32, 1);
new_data = ac_to_integer(&ctx->ac, new_data);
- result = LLVMBuildAtomicCmpXchg(builder, ptr, arg, new_data,
- LLVMAtomicOrderingSequentiallyConsistent,
- LLVMAtomicOrderingSequentiallyConsistent,
- false);
-
+ result = ac_build_atomic_cmp_xchg(&ctx->ac, ptr, arg, new_data,
+ sync_scope);
result = LLVMBuildExtractValue(builder, result, 0, "");
} else {
LLVMAtomicRMWBinOp op;
unreachable("unknown atomic opcode");
}
- result = LLVMBuildAtomicRMW(builder, op, ptr, arg,
- LLVMAtomicOrderingSequentiallyConsistent,
- false);
+ result = ac_build_atomic_rmw(&ctx->ac, op, ptr, arg, sync_scope);
}
- emit_data->output[emit_data->chan] = LLVMBuildBitCast(builder, result, emit_data->dst_type, "");
+ emit_data->output[emit_data->chan] =
+ LLVMBuildBitCast(builder, result, ctx->f32, "");
}
static void atomic_emit(
{
struct si_shader_context *ctx = si_shader_context(bld_base);
const struct tgsi_full_instruction * inst = emit_data->inst;
- LLVMValueRef tmp;
+ struct ac_image_args args = {};
+ unsigned num_data = 0;
+ LLVMValueRef vindex = ctx->i32_0;
+ LLVMValueRef voffset = ctx->i32_0;
if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) {
atomic_emit_memory(ctx, emit_data);
return;
}
- if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
+ if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
+ /* llvm.amdgcn.image/buffer.atomic.cmpswap reflect the hardware order
+ * of arguments, which is reversed relative to TGSI (and GLSL)
+ */
+ args.data[num_data++] =
+ ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 3, 0));
+ }
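+	/* E.g. for TGSI "ATOMCAS dst, resource, offset, cmp, src":
+	 * data[0] receives src (the value to store) and data[1] receives
+	 * cmp, matching the hardware cmpswap operand order. */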
+
+ args.data[num_data++] =
+ ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 2, 0));
+ args.cache_policy = get_cache_policy(ctx, inst, true, false, false);
+
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) {
+ args.resource = shader_buffer_fetch_rsrc(ctx, &inst->Src[0], false);
+ voffset = ac_to_integer(&ctx->ac, lp_build_emit_fetch(bld_base, inst, 1, 0));
+ } else {
+ image_fetch_rsrc(bld_base, &inst->Src[0], true,
+ inst->Memory.Texture, &args.resource);
+ image_fetch_coords(bld_base, inst, 1, args.resource, args.coords);
+ vindex = args.coords[0]; /* for buffers only */
+ }
+
+ if (HAVE_LLVM >= 0x0800 &&
+ inst->Src[0].Register.File != TGSI_FILE_BUFFER &&
inst->Memory.Texture == TGSI_TEXTURE_BUFFER) {
+ LLVMValueRef buf_args[7];
+ unsigned num_args = 0;
+
+ buf_args[num_args++] = args.data[0];
+ if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
+ buf_args[num_args++] = args.data[1];
+
+ buf_args[num_args++] = args.resource;
+ buf_args[num_args++] = vindex;
+ buf_args[num_args++] = voffset;
+ buf_args[num_args++] = ctx->i32_0; /* soffset */
+ buf_args[num_args++] = LLVMConstInt(ctx->i32, args.cache_policy & ac_slc, 0);
+
+ char intrinsic_name[64];
+ snprintf(intrinsic_name, sizeof(intrinsic_name),
+ "llvm.amdgcn.struct.buffer.atomic.%s", action->intr_name);
+ emit_data->output[emit_data->chan] =
+ ac_to_float(&ctx->ac,
+ ac_build_intrinsic(&ctx->ac, intrinsic_name,
+ ctx->i32, buf_args, num_args, 0));
+ return;
+ }
+
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
+ (HAVE_LLVM < 0x0800 &&
+ inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
+ LLVMValueRef buf_args[7];
+ unsigned num_args = 0;
+
+ buf_args[num_args++] = args.data[0];
+ if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS)
+ buf_args[num_args++] = args.data[1];
+
+ buf_args[num_args++] = args.resource;
+ buf_args[num_args++] = vindex;
+ buf_args[num_args++] = voffset;
+ buf_args[num_args++] = args.cache_policy & ac_slc ? ctx->i1true : ctx->i1false;
+
char intrinsic_name[40];
snprintf(intrinsic_name, sizeof(intrinsic_name),
"llvm.amdgcn.buffer.atomic.%s", action->intr_name);
- tmp = ac_build_intrinsic(
- &ctx->ac, intrinsic_name, ctx->i32,
- emit_data->args, emit_data->arg_count, 0);
- emit_data->output[emit_data->chan] = ac_to_float(&ctx->ac, tmp);
+ emit_data->output[emit_data->chan] =
+ ac_to_float(&ctx->ac,
+ ac_build_intrinsic(&ctx->ac, intrinsic_name,
+ ctx->i32, buf_args, num_args, 0));
} else {
- unsigned num_data = inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS ? 2 : 1;
- struct ac_image_args args = {};
-
if (inst->Instruction.Opcode == TGSI_OPCODE_ATOMCAS) {
args.opcode = ac_image_atomic_cmpswap;
} else {
}
}
- for (unsigned i = 0; i < num_data; ++i)
- args.data[i] = emit_data->args[i];
-
- args.resource = emit_data->args[num_data];
- memcpy(args.coords, &emit_data->args[num_data + 1], sizeof(args.coords));
args.dim = ac_image_dim_from_tgsi_target(ctx->screen, inst->Memory.Texture);
-
emit_data->output[emit_data->chan] =
ac_to_float(&ctx->ac, ac_build_image_opcode(&ctx->ac, &args));
}
LLVMBuilderRef builder = ctx->ac.builder;
/* 1D textures are allocated and used as 2D on GFX9. */
- if (ctx->screen->info.chip_class >= GFX9 &&
+ if (ctx->screen->info.chip_class == GFX9 &&
(target == TGSI_TEXTURE_1D_ARRAY ||
target == TGSI_TEXTURE_SHADOW1D_ARRAY)) {
LLVMValueRef layers =
args.opcode = ac_image_get_resinfo;
args.dim = ac_texture_dim_from_tgsi_target(ctx->screen, target);
args.dmask = 0xf;
+ args.attributes = AC_FUNC_ATTR_READNONE;
if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
tex_fetch_ptrs(bld_base, emit_data, &args.resource, NULL, NULL);
break;
case AC_DESC_BUFFER:
/* The buffer is in [4:7]. */
- index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
- index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
+ index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->i32, 4, 0),
+ ctx->i32_1);
list = LLVMBuildPointerCast(builder, list,
ac_array_in_const32_addr_space(ctx->v4i32), "");
break;
case AC_DESC_FMASK:
/* The FMASK is at [8:15]. */
- index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
- index = LLVMBuildAdd(builder, index, ctx->i32_1, "");
+ index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->i32, 2, 0),
+ ctx->i32_1);
break;
case AC_DESC_SAMPLER:
/* The sampler state is at [12:15]. */
- index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, 4, 0), "");
- index = LLVMBuildAdd(builder, index, LLVMConstInt(ctx->i32, 3, 0), "");
+ index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->i32, 4, 0),
+ LLVMConstInt(ctx->i32, 3, 0));
list = LLVMBuildPointerCast(builder, list,
ac_array_in_const32_addr_space(ctx->v4i32), "");
break;
+ case AC_DESC_PLANE_0:
+ case AC_DESC_PLANE_1:
+ case AC_DESC_PLANE_2:
+ /* Only used for the multiplane image support for Vulkan. Should
+ * never be reached in radeonsi.
+ */
+ unreachable("Plane descriptor requested in radeonsi.");
}
return ac_build_load_to_sgpr(&ctx->ac, list, index);
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
*
- * SI-CI:
+ * GFX6-GFX7:
* If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
* filtering manually. The driver sets img7 to a mask clearing
* MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
* s_and_b32 samp0, samp0, img7
*
- * VI:
+ * GFX8:
* The ANISO_OVERRIDE sampler field enables this fix in TA.
*/
static LLVMValueRef sici_fix_sampler_aniso(struct si_shader_context *ctx,
{
LLVMValueRef img7, samp0;
- if (ctx->screen->info.chip_class >= VI)
+ if (ctx->screen->info.chip_class >= GFX8)
return samp;
img7 = LLVMBuildExtractElement(ctx->ac.builder, res,
ctx->param_bindless_samplers_and_images);
index = lp_build_emit_fetch_src(bld_base, reg,
TGSI_TYPE_UNSIGNED, 0);
+
+ /* Since bindless handle arithmetic can contain an unsigned integer
+ * wraparound and si_load_sampler_desc assumes there isn't any,
+ * use GEP without "inbounds" (inside ac_build_pointer_add)
+ * to prevent incorrect code generation and hangs.
+ */
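+	/* Illustrative LLVM IR for the distinction:
+	 *   getelementptr inbounds ... ; address wraparound yields poison
+	 *   getelementptr ...          ; wraparound is well defined
+	 */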
+ index = LLVMBuildMul(ctx->ac.builder, index, LLVMConstInt(ctx->i32, 2, 0), "");
+ list = ac_build_pointer_add(&ctx->ac, list, index);
+ index = ctx->i32_0;
}
if (target == TGSI_TEXTURE_BUFFER)
uint32_t wa_num_format =
return_type == TGSI_RETURN_TYPE_UINT ?
- S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_USCALED) :
- S_008F14_NUM_FORMAT_GFX6(V_008F14_IMG_NUM_FORMAT_SSCALED);
+ S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_USCALED) :
+ S_008F14_NUM_FORMAT(V_008F14_IMG_NUM_FORMAT_SSCALED);
wa_formats = LLVMBuildAnd(builder, formats,
- LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT_GFX6, false),
+ LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT, false),
"");
wa_formats = LLVMBuildOr(builder, wa_formats,
LLVMConstInt(ctx->i32, wa_num_format, false), "");
resinfo.sampler = args->sampler;
resinfo.lod = ctx->ac.i32_0;
resinfo.dmask = 0xf;
+ resinfo.attributes = AC_FUNC_ATTR_READNONE;
LLVMValueRef texsize =
fix_resinfo(ctx, target,
args.resource,
vindex,
ctx->i32_0,
- num_channels, false, true);
+ num_channels, 0, true);
emit_data->output[emit_data->chan] =
ac_build_expand_to_vec4(&ctx->ac, result, num_channels);
return;
*
* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
* so the depth comparison value isn't clamped for Z16 and
- * Z24 anymore. Do it manually here.
+ * Z24 anymore. Do it manually here for GFX8-9; GFX10 has
+ * an explicitly clamped 32-bit float format.
*/
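	/* A minimal sketch of the manual clamp (ac_build_clamp assumed
	 * from the ac helpers):
	 *   if (the format was upgraded to Z32_FLOAT)
	 *           z = ac_build_clamp(&ctx->ac, z);
	 */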
- if (ctx->screen->info.chip_class >= VI) {
+ if (ctx->screen->info.chip_class >= GFX8 &&
+ ctx->screen->info.chip_class <= GFX9) {
LLVMValueRef upgraded;
LLVMValueRef clamped;
upgraded = LLVMBuildExtractElement(ctx->ac.builder, args.sampler,
num_src_deriv_channels = 1;
/* 1D textures are allocated and used as 2D on GFX9. */
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.chip_class == GFX9) {
num_dst_deriv_channels = 2;
} else {
num_dst_deriv_channels = 1;
} else if (tgsi_is_array_sampler(target) &&
opcode != TGSI_OPCODE_TXF &&
opcode != TGSI_OPCODE_TXF_LZ &&
- ctx->screen->info.chip_class <= VI) {
+ ctx->screen->info.chip_class <= GFX8) {
unsigned array_coord = target == TGSI_TEXTURE_1D_ARRAY ? 1 : 2;
- args.coords[array_coord] =
- ac_build_intrinsic(&ctx->ac, "llvm.rint.f32", ctx->f32,
- &args.coords[array_coord], 1, 0);
+ args.coords[array_coord] = ac_build_round(&ctx->ac, args.coords[array_coord]);
}
/* 1D textures are allocated and used as 2D on GFX9. */
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.chip_class == GFX9) {
LLVMValueRef filler;
/* Use 0.5, so that we don't sample the border color. */
}
}
- if (target == TGSI_TEXTURE_2D_MSAA ||
- target == TGSI_TEXTURE_2D_ARRAY_MSAA) {
+ if ((target == TGSI_TEXTURE_2D_MSAA ||
+ target == TGSI_TEXTURE_2D_ARRAY_MSAA) &&
+ !(ctx->screen->debug_flags & DBG(NO_FMASK))) {
ac_apply_fmask_to_sample(&ctx->ac, fmask_ptr, args.coords,
target == TGSI_TEXTURE_2D_ARRAY_MSAA);
}
/* The hardware needs special lowering for Gather4 with integer formats. */
LLVMValueRef gather4_int_result_workaround = NULL;
- if (ctx->screen->info.chip_class <= VI &&
+ if (ctx->screen->info.chip_class <= GFX8 &&
opcode == TGSI_OPCODE_TG4) {
assert(inst->Texture.ReturnType != TGSI_RETURN_TYPE_UNKNOWN);
if (ctx->shader->key.mono.u.ps.fbfetch_msaa)
args.coords[chan++] = si_get_sample_id(ctx);
- if (ctx->shader->key.mono.u.ps.fbfetch_msaa) {
+ if (ctx->shader->key.mono.u.ps.fbfetch_msaa &&
+ !(ctx->screen->debug_flags & DBG(NO_FMASK))) {
fmask = ac_build_load_to_sgpr(&ctx->ac, ptr,
LLVMConstInt(ctx->i32, SI_PS_IMAGE_COLORBUF0_FMASK / 2, 0));
args.opcode = ac_image_load;
args.resource = image;
args.dmask = 0xf;
+ args.attributes = AC_FUNC_ATTR_READNONE;
+
if (ctx->shader->key.mono.u.ps.fbfetch_msaa)
args.dim = ctx->shader->key.mono.u.ps.fbfetch_layered ?
ac_image_2darraymsaa : ac_image_2dmsaa;
*/
void si_shader_context_init_mem(struct si_shader_context *ctx)
{
- struct lp_build_tgsi_context *bld_base;
- struct lp_build_tgsi_action tmpl = {};
-
- bld_base = &ctx->bld_base;
+ struct lp_build_tgsi_context *bld_base = &ctx->bld_base;
bld_base->op_actions[TGSI_OPCODE_TEX].emit = build_tex_intrinsic;
bld_base->op_actions[TGSI_OPCODE_TEX_LZ].emit = build_tex_intrinsic;
bld_base->op_actions[TGSI_OPCODE_STORE].emit = store_emit;
bld_base->op_actions[TGSI_OPCODE_RESQ].emit = resq_emit;
- tmpl.fetch_args = atomic_fetch_args;
- tmpl.emit = atomic_emit;
- bld_base->op_actions[TGSI_OPCODE_ATOMUADD] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMUADD].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMUADD].intr_name = "add";
- bld_base->op_actions[TGSI_OPCODE_ATOMXCHG] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMXCHG].intr_name = "swap";
- bld_base->op_actions[TGSI_OPCODE_ATOMCAS] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMCAS].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMCAS].intr_name = "cmpswap";
- bld_base->op_actions[TGSI_OPCODE_ATOMAND] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMAND].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMAND].intr_name = "and";
- bld_base->op_actions[TGSI_OPCODE_ATOMOR] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMOR].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMOR].intr_name = "or";
- bld_base->op_actions[TGSI_OPCODE_ATOMXOR] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMXOR].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMXOR].intr_name = "xor";
- bld_base->op_actions[TGSI_OPCODE_ATOMUMIN] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMUMIN].intr_name = "umin";
- bld_base->op_actions[TGSI_OPCODE_ATOMUMAX] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMUMAX].intr_name = "umax";
- bld_base->op_actions[TGSI_OPCODE_ATOMIMIN] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMIMIN].intr_name = "smin";
- bld_base->op_actions[TGSI_OPCODE_ATOMIMAX] = tmpl;
+ bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].emit = atomic_emit;
bld_base->op_actions[TGSI_OPCODE_ATOMIMAX].intr_name = "smax";
}