bool bindless)
{
enum gl_access_qualifier access = nir_intrinsic_access(intrin);
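+ /* Image loads and stores carry a TYPE index describing their element type;
+  * atomics do not, so consult index_map before reading it.  Remember the
+  * value so it can be re-applied once the intrinsic has been rewritten to
+  * its (bindless_)image_* form below.
+  */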
+ nir_alu_type type = nir_type_invalid;
+ if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_TYPE])
+ type = nir_intrinsic_type(intrin);
switch (intrin->intrinsic) {
#define CASE(op) \
nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
nir_intrinsic_set_access(intrin, access | var->data.access);
nir_intrinsic_set_format(intrin, var->data.image.format);
+ if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_TYPE])
+ nir_intrinsic_set_type(intrin, type);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(src));
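Because only loads and stores gain the new index (atomics keep their old layout), any pass that wants to read it has to guard the query the same way the hunk above does. A minimal sketch of that guard, factored into a hypothetical helper that is not part of this patch:

#include "nir.h"

/* Hypothetical helper (not in the patch): true if this image intrinsic
 * variant defines the TYPE index, i.e. loads and stores after this change;
 * atomics and the size/samples queries do not. */
static inline bool
image_intrin_has_type(const nir_intrinsic_instr *intrin)
{
   return nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_TYPE] != 0;
}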
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
-def image(name, src_comp=[], **kwargs):
+def image(name, src_comp=[], extra_indices=[], **kwargs):
intrinsic("image_deref_" + name, src_comp=[1] + src_comp,
- indices=[ACCESS], **kwargs)
+ indices=[ACCESS] + extra_indices, **kwargs)
intrinsic("image_" + name, src_comp=[1] + src_comp,
- indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
+ indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS] + extra_indices, **kwargs)
intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
- indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
+ indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS] + extra_indices, **kwargs)
-image("load", src_comp=[4, 1, 1], dest_comp=0, flags=[CAN_ELIMINATE])
-image("store", src_comp=[4, 1, 0, 1])
+image("load", src_comp=[4, 1, 1], extra_indices=[TYPE], dest_comp=0, flags=[CAN_ELIMINATE])
+image("store", src_comp=[4, 1, 0, 1], extra_indices=[TYPE])
image("atomic_add", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1)
image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1)
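With the definitions above, the deref, bound and bindless flavours of load and store all expose the same TYPE index, while the atomics are left untouched. For a GL-style frontend that only has the image variable at hand, the value to put in that index could be derived roughly as in the sketch below (the helper name is invented, and it assumes the variable's sampler result type is the element type):

#include "nir.h"

/* Hypothetical helper: derive the TYPE index value from an image variable,
 * e.g. image2D -> float, iimage2D -> int, uimage2D -> uint. */
static nir_alu_type
image_elem_type_for_var(const nir_variable *var)
{
   const struct glsl_type *type = glsl_without_array(var->type);
   return nir_get_nir_type_for_glsl_base_type(glsl_get_sampler_result_type(type));
}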
case SpvOpAtomicStore:
case SpvOpImageWrite: {
const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
- nir_ssa_def *value = vtn_get_nir_ssa(b, value_id);
+ struct vtn_ssa_value *value = vtn_ssa_value(b, value_id);
/* nir_intrinsic_image_deref_store always takes a vec4 value */
assert(op == nir_intrinsic_image_deref_store);
intrin->num_components = 4;
- intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
+ intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value->def));
/* Only OpImageWrite can take a lod parameter (via
 * SPV_AMD_shader_image_load_store_lod), but the current NIR intrinsic
 * definitions for atomics require us to set it for OpAtomicStore as well.
 */
intrin->src[4] = nir_src_for_ssa(image.lod);
+
+ if (opcode == SpvOpImageWrite)
+ nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
break;
}
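Switching from vtn_get_nir_ssa() to vtn_ssa_value() is what makes value->type available for the new TYPE index, while value->def still feeds the existing vec4 expansion. For context, a padding helper in the spirit of expand_to_vec4() might look like the sketch below (hypothetical name and behaviour; the real helper in spirv_to_nir may pad differently):

#include "nir_builder.h"

/* Hypothetical stand-in for expand_to_vec4(): pad a 1-3 component value out
 * to the vec4 that nir_intrinsic_image_deref_store expects.  Extra
 * components simply replicate component 0 here. */
static nir_ssa_def *
pad_store_value_to_vec4(nir_builder *b, nir_ssa_def *value)
{
   if (value->num_components == 4)
      return value;

   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < 4; i++)
      comps[i] = i < value->num_components ? nir_channel(b, value, i)
                                           : nir_channel(b, value, 0);
   return nir_vec(b, comps, 4);
}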
result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
vtn_push_nir_ssa(b, w[2], result);
+
+ if (opcode == SpvOpImageRead)
+ nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
} else {
nir_builder_instr_insert(&b->nb, &intrin->instr);
}
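On the consumer side, a backend can now tell float from integer image reads directly from the intrinsic instead of inferring it from the variable or the format. A minimal sketch, assuming the backend only cares about the base type (the helper name is invented):

#include <assert.h>
#include <stdbool.h>
#include "nir.h"

/* Hypothetical query: does this image load return floating-point data?
 * Relies on the TYPE index set by the SPIR-V frontend above. */
static bool
image_load_returns_float(const nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_image_load ||
          intrin->intrinsic == nir_intrinsic_bindless_image_load ||
          intrin->intrinsic == nir_intrinsic_image_deref_load);
   return nir_alu_type_get_base_type(nir_intrinsic_type(intrin)) == nir_type_float;
}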