nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
if (src0->bit_size != 32)
src0 = nir_u2u32(&b->nb, src0);
- return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
+ return nir_imul_imm(&b->nb, src0, stride);
}
}
case GLSL_TYPE_STRUCT: {
vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
unsigned member = deref_chain->link[idx].id;
- nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
- offset = nir_iadd(&b->nb, offset, mem_offset);
+ offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
type = type->members[member];
access |= type->access;
break;
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
instr->num_components = glsl_get_vector_elements(type);
+ /* Booleans usually shouldn't show up in external memory in SPIR-V.
+ * However, they do for certain older GLSLang versions and can for shared
+ * memory when we lower access chains internally.
+ */
+ const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
+ glsl_get_bit_size(type);
+
int src = 0;
if (!load) {
nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
if (load) {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- instr->num_components,
- glsl_get_bit_size(type), NULL);
+ instr->num_components, data_bit_size, NULL);
(*inout)->def = &instr->dest.ssa;
}
for (unsigned i = 0; i < num_ops; i++) {
nir_ssa_def *elem_offset =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
+ nir_iadd_imm(&b->nb, offset, i * col_stride);
_vtn_load_store_tail(b, op, load, index, elem_offset,
access_offset, access_size,
&(*inout)->elems[i],
nir_ssa_def *per_comp[4];
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_offset =
- nir_iadd(&b->nb, offset,
- nir_imm_int(&b->nb, i * type->stride));
+ nir_iadd_imm(&b->nb, offset, i * type->stride);
struct vtn_ssa_value *comp, temp_val;
if (!load) {
temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
unsigned elems = glsl_get_length(type->type);
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_off =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+ nir_iadd_imm(&b->nb, offset, i * type->stride);
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
type->array_element,
unsigned elems = glsl_get_length(type->type);
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_off =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+ nir_iadd_imm(&b->nb, offset, type->offsets[i]);
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
type->members[i],
spirv_decoration_to_string(dec->decoration));
break;
+ case SpvDecorationHlslSemanticGOOGLE:
+ /* HLSL semantic decorations can safely be ignored by the driver. */
+ break;
+
default:
vtn_fail("Unhandled decoration");
}
case SpvDecorationCoherent:
vtn_var->access |= ACCESS_COHERENT;
break;
+ case SpvDecorationHlslCounterBufferGOOGLE:
+ /* The HLSL counter buffer decoration carries no semantic meaning for the driver and can safely be ignored. */
+ break;
default:
break;
}