case SpvOpDecorate:
case SpvOpMemberDecorate:
+ case SpvOpDecorateStringGOOGLE:
+ case SpvOpMemberDecorateStringGOOGLE:
case SpvOpExecutionMode: {
struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
switch (opcode) {
case SpvOpDecorate:
+ case SpvOpDecorateStringGOOGLE:
dec->scope = VTN_DEC_DECORATION;
break;
case SpvOpMemberDecorate:
+ case SpvOpMemberDecorateStringGOOGLE:
dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
            "Member argument of OpMemberDecorate too large");
break;
+ case SpvDecorationHlslSemanticGOOGLE:
+ /* HLSL semantic decorations can safely be ignored by the driver. */
+ break;
+
default:
vtn_fail("Unhandled decoration");
}
case SpvDecorationOffset:
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
+ case SpvDecorationHlslSemanticGOOGLE:
vtn_warn("Decoration only allowed for struct members: %s",
spirv_decoration_to_string(dec->decoration));
break;
val->type->type = glsl_uint_type();
}
- if (storage_class == SpvStorageClassWorkgroup &&
- b->options->lower_workgroup_access_to_offsets) {
- uint32_t size, align;
- val->type->deref = vtn_type_layout_std430(b, val->type->deref,
- &size, &align);
- val->type->length = size;
- val->type->align = align;
+ if (storage_class == SpvStorageClassWorkgroup) {
/* These can actually be stored to nir_variables and used as SSA
* values so they need a real glsl_type.
*/
val->type->type = glsl_uint_type();
+ if (b->options->lower_workgroup_access_to_offsets) {
+ uint32_t size, align;
+ val->type->deref = vtn_type_layout_std430(b, val->type->deref,
+ &size, &align);
+ val->type->length = size;
+ val->type->align = align;
+ }
}
break;
}
opcode == SpvOpSpecConstantFalse)
int_val = get_specialization(b, val, int_val);
- val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
+ val->constant->values[0].b[0] = int_val != 0;
break;
}
case 8:
val->constant->values[0].u8[i] = elems[i]->values[0].u8[0];
break;
+ case 1:
+ val->constant->values[0].b[i] = elems[i]->values[0].b[0];
+ break;
default:
vtn_fail("Invalid SpvOpConstantComposite bit size");
}
case 8:
val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i];
break;
+ case 1:
+ val->constant->values[0].b[i] = (*c)->values[col].b[elem + i];
+ break;
default:
vtn_fail("Invalid SpvOpCompositeExtract bit size");
}
case 8:
(*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i];
break;
+ case 1:
+ (*c)->values[col].b[elem + i] = insert->constant->values[0].b[i];
+ break;
default:
vtn_fail("Invalid SpvOpCompositeInsert bit size");
}
nir_const_value src[4];
for (unsigned i = 0; i < count - 4; i++) {
- nir_constant *c =
- vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
+ struct vtn_value *src_val =
+ vtn_value(b, w[4 + i], vtn_value_type_constant);
+
+ /* If this is an unsized source, pull the bit size from the
+ * source; otherwise, we'll use the bit size from the destination.
+ */
+ if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
+ bit_size = glsl_get_bit_size(src_val->type->type);
unsigned j = swap ? 1 - i : i;
- src[j] = c->values[0];
+ src[j] = src_val->constant->values[0];
+ }
+
+ /* fix up fixed size sources */
+ switch (op) {
+ case nir_op_ishl:
+ case nir_op_ishr:
+ case nir_op_ushr: {
+ if (bit_size == 32)
+ break;
+ for (unsigned i = 0; i < num_components; ++i) {
+ switch (bit_size) {
+ case 64: src[1].u32[i] = src[1].u64[i]; break;
+ case 16: src[1].u32[i] = src[1].u16[i]; break;
+ case 8: src[1].u32[i] = src[1].u8[i]; break;
+ }
+ }
+ break;
+ }
+ default:
+ break;
}
val->constant->values[0] =
texop = nir_texop_txf_ms;
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
}
+
+ if (operands & SpvImageOperandsMinLodMask) {
+ vtn_assert(texop == nir_texop_tex ||
+ texop == nir_texop_txb ||
+ texop == nir_texop_txd);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_min_lod);
+ }
}
/* We should have now consumed exactly all of the arguments */
vtn_assert(idx == count);
switch (opcode) {
case SpvOpAtomicLoad:
atomic->num_components = glsl_get_vector_elements(ptr->type->type);
+ nir_intrinsic_set_align(atomic, 4, 0);
if (ptr->mode == vtn_variable_mode_ssbo)
atomic->src[src++] = nir_src_for_ssa(index);
atomic->src[src++] = nir_src_for_ssa(offset);
case SpvOpAtomicStore:
atomic->num_components = glsl_get_vector_elements(ptr->type->type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+ nir_intrinsic_set_align(atomic, 4, 0);
atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
if (ptr->mode == vtn_variable_mode_ssbo)
atomic->src[src++] = nir_src_for_ssa(index);
case SpvCapabilityVector16:
case SpvCapabilityFloat16Buffer:
case SpvCapabilityFloat16:
- case SpvCapabilityInt64Atomics:
case SpvCapabilityStorageImageMultisample:
case SpvCapabilityInt8:
case SpvCapabilitySparseResidency:
- case SpvCapabilityMinLod:
vtn_warn("Unsupported SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+ case SpvCapabilityMinLod:
+ spv_check_supported(min_lod, cap);
+ break;
+
case SpvCapabilityAtomicStorage:
spv_check_supported(atomic_storage, cap);
break;
spv_check_supported(geometry_streams, cap);
break;
+ case SpvCapabilityInt64Atomics:
+ spv_check_supported(int64_atomics, cap);
+ break;
+
case SpvCapabilityAddresses:
case SpvCapabilityKernel:
case SpvCapabilityImageBasic:
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
+ case SpvOpDecorateStringGOOGLE:
+ case SpvOpMemberDecorateStringGOOGLE:
vtn_handle_decoration(b, opcode, w, count);
break;
case SpvOpMemberDecorate:
case SpvOpGroupDecorate:
case SpvOpGroupMemberDecorate:
+ case SpvOpDecorateStringGOOGLE:
+ case SpvOpMemberDecorateStringGOOGLE:
vtn_fail("Invalid opcode types and variables section");
break;