#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
+#include "nir/nir_deref.h"
#include "spirv_info.h"
#include <stdio.h>
va_end(args);
}
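+/* Log a SPIR-V error at ERROR level without aborting the translation
+ * (contrast with _vtn_fail() below).
+ */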
+void
+_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
+ file, line, fmt, args);
+ va_end(args);
+}
+
void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
const char *fmt, ...)
vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
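+ /* For OpExtInstImport the extension name is a nul-terminated string
+  * starting at word 2.
+  */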
+ const char *ext = (const char *)&w[2];
switch (opcode) {
case SpvOpExtInstImport: {
struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
- if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+ if (strcmp(ext, "GLSL.std.450") == 0) {
val->ext_handler = vtn_handle_glsl450_instruction;
- } else if ((strcmp((const char *)&w[2], "SPV_AMD_gcn_shader") == 0)
+ } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
&& (b->options && b->options->caps.gcn_shader)) {
val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
- } else if ((strcmp((const char *)&w[2], "SPV_AMD_shader_trinary_minmax") == 0)
+ } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
&& (b->options && b->options->caps.trinary_minmax)) {
val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
} else {
- vtn_fail("Unsupported extension");
+ vtn_fail("Unsupported extension: %s", ext);
}
break;
}
case SpvDecorationNonWritable:
case SpvDecorationNonReadable:
case SpvDecorationUniform:
- case SpvDecorationStream:
case SpvDecorationLocation:
case SpvDecorationComponent:
case SpvDecorationOffset:
spirv_decoration_to_string(dec->decoration));
break;
+ case SpvDecorationStream:
+ /* We don't need to do anything here: the stream is filled in when the
+  * decoration is applied to a variable. Just check that, when it is not
+  * applied to a struct member, the type is a struct.
+  */
+ vtn_assert(type->base_type == vtn_base_type_struct);
+ break;
+
case SpvDecorationRelaxedPrecision:
case SpvDecorationSpecId:
case SpvDecorationInvariant:
val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
}
+ if (storage_class == SpvStorageClassPushConstant) {
+ /* These can actually be stored to nir_variables and used as SSA
+ * values so they need a real glsl_type.
+ */
+ val->type->type = glsl_uint_type();
+ }
+
if (storage_class == SpvStorageClassWorkgroup &&
b->options->lower_workgroup_access_to_offsets) {
uint32_t size, align;
spirv_op_to_string(opcode), elem_count, val->type->length);
nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+ for (unsigned i = 0; i < elem_count; i++) {
+ struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
+
+ if (val->value_type == vtn_value_type_constant) {
+ elems[i] = val->constant;
+ } else {
+ vtn_fail_if(val->value_type != vtn_value_type_undef,
+ "only constants or undefs allowed for "
+ "SpvOpConstantComposite");
+ /* To keep things simple, just insert a NULL constant for now. */
+ elems[i] = vtn_null_constant(b, val->type->type);
+ }
+ }
switch (val->type->base_type) {
case vtn_base_type_vector: {
vtn_callee->referenced = true;
nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
- for (unsigned i = 0; i < call->num_params; i++) {
- unsigned arg_id = w[4 + i];
- struct vtn_value *arg = vtn_untyped_value(b, arg_id);
- if (arg->value_type == vtn_value_type_pointer &&
- arg->pointer->ptr_type->type == NULL) {
- nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
- call->params[i] = nir_deref_var_clone(d, call);
- } else {
- struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
- /* Make a temporary to store the argument in */
- nir_variable *tmp =
- nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
- call->params[i] = nir_deref_var_create(call, tmp);
+ unsigned param_idx = 0;
- vtn_local_store(b, arg_ssa, call->params[i]);
- }
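+ /* With deref-based calls the return value is passed by reference: a
+  * temporary is created and its deref becomes the first parameter.
+  */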
+ nir_deref_instr *ret_deref = NULL;
+ struct vtn_type *ret_type = vtn_callee->type->return_type;
+ if (ret_type->base_type != vtn_base_type_void) {
+ nir_variable *ret_tmp =
+ nir_local_variable_create(b->nb.impl, ret_type->type, "return_tmp");
+ ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
+ call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
}
- nir_variable *out_tmp = NULL;
- vtn_assert(res_type->type == callee->return_type);
- if (!glsl_type_is_void(callee->return_type)) {
- out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
- "out_tmp");
- call->return_deref = nir_deref_var_create(call, out_tmp);
+ for (unsigned i = 0; i < vtn_callee->type->length; i++) {
+ struct vtn_type *arg_type = vtn_callee->type->params[i];
+ unsigned arg_id = w[4 + i];
+
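+ /* Sampled images are split into two deref parameters: the image
+  * followed by the sampler.
+  */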
+ if (arg_type->base_type == vtn_base_type_sampled_image) {
+ struct vtn_sampled_image *sampled_image =
+ vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;
+
+ call->params[param_idx++] =
+ nir_src_for_ssa(&sampled_image->image->deref->dest.ssa);
+ call->params[param_idx++] =
+ nir_src_for_ssa(&sampled_image->sampler->deref->dest.ssa);
+ } else if (arg_type->base_type == vtn_base_type_pointer ||
+ arg_type->base_type == vtn_base_type_image ||
+ arg_type->base_type == vtn_base_type_sampler) {
+ struct vtn_pointer *pointer =
+ vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
+ call->params[param_idx++] =
+ nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
+ } else {
+ /* This is a regular SSA value and we need a temporary */
+ nir_variable *tmp =
+ nir_local_variable_create(b->nb.impl, arg_type->type, "arg_tmp");
+ nir_deref_instr *tmp_deref = nir_build_deref_var(&b->nb, tmp);
+ vtn_local_store(b, vtn_ssa_value(b, arg_id), tmp_deref);
+ call->params[param_idx++] = nir_src_for_ssa(&tmp_deref->dest.ssa);
+ }
}
+ assert(param_idx == call->num_params);
nir_builder_instr_insert(&b->nb, &call->instr);
- if (glsl_type_is_void(callee->return_type)) {
+ if (ret_type->base_type == vtn_base_type_void) {
vtn_push_value(b, w[2], vtn_value_type_undef);
} else {
- vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
+ vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, ret_deref));
}
}
const struct glsl_type *image_type = sampled.type->type;
const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
const bool is_array = glsl_sampler_type_is_array(image_type);
- const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
/* Figure out the base texture operation */
nir_texop texop;
vtn_fail("Unhandled opcode");
}
- nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src srcs[10]; /* 10 should be enough */
nir_tex_src *p = srcs;
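+ /* The texture and sampler are now passed as regular deref sources
+  * instead of variables attached to the instruction.
+  */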
+ nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
+ nir_deref_instr *texture =
+ sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;
+
+ p->src = nir_src_for_ssa(&texture->dest.ssa);
+ p->src_type = nir_tex_src_texture_deref;
+ p++;
+
+ switch (texop) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ case nir_texop_txl:
+ case nir_texop_txd:
+ case nir_texop_tg4:
+ /* These operations require a sampler */
+ p->src = nir_src_for_ssa(&sampler->dest.ssa);
+ p->src_type = nir_tex_src_sampler_deref;
+ p++;
+ break;
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ case nir_texop_txs:
+ case nir_texop_lod:
+ case nir_texop_query_levels:
+ case nir_texop_texture_samples:
+ case nir_texop_samples_identical:
+ /* These don't */
+ break;
+ case nir_texop_txf_ms_mcs:
+ vtn_fail("unexpected nir_texop_txf_ms_mcs");
+ }
+
unsigned idx = 4;
struct nir_ssa_def *coord;
break;
}
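+ /* Shadow comparison is keyed off the opcode (the *Dref* variants)
+  * rather than off the sampler type.
+  */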
+ bool is_shadow = false;
unsigned gather_component = 0;
switch (opcode) {
case SpvOpImageSampleDrefImplicitLod:
case SpvOpImageSampleProjDrefExplicitLod:
case SpvOpImageDrefGather:
/* These all have an explicit depth value as their next source */
+ is_shadow = true;
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
break;
vtn_fail("Invalid base type for sampler result");
}
- nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
- nir_deref_var *texture;
- if (sampled.image) {
- nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
- texture = image;
- } else {
- texture = sampler;
- }
-
- instr->texture = nir_deref_var_clone(texture, instr);
-
- switch (instr->op) {
- case nir_texop_tex:
- case nir_texop_txb:
- case nir_texop_txl:
- case nir_texop_txd:
- case nir_texop_tg4:
- /* These operations require a sampler */
- instr->sampler = nir_deref_var_clone(sampler, instr);
- break;
- case nir_texop_txf:
- case nir_texop_txf_ms:
- case nir_texop_txs:
- case nir_texop_lod:
- case nir_texop_query_levels:
- case nir_texop_texture_samples:
- case nir_texop_samples_identical:
- /* These don't */
- instr->sampler = NULL;
- break;
- case nir_texop_txf_ms_mcs:
- vtn_fail("unexpected nir_texop_txf_ms_mcs");
- }
-
nir_ssa_dest_init(&instr->instr, &instr->dest,
nir_tex_instr_dest_size(instr), 32, NULL);
instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
instrs[i]->component = instr->component;
instrs[i]->dest_type = instr->dest_type;
- instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
- instrs[i]->sampler = NULL;
memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
}
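+/* Pad a value out to a vec4, replicating component 0 into any missing
+ * channels; image intrinsics always operate on vec4 coordinates and data.
+ */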
+static nir_ssa_def *
+expand_to_vec4(nir_builder *b, nir_ssa_def *value)
+{
+ if (value->num_components == 4)
+ return value;
+
+ unsigned swiz[4];
+ for (unsigned i = 0; i < 4; i++)
+ swiz[i] = i < value->num_components ? i : 0;
+ return nir_swizzle(b, value, swiz, 4, false);
+}
+
static void
vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
nir_intrinsic_op op;
switch (opcode) {
-#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_var_##N; break;
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
OP(ImageQuerySize, size)
OP(ImageRead, load)
OP(ImageWrite, store)
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
- nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
- intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
+ nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
+ intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
/* The image coordinate is always 4 components but we may not have that
* many. Swizzle to compensate.
*/
- unsigned swiz[4];
- for (unsigned i = 0; i < 4; i++)
- swiz[i] = i < image.coord->num_components ? i : 0;
- intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
- swiz, 4, false));
- intrin->src[1] = nir_src_for_ssa(image.sample);
+ intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
+ intrin->src[2] = nir_src_for_ssa(image.sample);
}
switch (opcode) {
case SpvOpImageRead:
break;
case SpvOpAtomicStore:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
- break;
- case SpvOpImageWrite:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+ case SpvOpImageWrite: {
+ const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
+ nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
+ /* nir_intrinsic_image_deref_store always takes a vec4 value */
+ intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
break;
+ }
case SpvOpAtomicCompareExchange:
case SpvOpAtomicIIncrement:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
+ fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
break;
default:
vtn_fail("Invalid image opcode");
}
- if (opcode != SpvOpImageWrite) {
+ if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- unsigned dest_components = nir_intrinsic_dest_components(intrin);
- if (intrin->intrinsic == nir_intrinsic_image_var_size) {
- dest_components = intrin->num_components =
- glsl_get_vector_elements(type->type);
- }
+ unsigned dest_components = glsl_get_vector_elements(type->type);
+ intrin->num_components = nir_intrinsic_infos[op].dest_components;
+ if (intrin->num_components == 0)
+ intrin->num_components = dest_components;
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
- dest_components, 32, NULL);
+ intrin->num_components, 32, NULL);
nir_builder_instr_insert(&b->nb, &intrin->instr);
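+ /* Some image intrinsics have a fixed-size result; trim it down to the
+  * component count of the SPIR-V result type.
+  */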
+ nir_ssa_def *result = &intrin->dest.ssa;
+ if (intrin->num_components != dest_components)
+ result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
+
val->ssa = vtn_create_ssa_value(b, type->type);
- val->ssa->def = &intrin->dest.ssa;
+ val->ssa->def = result;
} else {
nir_builder_instr_insert(&b->nb, &intrin->instr);
}
}
}
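+/* Map a SPIR-V atomic opcode onto the atomic-counter intrinsics used for
+ * variables in the uniform (atomic counter) storage class.
+ */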
+static nir_intrinsic_op
+get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+{
+ switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
+ OP(AtomicLoad, read_deref)
+ OP(AtomicExchange, exchange)
+ OP(AtomicCompareExchange, comp_swap)
+ OP(AtomicIIncrement, inc_deref)
+ OP(AtomicIDecrement, post_dec_deref)
+ OP(AtomicIAdd, add_deref)
+ OP(AtomicISub, add_deref)
+ OP(AtomicUMin, min_deref)
+ OP(AtomicUMax, max_deref)
+ OP(AtomicAnd, and_deref)
+ OP(AtomicOr, or_deref)
+ OP(AtomicXor, xor_deref)
+#undef OP
+ default:
+ /* We left out AtomicStore, AtomicSMin and AtomicSMax; right now there
+  * are no NIR intrinsics for them. At the moment atomic counter support
+  * is only needed for ARB_gl_spirv, so we only have to handle GLSL
+  * atomic counters, which are uints and don't allow direct storage.
+  */
+ unreachable("Invalid uniform atomic");
+ }
+}
+
static nir_intrinsic_op
get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
}
static nir_intrinsic_op
-get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_var;
- case SpvOpAtomicStore: return nir_intrinsic_store_var;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
+ case SpvOpAtomicStore: return nir_intrinsic_store_deref;
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
OP(AtomicIIncrement, atomic_add)
}
}
+/*
+ * Handles shared atomics, SSBO atomics and atomic counters.
+ */
static void
-vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, unsigned count)
+vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
{
struct vtn_pointer *ptr;
nir_intrinsic_instr *atomic;
SpvMemorySemanticsMask semantics = w[5];
*/
- if (ptr->mode == vtn_variable_mode_workgroup &&
- !b->options->lower_workgroup_access_to_offsets) {
- nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
- const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
- nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
+ /* Uniform storage here means a GLSL atomic counter uniform. */
+ if (ptr->mode == vtn_variable_mode_uniform) {
+ nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = deref->type;
+ nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
+ atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+ atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+
+ /* SSBO atomics need their index/offset sources initialized; here we
+  * don't, as that information is already stored on the ptr->var->var
+  * nir_variable (see vtn_create_variable).
+  */
+
+ switch (opcode) {
+ case SpvOpAtomicLoad:
+ atomic->num_components = glsl_get_vector_elements(deref_type);
+ break;
+
+ case SpvOpAtomicStore:
+ atomic->num_components = glsl_get_vector_elements(deref_type);
+ nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ /* Nothing to do: we don't need to call fill_common_atomic_sources
+  * here, as atomic counter uniforms don't have sources.
+  */
+ break;
+
+ default:
+ unreachable("Invalid SPIR-V atomic");
+ }
+ } else if (ptr->mode == vtn_variable_mode_workgroup &&
+ !b->options->lower_workgroup_access_to_offsets) {
+ nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = deref->type;
+ nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->variables[0] = nir_deref_var_clone(deref, atomic);
+ atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
switch (opcode) {
case SpvOpAtomicLoad:
case SpvOpAtomicStore:
atomic->num_components = glsl_get_vector_elements(deref_type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
- atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
break;
case SpvOpAtomicExchange:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
break;
default:
}
} else {
nir_ssa_def *offset, *index;
- offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
+ offset = vtn_pointer_to_offset(b, ptr, &index);
nir_intrinsic_op op;
if (ptr->mode == vtn_variable_mode_ssbo) {
{
nir_op op;
switch (num_components) {
- case 1: op = nir_op_fmov; break;
+ case 1: op = nir_op_imov; break;
case 2: op = nir_op_vec2; break;
case 3: op = nir_op_vec3; break;
case 4: op = nir_op_vec4; break;
nir_ssa_def *
vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
{
- unsigned swiz[4] = { index };
- return nir_swizzle(&b->nb, src, swiz, 1, true);
+ return nir_channel(&b->nb, src, index);
}
nir_ssa_def *
unsigned elems = count - 3;
assume(elems >= 1);
if (glsl_type_is_vector_or_scalar(type)) {
- nir_ssa_def *srcs[4];
+ nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < elems; i++)
srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
val->ssa->def =
switch (opcode) {
case SpvOpEmitStreamVertex:
- case SpvOpEndStreamPrimitive:
- nir_intrinsic_set_stream_id(intrin, w[1]);
+ case SpvOpEndStreamPrimitive: {
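+ /* The Stream operand is the <id> of a scalar integer constant, not an
+  * immediate literal.
+  */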
+ unsigned stream = vtn_constant_value(b, w[1])->values[0].u32[0];
+ nir_intrinsic_set_stream_id(intrin, stream);
break;
+ }
+
default:
break;
}
case SpvCapabilityFloat16Buffer:
case SpvCapabilityFloat16:
case SpvCapabilityInt64Atomics:
- case SpvCapabilityAtomicStorage:
- case SpvCapabilityInt16:
case SpvCapabilityStorageImageMultisample:
case SpvCapabilityInt8:
case SpvCapabilitySparseResidency:
spirv_capability_to_string(cap));
break;
+ case SpvCapabilityAtomicStorage:
+ spv_check_supported(atomic_storage, cap);
+ break;
+
case SpvCapabilityFloat64:
spv_check_supported(float64, cap);
break;
case SpvCapabilityInt64:
spv_check_supported(int64, cap);
break;
+ case SpvCapabilityInt16:
+ spv_check_supported(int16, cap);
+ break;
case SpvCapabilityAddresses:
case SpvCapabilityKernel:
case SpvCapabilityGroupNonUniformQuad:
spv_check_supported(subgroup_quad, cap);
+ break;
case SpvCapabilityGroupNonUniformArithmetic:
case SpvCapabilityGroupNonUniformClustered:
spv_check_supported(subgroup_arithmetic, cap);
+ break;
case SpvCapabilityVariablePointersStorageBuffer:
case SpvCapabilityVariablePointers:
spv_check_supported(shader_viewport_index_layer, cap);
break;
+ case SpvCapabilityStorageBuffer8BitAccess:
+ case SpvCapabilityUniformAndStorageBuffer8BitAccess:
+ case SpvCapabilityStoragePushConstant8:
+ spv_check_supported(storage_8bit, cap);
+ break;
+
+ case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
+ case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
+ case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
+ spv_check_supported(descriptor_array_dynamic_indexing, cap);
+ break;
+
+ case SpvCapabilityRuntimeDescriptorArrayEXT:
+ spv_check_supported(runtime_descriptor_array, cap);
+ break;
+
+ case SpvCapabilityStencilExportEXT:
+ spv_check_supported(stencil_export, cap);
+ break;
+
+ case SpvCapabilitySampleMaskPostDepthCoverage:
+ spv_check_supported(post_depth_coverage, cap);
+ break;
+
default:
vtn_fail("Unhandled capability");
}
b->shader->info.fs.early_fragment_tests = true;
break;
+ case SpvExecutionModePostDepthCoverage:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.post_depth_coverage = true;
+ break;
+
case SpvExecutionModeInvocations:
vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeXfb:
- vtn_fail("Unhandled execution mode");
+ b->shader->info.has_transform_feedback_varyings = true;
break;
case SpvExecutionModeVecTypeHint:
case SpvExecutionModeContractionOff:
break; /* OpenCL */
+ case SpvExecutionModeStencilRefReplacingEXT:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+ break;
+
default:
vtn_fail("Unhandled execution mode");
}
case SpvOpImageQuerySize: {
struct vtn_pointer *image =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
- if (image->mode == vtn_variable_mode_image) {
+ if (glsl_type_is_image(image->type->type)) {
vtn_handle_image(b, opcode, w, count);
} else {
- vtn_assert(image->mode == vtn_variable_mode_sampler);
+ vtn_assert(glsl_type_is_sampler(image->type->type));
vtn_handle_texture(b, opcode, w, count);
}
break;
vtn_handle_image(b, opcode, w, count);
} else {
vtn_assert(pointer->value_type == vtn_value_type_pointer);
- vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+ vtn_handle_atomics(b, opcode, w, count);
}
break;
}
vtn_handle_image(b, opcode, w, count);
} else {
vtn_assert(pointer->value_type == vtn_value_type_pointer);
- vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+ vtn_handle_atomics(b, opcode, w, count);
}
break;
}
b->entry_point_name = entry_point_name;
b->options = options;
- /* Handle the SPIR-V header (first 4 dwords) */
- vtn_assert(word_count > 5);
+ /*
+ * Handle the SPIR-V header (first 5 dwords).
+ * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
+ */
+ if (word_count <= 5)
+ goto fail;
+
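+ /* Header layout: words[0] = magic, words[1] = version, words[2] =
+  * generator magic, words[3] = value id bound, words[4] = schema (0).
+  */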
+ if (words[0] != SpvMagicNumber) {
+ vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
+ goto fail;
+ }
+ if (words[1] < 0x10000) {
+ vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
+ goto fail;
+ }
- vtn_assert(words[0] == SpvMagicNumber);
- vtn_assert(words[1] >= 0x10000);
/* words[2] == generator magic */
unsigned value_id_bound = words[3];
- vtn_assert(words[4] == 0);
+ if (words[4] != 0) {
+ vtn_err("words[4] was %u, want 0", words[4]);
+ goto fail;
+ }
b->value_id_bound = value_id_bound;
b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
return b;
+ fail:
+ ralloc_free(b);
+ return NULL;
}
nir_function *
}
} while (progress);
+ /* We sometimes generate bogus derefs that, while never used, give the
+ * validator a bit of heartburn. Run dead code to get rid of them.
+ */
+ nir_opt_dce(b->shader);
+
vtn_assert(b->entry_point->value_type == vtn_value_type_function);
nir_function *entry_point = b->entry_point->func->impl->function;
vtn_assert(entry_point);