#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
+struct spec_constant_value {
+ bool is_double;
+ union {
+ uint32_t data32;
+ uint64_t data64;
+ };
+};
+
void
_vtn_warn(const char *file, int line, const char *msg, ...)
{
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_DOUBLE: {
+ int bit_size = glsl_get_bit_size(type);
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, num_components, 32);
+ nir_load_const_instr_create(b->shader, num_components, bit_size);
- for (unsigned i = 0; i < num_components; i++)
- load->value.u32[i] = constant->value.u[i];
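+ /* Copy the whole nir_const_value; this handles any bit size. */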
+ load->value = constant->values[0];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
val->def = &load->def;
struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
col_val->type = glsl_get_column_type(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, rows, 32);
+ nir_load_const_instr_create(b->shader, rows, bit_size);
- for (unsigned j = 0; j < rows; j++)
- load->value.u32[j] = constant->value.u[rows * i + j];
+ load->value = constant->values[i];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
col_val->def = &load->def;
}
}
break;
+ }
case GLSL_TYPE_ARRAY: {
unsigned elems = glsl_get_length(val->type);
switch (glsl_get_base_type(src->type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
break;
case SpvDecorationPatch:
- vtn_warn("Tessellation not yet supported");
break;
case SpvDecorationSpecId:
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
spirv_decoration_to_string(dec->decoration));
break;
+
+ default:
+ unreachable("Unhandled decoration");
}
}
case SpvDecorationOffset:
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
- vtn_warn("Decoraiton only allowed for struct members: %s",
+ vtn_warn("Decoration only allowed for struct members: %s",
spirv_decoration_to_string(dec->decoration));
break;
case SpvDecorationLinkageAttributes:
case SpvDecorationNoContraction:
case SpvDecorationInputAttachmentIndex:
- vtn_warn("Decoraiton not allowed on types: %s",
+ vtn_warn("Decoration not allowed on types: %s",
spirv_decoration_to_string(dec->decoration));
break;
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
spirv_decoration_to_string(dec->decoration));
break;
+
+ default:
+ unreachable("Unhandled decoration");
}
}
val->type->type = glsl_bool_type();
break;
case SpvOpTypeInt: {
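+ /* Per OpTypeInt, w[2] is the bit width and w[3] the signedness. */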
+ int bit_size = w[2];
const bool signedness = w[3];
- val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+ if (bit_size == 64)
+ val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
+ else
+ val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
break;
}
- case SpvOpTypeFloat:
- val->type->type = glsl_float_type();
+ case SpvOpTypeFloat: {
+ int bit_size = w[2];
+ val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
break;
+ }
case SpvOpTypeVector: {
struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
length = 0;
} else {
length =
- vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
+ vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
}
val->type->type = glsl_array_type(array_element->type, length);
val->type->access_qualifier = SpvAccessQualifierReadWrite;
if (multisampled) {
- assert(dim == GLSL_SAMPLER_DIM_2D);
- dim = GLSL_SAMPLER_DIM_MS;
+ if (dim == GLSL_SAMPLER_DIM_2D)
+ dim = GLSL_SAMPLER_DIM_MS;
+ else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
+ dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
+ else
+ assert(!"Unsupported multisampled image type");
}
val->type->image_format = translate_image_format(format);
val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
glsl_get_base_type(sampled_type));
} else if (sampled == 2) {
- assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
assert(!is_shadow);
val->type->type = glsl_image_type(dim, is_array,
glsl_get_base_type(sampled_type));
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
}
static void
-spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
int member, const struct vtn_decoration *dec,
void *data)
{
if (dec->decoration != SpvDecorationSpecId)
return;
- uint32_t *const_value = data;
+ struct spec_constant_value *const_value = data;
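+ /* The caller set is_double to select which union member to fill. */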
for (unsigned i = 0; i < b->num_specializations; i++) {
if (b->specializations[i].id == dec->literals[0]) {
- *const_value = b->specializations[i].data;
+ if (const_value->is_double)
+ const_value->data64 = b->specializations[i].data64;
+ else
+ const_value->data32 = b->specializations[i].data32;
return;
}
}
get_specialization(struct vtn_builder *b, struct vtn_value *val,
uint32_t const_value)
{
- vtn_foreach_decoration(b, val, spec_constant_deocoration_cb, &const_value);
- return const_value;
+ struct spec_constant_value data;
+ data.is_double = false;
+ data.data32 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data32;
+}
+
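+/* get_specialization64() assembles a 64-bit literal from two SPIR-V words
+ * via vtn_u64_literal(); a minimal sketch of that helper, assuming the
+ * low-order word comes first as the SPIR-V spec requires:
+ *
+ *    static inline uint64_t
+ *    vtn_u64_literal(const uint32_t *w)
+ *    {
+ *       return (uint64_t)w[1] << 32 | w[0];
+ *    }
+ */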
+static uint64_t
+get_specialization64(struct vtn_builder *b, struct vtn_value *val,
+ uint64_t const_value)
+{
+ struct spec_constant_value data;
+ data.is_double = true;
+ data.data64 = const_value;
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+ return data.data64;
}
static void
assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
- b->shader->info.cs.local_size[0] = val->constant->value.u[0];
- b->shader->info.cs.local_size[1] = val->constant->value.u[1];
- b->shader->info.cs.local_size[2] = val->constant->value.u[2];
+ b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
+ b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
+ b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
}
static void
switch (opcode) {
case SpvOpConstantTrue:
assert(val->const_type == glsl_bool_type());
- val->constant->value.u[0] = NIR_TRUE;
+ val->constant->values[0].u32[0] = NIR_TRUE;
break;
case SpvOpConstantFalse:
assert(val->const_type == glsl_bool_type());
- val->constant->value.u[0] = NIR_FALSE;
+ val->constant->values[0].u32[0] = NIR_FALSE;
break;
case SpvOpSpecConstantTrue:
assert(val->const_type == glsl_bool_type());
uint32_t int_val =
get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
- val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
+ val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
break;
}
- case SpvOpConstant:
+ case SpvOpConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->value.u[0] = w[3];
+ int bit_size = glsl_get_bit_size(val->const_type);
+ if (bit_size == 64) {
+ /* The two literal words arrive low word first; vtn_u64_literal()
+ * assembles them in a host-endianness-safe way.
+ */
+ val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
+ } else {
+ assert(bit_size == 32);
+ val->constant->values->u32[0] = w[3];
+ }
break;
- case SpvOpSpecConstant:
+ }
+ case SpvOpSpecConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->value.u[0] = get_specialization(b, val, w[3]);
+ int bit_size = glsl_get_bit_size(val->const_type);
+ if (bit_size == 64)
+ val->constant->values[0].u64[0] =
+ get_specialization64(b, val, vtn_u64_literal(&w[3]));
+ else
+ val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
break;
+ }
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
unsigned elem_count = count - 3;
switch (glsl_get_base_type(val->const_type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
+ int bit_size = glsl_get_bit_size(val->const_type);
if (glsl_type_is_matrix(val->const_type)) {
- unsigned rows = glsl_get_vector_elements(val->const_type);
assert(glsl_get_matrix_columns(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
- for (unsigned j = 0; j < rows; j++)
- val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ val->constant->values[i] = elems[i]->values[0];
} else {
assert(glsl_type_is_vector(val->const_type));
assert(glsl_get_vector_elements(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->value.u[i] = elems[i]->value.u[0];
+ for (unsigned i = 0; i < elem_count; i++) {
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ }
+ }
}
ralloc_free(elems);
break;
-
+ }
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ARRAY:
ralloc_steal(val->constant, elems);
SpvOp opcode = get_specialization(b, val, w[3]);
switch (opcode) {
case SpvOpVectorShuffle: {
- struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
- struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
- unsigned len0 = glsl_get_vector_elements(v0->const_type);
- unsigned len1 = glsl_get_vector_elements(v1->const_type);
-
- uint32_t u[8];
- for (unsigned i = 0; i < len0; i++)
- u[i] = v0->constant->value.u[i];
- for (unsigned i = 0; i < len1; i++)
- u[len0 + i] = v1->constant->value.u[i];
-
- for (unsigned i = 0; i < count - 6; i++) {
- uint32_t comp = w[i + 6];
- if (comp == (uint32_t)-1) {
- val->constant->value.u[i] = 0xdeadbeef;
- } else {
- val->constant->value.u[i] = u[comp];
+ struct vtn_value *v0 = &b->values[w[4]];
+ struct vtn_value *v1 = &b->values[w[5]];
+
+ assert(v0->value_type == vtn_value_type_constant ||
+ v0->value_type == vtn_value_type_undef);
+ assert(v1->value_type == vtn_value_type_constant ||
+ v1->value_type == vtn_value_type_undef);
+
+ unsigned len0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v0->const_type) :
+ glsl_get_vector_elements(v0->type->type);
+ unsigned len1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_vector_elements(v1->const_type) :
+ glsl_get_vector_elements(v1->type->type);
+
+ assert(len0 + len1 <= 8); /* the scratch arrays below hold two vec4s */
+
+ unsigned bit_size = glsl_get_bit_size(val->const_type);
+ unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v0->const_type) :
+ glsl_get_bit_size(v0->type->type);
+ unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
+ glsl_get_bit_size(v1->const_type) :
+ glsl_get_bit_size(v1->type->type);
+
+ assert(bit_size == bit_size0 && bit_size == bit_size1);
+ (void)bit_size0; (void)bit_size1;
+
+ if (bit_size == 64) {
+ uint64_t u64[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u64[i] = v0->constant->values[0].u64[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u64[len0 + i] = v1->constant->values[0].u64[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If the component is not used, set it to a recognizable constant
+ * so that accidental use is easy to detect.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+ else
+ val->constant->values[0].u64[j] = u64[comp];
+ }
+ } else {
+ uint32_t u32[8];
+ if (v0->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len0; i++)
+ u32[i] = v0->constant->values[0].u32[i];
+ }
+ if (v1->value_type == vtn_value_type_constant) {
+ for (unsigned i = 0; i < len1; i++)
+ u32[len0 + i] = v1->constant->values[0].u32[i];
+ }
+
+ for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+ uint32_t comp = w[i + 6];
+ /* If the component is not used, set it to a recognizable constant
+ * so that accidental use is easy to detect.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u32[j] = 0xdeadbeef;
+ else
+ val->constant->values[0].u32[j] = u32[comp];
}
}
break;
}
int elem = -1;
+ int col = 0;
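+ /* For matrix constants, col selects the column and elem the row;
+ * for vectors and scalars, col stays 0.
+ */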
const struct glsl_type *type = comp->const_type;
for (unsigned i = deref_start; i < count; i++) {
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_BOOL:
/* If we hit this granularity, we're picking off an element */
- if (elem < 0)
- elem = 0;
-
if (glsl_type_is_matrix(type)) {
- elem += w[i] * glsl_get_vector_elements(type);
+ assert(col == 0 && elem == -1);
+ col = w[i];
+ elem = 0;
type = glsl_get_column_type(type);
} else {
- assert(glsl_type_is_vector(type));
- elem += w[i];
+ assert(elem <= 0 && glsl_type_is_vector(type));
+ elem = w[i];
type = glsl_scalar_type(glsl_get_base_type(type));
}
continue;
val->constant = *c;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- val->constant->value.u[i] = (*c)->value.u[elem + i];
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ }
}
} else {
struct vtn_value *insert =
*c = insert->constant;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
for (unsigned i = 0; i < num_components; i++)
- (*c)->value.u[elem + i] = insert->constant->value.u[i];
+ if (bit_size == 64) {
+ (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+ } else {
+ assert(bit_size == 32);
+ (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ }
}
}
break;
default: {
bool swap;
- nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
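+ /* Conversion opcodes map to different NIR ops depending on the source
+ * and destination types, so both are passed to the lookup; for all
+ * other opcodes the two types are identical.
+ */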
+ nir_alu_type dst_alu_type =
+    nir_get_nir_type_for_glsl_type(val->const_type);
+ nir_alu_type src_alu_type = dst_alu_type;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
+                                             src_alu_type, dst_alu_type);
unsigned num_components = glsl_get_vector_elements(val->const_type);
unsigned bit_size =
unsigned j = swap ? 1 - i : i;
- assert(bit_size == 32);
- for (unsigned k = 0; k < num_components; k++)
- src[j].u32[k] = c->value.u[k];
+ src[j] = c->values[0];
}
- nir_const_value res = nir_eval_const_opcode(op, num_components,
- bit_size, src);
-
- for (unsigned k = 0; k < num_components; k++)
- val->constant->value.u[k] = res.u32[k];
-
+ val->constant->values[0] =
+ nir_eval_const_opcode(op, num_components, bit_size, src);
break;
} /* default */
}
struct vtn_value *arg = vtn_untyped_value(b, arg_id);
if (arg->value_type == vtn_value_type_access_chain) {
nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
- call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+ call->params[i] = nir_deref_var_clone(d, call);
} else {
struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
switch (glsl_get_base_type(type)) {
case GLSL_TYPE_INT:
case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_DOUBLE:
case SpvOpImageSampleProjDrefExplicitLod:
case SpvOpImageDrefGather:
/* These all have an explicit depth value as their next source */
- (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
break;
case SpvOpImageGather:
/* This has a component as its next source */
gather_component =
- vtn_value(b, w[idx++], vtn_value_type_constant)->constant->value.u[0];
+ vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
break;
default:
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
/* Now we need to handle some number of optional arguments */
+ const struct vtn_ssa_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx++];
operands & SpvImageOperandsConstOffsetMask)
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
- if (operands & SpvImageOperandsConstOffsetsMask)
- assert(!"Constant offsets to texture gather not yet implemented");
+ if (operands & SpvImageOperandsConstOffsetsMask) {
+ gather_offsets = vtn_ssa_value(b, w[idx++]);
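+ /* Reserve a source slot now; each of the four gathers emitted
+ * below fills it with its own offset.
+ */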
+ (*p++) = (nir_tex_src){};
+ }
if (operands & SpvImageOperandsSampleMask) {
assert(texop == nir_texop_txf_ms);
}
nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+ nir_deref_var *texture;
if (sampled.image) {
nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+ texture = image;
} else {
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ texture = sampler;
}
+ instr->texture = nir_deref_var_clone(texture, instr);
+
switch (instr->op) {
case nir_texop_tex:
case nir_texop_txb:
case nir_texop_txl:
case nir_texop_txd:
/* These operations require a sampler */
- instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ instr->sampler = nir_deref_var_clone(sampler, instr);
break;
case nir_texop_txf:
case nir_texop_txf_ms:
assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
+ nir_ssa_def *def;
+ nir_instr *instruction;
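+ /* ConstOffsets supplies four texel offsets at once, which a single
+ * nir_tex_src_offset cannot express; emit four single-offset gathers
+ * and merge their .w components below.
+ */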
+ if (gather_offsets) {
+ assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
+ assert(glsl_get_length(gather_offsets->type) == 4);
+ nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
+
+ /* Copy the current instruction 4x */
+ for (uint32_t i = 1; i < 4; i++) {
+ instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
+ instrs[i]->op = instr->op;
+ instrs[i]->coord_components = instr->coord_components;
+ instrs[i]->sampler_dim = instr->sampler_dim;
+ instrs[i]->is_array = instr->is_array;
+ instrs[i]->is_shadow = instr->is_shadow;
+ instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
+ instrs[i]->component = instr->component;
+ instrs[i]->dest_type = instr->dest_type;
+ instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
+ instrs[i]->sampler = NULL;
+
+ memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+ nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
+ nir_tex_instr_dest_size(instr), 32, NULL);
+ }
+
+ /* Fill in the last argument with the offset from the passed in offsets
+ * and insert the instruction into the stream.
+ */
+ for (uint32_t i = 0; i < 4; i++) {
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
+ src.src_type = nir_tex_src_offset;
+ instrs[i]->src[instrs[i]->num_srcs - 1] = src;
+ nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
+ }
+
+ /* Combine the results of the 4 instructions by taking their .w
+ * components
+ */
+ nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
+ nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
+ vec4->dest.write_mask = 0xf;
+ for (uint32_t i = 0; i < 4; i++) {
+ vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
+ vec4->src[i].swizzle[0] = 3;
+ }
+ def = &vec4->dest.dest.ssa;
+ instruction = &vec4->instr;
+ } else {
+ def = &instr->dest.ssa;
+ instruction = &instr->instr;
+ }
+
val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = &instr->dest.ssa;
+ val->ssa->def = def;
- nir_builder_instr_insert(&b->nb, &instr->instr);
+ nir_builder_instr_insert(&b->nb, instruction);
}
static void
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
+ case SpvOpAtomicLoad:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
break;
- case SpvOpAtomicLoad: {
- image.image =
- vtn_value(b, w[3], vtn_value_type_access_chain)->access_chain;
- break;
- }
-
- case SpvOpAtomicStore: {
- image.image =
- vtn_value(b, w[1], vtn_value_type_access_chain)->access_chain;
+ case SpvOpAtomicStore:
+ image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
break;
- }
case SpvOpImageQuerySize:
image.image =
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
- intrin->variables[0] =
- nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+ intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
if (chain->var->mode == vtn_variable_mode_workgroup) {
struct vtn_type *type = chain->var->type;
- nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+ nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
+ atomic->variables[0] = nir_deref_var_clone(deref, atomic);
switch (opcode) {
case SpvOpAtomicLoad:
nir_alu_instr *vec = create_vec(b->shader, num_components,
srcs[0]->bit_size);
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, there must be at least two Constituent
+ * operands."
+ */
+ assert(num_srcs >= 2);
+
unsigned dest_idx = 0;
for (unsigned i = 0; i < num_srcs; i++) {
nir_ssa_def *src = srcs[i];
+ assert(dest_idx + src->num_components <= num_components);
for (unsigned j = 0; j < src->num_components; j++) {
vec->src[dest_idx].src = nir_src_for_ssa(src);
vec->src[dest_idx].swizzle[0] = j;
}
}
+ /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+ *
+ * "When constructing a vector, the total number of components in all
+ * the operands must equal the number of components in Result Type."
+ */
+ assert(dest_idx == num_components);
+
nir_builder_instr_insert(&b->nb, &vec->instr);
return &vec->dest.dest.ssa;
}
}
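+/* Warn, rather than hard-fail, when the module declares a capability the
+ * driver has not advertised support for.
+ */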
+#define spv_check_supported(name, cap) do { \
+ if (!(b->ext && b->ext->name)) \
+ vtn_warn("Unsupported SPIR-V capability: %s", \
+ spirv_capability_to_string(cap)); \
+ } while(0)
+
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvCapabilityDerivativeControl:
case SpvCapabilityInterpolationFunction:
case SpvCapabilityMultiViewport:
- break;
-
+ case SpvCapabilitySampleRateShading:
case SpvCapabilityClipDistance:
case SpvCapabilityCullDistance:
+ case SpvCapabilityInputAttachment:
+ case SpvCapabilityImageGatherExtended:
+ case SpvCapabilityStorageImageExtendedFormats:
+ break;
+
case SpvCapabilityGeometryStreams:
- case SpvCapabilityTessellation:
- case SpvCapabilityTessellationPointSize:
case SpvCapabilityLinkage:
case SpvCapabilityVector16:
case SpvCapabilityFloat16Buffer:
case SpvCapabilityFloat16:
- case SpvCapabilityFloat64:
- case SpvCapabilityInt64:
case SpvCapabilityInt64Atomics:
case SpvCapabilityAtomicStorage:
case SpvCapabilityInt16:
- case SpvCapabilityImageGatherExtended:
case SpvCapabilityStorageImageMultisample:
case SpvCapabilityImageCubeArray:
- case SpvCapabilitySampleRateShading:
case SpvCapabilityInt8:
- case SpvCapabilityInputAttachment:
case SpvCapabilitySparseResidency:
case SpvCapabilityMinLod:
- case SpvCapabilityImageMSArray:
- case SpvCapabilityStorageImageExtendedFormats:
case SpvCapabilityTransformFeedback:
- case SpvCapabilityStorageImageReadWithoutFormat:
- case SpvCapabilityStorageImageWriteWithoutFormat:
vtn_warn("Unsupported SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+ case SpvCapabilityFloat64:
+ spv_check_supported(float64, cap);
+ break;
+ case SpvCapabilityInt64:
+ spv_check_supported(int64, cap);
+ break;
+
case SpvCapabilityAddresses:
case SpvCapabilityKernel:
case SpvCapabilityImageBasic:
vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+
+ case SpvCapabilityImageMSArray:
+ spv_check_supported(image_ms_array, cap);
+ break;
+
+ case SpvCapabilityTessellation:
+ case SpvCapabilityTessellationPointSize:
+ spv_check_supported(tessellation, cap);
+ break;
+
+ case SpvCapabilityDrawParameters:
+ spv_check_supported(draw_parameters, cap);
+ break;
+
+ case SpvCapabilityStorageImageReadWithoutFormat:
+ spv_check_supported(image_read_without_format, cap);
+ break;
+
+ case SpvCapabilityStorageImageWriteWithoutFormat:
+ spv_check_supported(image_write_without_format, cap);
+ break;
+
+ default:
+ unreachable("Unhandled capability");
}
break;
}
case SpvExecutionModeEarlyFragmentTests:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.early_fragment_tests = true;
+ b->shader->info->fs.early_fragment_tests = true;
break;
case SpvExecutionModeInvocations:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+ b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeDepthReplacing:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
break;
case SpvExecutionModeDepthGreater:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
break;
case SpvExecutionModeDepthLess:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
break;
case SpvExecutionModeDepthUnchanged:
assert(b->shader->stage == MESA_SHADER_FRAGMENT);
- b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
break;
case SpvExecutionModeLocalSize:
assert(b->shader->stage == MESA_SHADER_COMPUTE);
- b->shader->info.cs.local_size[0] = mode->literals[0];
- b->shader->info.cs.local_size[1] = mode->literals[1];
- b->shader->info.cs.local_size[2] = mode->literals[2];
+ b->shader->info->cs.local_size[0] = mode->literals[0];
+ b->shader->info->cs.local_size[1] = mode->literals[1];
+ b->shader->info->cs.local_size[2] = mode->literals[2];
break;
case SpvExecutionModeLocalSizeHint:
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.vertices_out = mode->literals[0];
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info->tess.tcs_vertices_out = mode->literals[0];
+ } else {
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info->gs.vertices_out = mode->literals[0];
+ }
break;
case SpvExecutionModeInputPoints:
case SpvExecutionModeInputTrianglesAdjacency:
case SpvExecutionModeQuads:
case SpvExecutionModeIsolines:
- if (b->shader->stage == MESA_SHADER_GEOMETRY) {
- b->shader->info.gs.vertices_in =
- vertices_in_from_spv_execution_mode(mode->exec_mode);
+ if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ b->shader->info->tess.primitive_mode =
+ gl_primitive_from_spv_execution_mode(mode->exec_mode);
} else {
- assert(!"Tesselation shaders not yet supported");
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info->gs.vertices_in =
+ vertices_in_from_spv_execution_mode(mode->exec_mode);
}
break;
case SpvExecutionModeOutputLineStrip:
case SpvExecutionModeOutputTriangleStrip:
assert(b->shader->stage == MESA_SHADER_GEOMETRY);
- b->shader->info.gs.output_primitive =
+ b->shader->info->gs.output_primitive =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
break;
case SpvExecutionModeSpacingEqual:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
+ break;
case SpvExecutionModeSpacingFractionalEven:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+ break;
case SpvExecutionModeSpacingFractionalOdd:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+ break;
case SpvExecutionModeVertexOrderCw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Vulkan's notion of CCW seems to match the hardware backends,
+ * but be the opposite of OpenGL. Currently NIR follows GL semantics,
+ * so we set it backwards here.
+ */
+ b->shader->info->tess.ccw = true;
+ break;
case SpvExecutionModeVertexOrderCcw:
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ /* Backwards; see above */
+ b->shader->info->tess.ccw = false;
+ break;
case SpvExecutionModePointMode:
- assert(!"TODO: Add tessellation metadata");
+ assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->stage == MESA_SHADER_TESS_EVAL);
+ b->shader->info->tess.point_mode = true;
break;
case SpvExecutionModePixelCenterInteger:
case SpvExecutionModeVecTypeHint:
case SpvExecutionModeContractionOff:
break; /* OpenCL */
+
+ default:
+ unreachable("Unhandled execution mode");
}
}
vtn_handle_constant(b, opcode, w, count);
break;
+ case SpvOpUndef:
case SpvOpVariable:
vtn_handle_variables(b, opcode, w, count);
break;
spirv_to_nir(const uint32_t *words, size_t word_count,
struct nir_spirv_specialization *spec, unsigned num_spec,
gl_shader_stage stage, const char *entry_point_name,
+ const struct nir_spirv_supported_extensions *ext,
const nir_shader_compiler_options *options)
{
const uint32_t *word_end = words + word_count;
exec_list_make_empty(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
+ b->ext = ext;
/* Handle all the preamble instructions */
words = vtn_foreach_instruction(b, words, word_end,
return NULL;
}
- b->shader = nir_shader_create(NULL, stage, options);
+ b->shader = nir_shader_create(NULL, stage, options, NULL);
/* Set shader info defaults */
- b->shader->info.gs.invocations = 1;
+ b->shader->info->gs.invocations = 1;
/* Parse execution modes */
vtn_foreach_execution_mode(b, b->entry_point,