struct vtn_value *val = vtn_untyped_value(b, value_id);
switch (val->value_type) {
case vtn_value_type_constant:
- return vtn_const_ssa_value(b, val->constant, val->type);
+ return vtn_const_ssa_value(b, val->constant, val->const_type);
case vtn_value_type_ssa:
return val->ssa;
struct vtn_value *value,
vtn_decoration_foreach_cb cb, void *data)
{
+ int new_member = member;
+
for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
if (dec->member >= 0) {
assert(member == -1);
- member = dec->member;
+ new_member = dec->member;
}
if (dec->group) {
assert(dec->group->value_type == vtn_value_type_decoration_group);
- _foreach_decoration_helper(b, base_value, member, dec->group, cb, data);
+ _foreach_decoration_helper(b, base_value, new_member, dec->group,
+ cb, data);
} else {
- cb(b, base_value, member, dec, data);
+ cb(b, base_value, new_member, dec, data);
}
}
}
}
}
+/* Context handed to struct_member_decoration_cb through
+ * vtn_foreach_decoration's void *data parameter.
+ */
+struct member_decoration_ctx {
+   /* glsl_struct_field array being filled in for the struct under construction */
+   struct glsl_struct_field *fields;
+   /* vtn_type of the struct, so member vtn_types can be replaced in place */
+   struct vtn_type *type;
+};
+
+/* Makes a shallow copy of a vtn_type.  Sub-type pointers (array_element and
+ * the entries of members[]) still reference the same vtn_type objects as
+ * src, but for structs the members/offsets pointer arrays themselves are
+ * re-allocated so a caller can swap individual members in the copy without
+ * touching src.
+ *
+ * NOTE(review): dest is ralloc'ed (not zeroed), so fields not assigned for
+ * a given type class remain uninitialized — callers must not read them.
+ */
+static struct vtn_type *
+vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
+{
+   struct vtn_type *dest = ralloc(b, struct vtn_type);
+   dest->type = src->type;
+   dest->is_builtin = src->is_builtin;
+   if (src->is_builtin)
+      dest->builtin = src->builtin;
+
+   if (!glsl_type_is_vector_or_scalar(src->type)) {
+      switch (glsl_get_base_type(src->type)) {
+      case GLSL_TYPE_ARRAY:
+         dest->array_element = src->array_element;
+         dest->stride = src->stride;
+         break;
+
+      case GLSL_TYPE_INT:
+      case GLSL_TYPE_UINT:
+      case GLSL_TYPE_BOOL:
+      case GLSL_TYPE_FLOAT:
+      case GLSL_TYPE_DOUBLE:
+         /* matrices: non-vector/scalar with a numeric base type */
+         dest->row_major = src->row_major;
+         dest->stride = src->stride;
+         break;
+
+      case GLSL_TYPE_STRUCT: {
+         unsigned elems = glsl_get_length(src->type);
+
+         /* Duplicate the pointer arrays, not the pointed-to vtn_types */
+         dest->members = ralloc_array(b, struct vtn_type *, elems);
+         memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+
+         dest->offsets = ralloc_array(b, unsigned, elems);
+         memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
+         break;
+      }
+
+      default:
+         unreachable("unhandled type");
+      }
+   }
+
+   return dest;
+}
+
static void
struct_member_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
- const struct vtn_decoration *dec, void *void_fields)
+ const struct vtn_decoration *dec, void *void_ctx)
{
- struct glsl_struct_field *fields = void_fields;
+ struct member_decoration_ctx *ctx = void_ctx;
if (member < 0)
return;
case SpvDecorationPrecisionHigh:
break; /* FIXME: Do nothing with these for now. */
case SpvDecorationSmooth:
- fields[member].interpolation = INTERP_QUALIFIER_SMOOTH;
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_SMOOTH;
break;
case SpvDecorationNoperspective:
- fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
break;
case SpvDecorationFlat:
- fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
break;
case SpvDecorationCentroid:
- fields[member].centroid = true;
+ ctx->fields[member].centroid = true;
break;
case SpvDecorationSample:
- fields[member].sample = true;
+ ctx->fields[member].sample = true;
break;
case SpvDecorationLocation:
- fields[member].location = dec->literals[0];
+ ctx->fields[member].location = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn:
+ ctx->type->members[member] = vtn_type_copy(b,
+ ctx->type->members[member]);
+ ctx->type->members[member]->is_builtin = true;
+ ctx->type->members[member]->builtin = dec->literals[0];
break;
default:
unreachable("Unhandled member decoration");
{
struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+ val->type = ralloc(b, struct vtn_type);
+ val->type->is_builtin = false;
+
switch (opcode) {
case SpvOpTypeVoid:
- val->type = glsl_void_type();
+ val->type->type = glsl_void_type();
return;
case SpvOpTypeBool:
- val->type = glsl_bool_type();
+ val->type->type = glsl_bool_type();
return;
case SpvOpTypeInt:
- val->type = glsl_int_type();
+ val->type->type = glsl_int_type();
return;
case SpvOpTypeFloat:
- val->type = glsl_float_type();
+ val->type->type = glsl_float_type();
return;
case SpvOpTypeVector: {
const struct glsl_type *base =
- vtn_value(b, w[2], vtn_value_type_type)->type;
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
unsigned elems = w[3];
assert(glsl_type_is_scalar(base));
- val->type = glsl_vector_type(glsl_get_base_type(base), elems);
+ val->type->type = glsl_vector_type(glsl_get_base_type(base), elems);
return;
}
case SpvOpTypeMatrix: {
- const struct glsl_type *base =
+ struct vtn_type *base =
vtn_value(b, w[2], vtn_value_type_type)->type;
unsigned columns = w[3];
- assert(glsl_type_is_vector(base));
- val->type = glsl_matrix_type(glsl_get_base_type(base),
- glsl_get_vector_elements(base),
- columns);
+ assert(glsl_type_is_vector(base->type));
+ val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
+ glsl_get_vector_elements(base->type),
+ columns);
+ val->type->array_element = base;
+ val->type->row_major = false;
+ val->type->stride = 0;
return;
}
- case SpvOpTypeArray:
- val->type = glsl_array_type(b->values[w[2]].type, w[3]);
+ case SpvOpTypeArray: {
+ struct vtn_type *array_element =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+ val->type->type = glsl_array_type(array_element->type, w[3]);
+ val->type->array_element = array_element;
+ val->type->stride = 0;
return;
+ }
case SpvOpTypeStruct: {
+ unsigned num_fields = count - 2;
+ val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
+
NIR_VLA(struct glsl_struct_field, fields, count);
- for (unsigned i = 0; i < count - 2; i++) {
+ for (unsigned i = 0; i < num_fields; i++) {
/* TODO: Handle decorators */
- fields[i].type = vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+ val->type->members[i] =
+ vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+ fields[i].type = val->type->members[i]->type;
fields[i].name = ralloc_asprintf(b, "field%d", i);
fields[i].location = -1;
fields[i].interpolation = 0;
fields[i].stream = -1;
}
- vtn_foreach_decoration(b, val, struct_member_decoration_cb, fields);
+ struct member_decoration_ctx ctx = {
+ .fields = fields,
+ .type = val->type
+ };
+
+ vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
const char *name = val->name ? val->name : "struct";
- val->type = glsl_struct_type(fields, count, name);
+ val->type->type = glsl_struct_type(fields, num_fields, name);
return;
}
case SpvOpTypeFunction: {
- const struct glsl_type *return_type = b->values[w[2]].type;
+ const struct glsl_type *return_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
NIR_VLA(struct glsl_function_param, params, count - 3);
for (unsigned i = 0; i < count - 3; i++) {
- params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type;
+ params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
/* FIXME: */
params[i].in = true;
params[i].out = true;
}
- val->type = glsl_function_type(return_type, params, count - 3);
+ val->type->type = glsl_function_type(return_type, params, count - 3);
return;
}
case SpvOpTypeSampler: {
const struct glsl_type *sampled_type =
- vtn_value(b, w[2], vtn_value_type_type)->type;
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
assert(glsl_type_is_vector_or_scalar(sampled_type));
assert(w[7] == 0 && "FIXME: Handl multi-sampled textures");
- val->type = glsl_sampler_type(dim, is_shadow, is_array,
- glsl_get_base_type(sampled_type));
+ val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+ glsl_get_base_type(sampled_type));
return;
}
const uint32_t *w, unsigned count)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
val->constant = ralloc(b, nir_constant);
switch (opcode) {
case SpvOpConstantTrue:
- assert(val->type == glsl_bool_type());
+ assert(val->const_type == glsl_bool_type());
val->constant->value.u[0] = NIR_TRUE;
break;
case SpvOpConstantFalse:
- assert(val->type == glsl_bool_type());
+ assert(val->const_type == glsl_bool_type());
val->constant->value.u[0] = NIR_FALSE;
break;
case SpvOpConstant:
- assert(glsl_type_is_scalar(val->type));
+ assert(glsl_type_is_scalar(val->const_type));
val->constant->value.u[0] = w[3];
break;
case SpvOpConstantComposite: {
for (unsigned i = 0; i < elem_count; i++)
elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
- switch (glsl_get_base_type(val->type)) {
+ switch (glsl_get_base_type(val->const_type)) {
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
- if (glsl_type_is_matrix(val->type)) {
- unsigned rows = glsl_get_vector_elements(val->type);
- assert(glsl_get_matrix_columns(val->type) == elem_count);
+ if (glsl_type_is_matrix(val->const_type)) {
+ unsigned rows = glsl_get_vector_elements(val->const_type);
+ assert(glsl_get_matrix_columns(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
for (unsigned j = 0; j < rows; j++)
val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
} else {
- assert(glsl_type_is_vector(val->type));
- assert(glsl_get_vector_elements(val->type) == elem_count);
+ assert(glsl_type_is_vector(val->const_type));
+ assert(glsl_get_vector_elements(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
val->constant->value.u[i] = elems[i]->value.u[0];
}
}
}
+/* Translates a SPIR-V BuiltIn decoration into the NIR slot/location and the
+ * variable mode it maps to.  The meaning of *location depends on the
+ * returned mode: VARYING_SLOT_* or FRAG_RESULT_* for shader_in/shader_out,
+ * SYSTEM_VALUE_* for system values.
+ */
+static void
+vtn_get_builtin_location(SpvBuiltIn builtin, int *location,
+                         nir_variable_mode *mode)
+{
+   switch (builtin) {
+   case SpvBuiltInPosition:
+      *location = VARYING_SLOT_POS;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInPointSize:
+      *location = VARYING_SLOT_PSIZ;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInClipVertex:
+      *location = VARYING_SLOT_CLIP_VERTEX;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInClipDistance:
+      *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInCullDistance:
+      /* XXX figure this out */
+      unreachable("unhandled builtin");
+   case SpvBuiltInVertexId:
+      *location = SYSTEM_VALUE_VERTEX_ID;
+      *mode = nir_var_system_value;
+      break;
+   case SpvBuiltInInstanceId:
+      *location = SYSTEM_VALUE_INSTANCE_ID;
+      *mode = nir_var_system_value;
+      break;
+   case SpvBuiltInPrimitiveId:
+      /* NOTE(review): an output in geometry stages but an input in the
+       * fragment stage — this may need to depend on the shader stage.
+       */
+      *location = VARYING_SLOT_PRIMITIVE_ID;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInInvocationId:
+      *location = SYSTEM_VALUE_INVOCATION_ID;
+      *mode = nir_var_system_value;
+      break;
+   case SpvBuiltInLayer:
+      *location = VARYING_SLOT_LAYER;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInTessLevelOuter:
+   case SpvBuiltInTessLevelInner:
+   case SpvBuiltInTessCoord:
+   case SpvBuiltInPatchVertices:
+      unreachable("no tessellation support");
+   case SpvBuiltInFragCoord:
+      *location = VARYING_SLOT_POS;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInPointCoord:
+      /* gl_PointCoord is a fragment shader input, not an output */
+      *location = VARYING_SLOT_PNTC;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInFrontFacing:
+      /* gl_FrontFacing is a fragment shader input, not an output */
+      *location = VARYING_SLOT_FACE;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInSampleId:
+      *location = SYSTEM_VALUE_SAMPLE_ID;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInSamplePosition:
+      *location = SYSTEM_VALUE_SAMPLE_POS;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInSampleMask:
+      *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInFragColor:
+      *location = FRAG_RESULT_COLOR;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInFragDepth:
+      *location = FRAG_RESULT_DEPTH;
+      *mode = nir_var_shader_out;
+      break;
+   case SpvBuiltInHelperInvocation:
+      unreachable("unsupported builtin"); /* XXX */
+      break;
+   case SpvBuiltInNumWorkgroups:
+   case SpvBuiltInWorkgroupSize:
+      /* these are constants, need to be handled specially */
+      unreachable("unsupported builtin");
+   case SpvBuiltInWorkgroupId:
+   case SpvBuiltInLocalInvocationId:
+   case SpvBuiltInGlobalInvocationId:
+   case SpvBuiltInLocalInvocationIndex:
+      unreachable("no compute shader support");
+   default:
+      unreachable("unsupported builtin");
+   }
+}
+
static void
var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
const struct vtn_decoration *dec, void *void_var)
case SpvDecorationDescriptorSet:
var->data.descriptor_set = dec->literals[0];
break;
- case SpvDecorationBuiltIn:
- var->data.mode = nir_var_system_value;
- var->data.read_only = true;
- switch ((SpvBuiltIn)dec->literals[0]) {
- case SpvBuiltInFrontFacing:
- var->data.location = SYSTEM_VALUE_FRONT_FACE;
- break;
- case SpvBuiltInVertexId:
- var->data.location = SYSTEM_VALUE_VERTEX_ID;
- break;
- case SpvBuiltInInstanceId:
- var->data.location = SYSTEM_VALUE_INSTANCE_ID;
- break;
- case SpvBuiltInSampleId:
- var->data.location = SYSTEM_VALUE_SAMPLE_ID;
- break;
- case SpvBuiltInSamplePosition:
- var->data.location = SYSTEM_VALUE_SAMPLE_POS;
- break;
- case SpvBuiltInSampleMask:
- var->data.location = SYSTEM_VALUE_SAMPLE_MASK_IN;
- break;
- case SpvBuiltInInvocationId:
- var->data.location = SYSTEM_VALUE_INVOCATION_ID;
- break;
- case SpvBuiltInPrimitiveId:
- case SpvBuiltInPosition:
- case SpvBuiltInPointSize:
- case SpvBuiltInClipVertex:
- case SpvBuiltInClipDistance:
- case SpvBuiltInCullDistance:
- case SpvBuiltInLayer:
- case SpvBuiltInViewportIndex:
- case SpvBuiltInTessLevelOuter:
- case SpvBuiltInTessLevelInner:
- case SpvBuiltInTessCoord:
- case SpvBuiltInPatchVertices:
- case SpvBuiltInFragCoord:
- case SpvBuiltInPointCoord:
- case SpvBuiltInFragColor:
- case SpvBuiltInFragDepth:
- case SpvBuiltInHelperInvocation:
- case SpvBuiltInNumWorkgroups:
- case SpvBuiltInWorkgroupSize:
- case SpvBuiltInWorkgroupId:
- case SpvBuiltInLocalInvocationId:
- case SpvBuiltInGlobalInvocationId:
- case SpvBuiltInLocalInvocationIndex:
- case SpvBuiltInWorkDim:
- case SpvBuiltInGlobalSize:
- case SpvBuiltInEnqueuedWorkgroupSize:
- case SpvBuiltInGlobalOffset:
- case SpvBuiltInGlobalLinearId:
- case SpvBuiltInWorkgroupLinearId:
- case SpvBuiltInSubgroupSize:
- case SpvBuiltInSubgroupMaxSize:
- case SpvBuiltInNumSubgroups:
- case SpvBuiltInNumEnqueuedSubgroups:
- case SpvBuiltInSubgroupId:
- case SpvBuiltInSubgroupLocalInvocationId:
- unreachable("Unhandled builtin enum");
- }
+ case SpvDecorationBuiltIn: {
+ nir_variable_mode mode;
+ vtn_get_builtin_location(dec->literals[0], &var->data.location,
+ &mode);
+ var->data.mode = mode;
+ if (mode == nir_var_shader_in || mode == nir_var_system_value)
+ var->data.read_only = true;
+ b->builtins[dec->literals[0]] = var;
break;
+ }
case SpvDecorationNoStaticUse:
/* This can safely be ignored */
break;
}
}
+/* Returns the nir_variable backing the given builtin, creating it on first
+ * use and appending it to the shader's inputs/outputs/system_values list as
+ * dictated by vtn_get_builtin_location.  Variables are cached in
+ * b->builtins so each builtin maps to exactly one nir_variable.
+ */
+static nir_variable *
+get_builtin_variable(struct vtn_builder *b,
+                     const struct glsl_type *type,
+                     SpvBuiltIn builtin)
+{
+   nir_variable *var = b->builtins[builtin];
+
+   if (!var) {
+      /* rzalloc so every nir_variable field starts zeroed; plain ralloc
+       * would leave the fields we don't set here uninitialized.
+       */
+      var = rzalloc(b->shader, nir_variable);
+      var->type = type;
+
+      nir_variable_mode mode;
+      vtn_get_builtin_location(builtin, &var->data.location, &mode);
+      var->data.mode = mode;
+      var->name = ralloc_strdup(b->shader, "builtin");
+
+      switch (mode) {
+      case nir_var_shader_in:
+         exec_list_push_tail(&b->shader->inputs, &var->node);
+         break;
+      case nir_var_shader_out:
+         exec_list_push_tail(&b->shader->outputs, &var->node);
+         break;
+      case nir_var_system_value:
+         exec_list_push_tail(&b->shader->system_values, &var->node);
+         break;
+      default:
+         unreachable("bad builtin mode");
+      }
+
+      b->builtins[builtin] = var;
+   }
+
+   return var;
+}
+
+/* Loads the current value of a builtin into val->def by emitting a
+ * load_var intrinsic on the (possibly newly created) builtin variable.
+ * Only vector/scalar builtins are supported.
+ */
+static void
+vtn_builtin_load(struct vtn_builder *b,
+                 struct vtn_ssa_value *val,
+                 SpvBuiltIn builtin)
+{
+   assert(glsl_type_is_vector_or_scalar(val->type));
+
+   nir_variable *var = get_builtin_variable(b, val->type, builtin);
+
+   nir_intrinsic_instr *load =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
+   /* Destination has one SSA component per vector element of the builtin */
+   nir_ssa_dest_init(&load->instr, &load->dest,
+                     glsl_get_vector_elements(val->type), NULL);
+
+   load->variables[0] = nir_deref_var_create(load, var);
+   load->num_components = glsl_get_vector_elements(val->type);
+   nir_builder_instr_insert(&b->nb, &load->instr);
+   val->def = &load->dest.ssa;
+}
+
+/* Stores val->def to a builtin by emitting a store_var intrinsic on the
+ * (possibly newly created) builtin variable.  Only vector/scalar builtins
+ * are supported.
+ */
+static void
+vtn_builtin_store(struct vtn_builder *b,
+                  struct vtn_ssa_value *val,
+                  SpvBuiltIn builtin)
+{
+   assert(glsl_type_is_vector_or_scalar(val->type));
+
+   nir_variable *var = get_builtin_variable(b, val->type, builtin);
+
+   nir_intrinsic_instr *store =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
+
+   store->variables[0] = nir_deref_var_create(store, var);
+   store->num_components = glsl_get_vector_elements(val->type);
+   store->src[0] = nir_src_for_ssa(val->def);
+   nir_builder_instr_insert(&b->nb, &store->instr);
+}
+
static struct vtn_ssa_value *
_vtn_variable_load(struct vtn_builder *b,
- nir_deref_var *src_deref, nir_deref *src_deref_tail)
+ nir_deref_var *src_deref, struct vtn_type *src_type,
+ nir_deref *src_deref_tail)
{
struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
val->type = src_deref_tail->type;
+ if (src_type->is_builtin) {
+ vtn_builtin_load(b, val, src_type->builtin);
+ return val;
+ }
+
/* The deref tail may contain a deref to select a component of a vector (in
* other words, it might not be an actual tail) so we have to save it away
* here since we overwrite it later.
src_deref_tail->child = &deref->deref;
for (unsigned i = 0; i < elems; i++) {
deref->base_offset = i;
- val->elems[i] = _vtn_variable_load(b, src_deref, &deref->deref);
+ val->elems[i] = _vtn_variable_load(b, src_deref,
+ src_type->array_element,
+ &deref->deref);
}
} else {
assert(glsl_get_base_type(val->type) == GLSL_TYPE_STRUCT);
for (unsigned i = 0; i < elems; i++) {
deref->index = i;
deref->deref.type = glsl_get_struct_field(val->type, i);
- val->elems[i] = _vtn_variable_load(b, src_deref, &deref->deref);
+ val->elems[i] = _vtn_variable_load(b, src_deref,
+ src_type->members[i],
+ &deref->deref);
}
}
}
static void
-_vtn_variable_store(struct vtn_builder *b, nir_deref_var *dest_deref,
- nir_deref *dest_deref_tail, struct vtn_ssa_value *src)
+_vtn_variable_store(struct vtn_builder *b, struct vtn_type *dest_type,
+ nir_deref_var *dest_deref, nir_deref *dest_deref_tail,
+ struct vtn_ssa_value *src)
{
+ if (dest_type->is_builtin) {
+ vtn_builtin_store(b, src, dest_type->builtin);
+ return;
+ }
+
nir_deref *old_child = dest_deref_tail->child;
if (glsl_type_is_vector_or_scalar(src->type)) {
nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
store->variables[0] =
nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
+ store->num_components = glsl_get_vector_elements(src->type);
store->src[0] = nir_src_for_ssa(src->def);
nir_builder_instr_insert(&b->nb, &store->instr);
dest_deref_tail->child = &deref->deref;
for (unsigned i = 0; i < elems; i++) {
deref->base_offset = i;
- _vtn_variable_store(b, dest_deref, &deref->deref, src->elems[i]);
+ _vtn_variable_store(b, dest_type->array_element, dest_deref,
+ &deref->deref, src->elems[i]);
}
} else {
assert(glsl_get_base_type(src->type) == GLSL_TYPE_STRUCT);
for (unsigned i = 0; i < elems; i++) {
deref->index = i;
deref->deref.type = glsl_get_struct_field(src->type, i);
- _vtn_variable_store(b, dest_deref, &deref->deref, src->elems[i]);
+ _vtn_variable_store(b, dest_type->members[i], dest_deref,
+ &deref->deref, src->elems[i]);
}
}
nir_ssa_def *index);
static struct vtn_ssa_value *
-vtn_variable_load(struct vtn_builder *b, nir_deref_var *src)
+vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type *src_type)
{
nir_deref *src_tail = get_deref_tail(src);
- struct vtn_ssa_value *val = _vtn_variable_load(b, src, src_tail);
+ struct vtn_ssa_value *val = _vtn_variable_load(b, src, src_type, src_tail);
if (src_tail->child) {
nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
nir_ssa_def *index);
static void
vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
- nir_deref_var *dest)
+ nir_deref_var *dest, struct vtn_type *dest_type)
{
nir_deref *dest_tail = get_deref_tail(dest);
if (dest_tail->child) {
- struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_tail);
+ struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_type,
+ dest_tail);
nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
assert(deref->deref.child == NULL);
if (deref->deref_array_type == nir_deref_array_type_direct)
else
val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
deref->indirect.ssa);
- _vtn_variable_store(b, dest, dest_tail, val);
+ _vtn_variable_store(b, dest_type, dest, dest_tail, val);
} else {
- _vtn_variable_store(b, dest, dest_tail, src);
+ _vtn_variable_store(b, dest_type, dest, dest_tail, src);
}
}
static void
vtn_variable_copy(struct vtn_builder *b, nir_deref_var *src,
- nir_deref_var *dest)
+ nir_deref_var *dest, struct vtn_type *type)
{
nir_deref *src_tail = get_deref_tail(src);
if (src_tail->child) {
assert(get_deref_tail(dest)->child);
- struct vtn_ssa_value *val = vtn_variable_load(b, src);
- vtn_variable_store(b, val, dest);
+ struct vtn_ssa_value *val = vtn_variable_load(b, src, type);
+ vtn_variable_store(b, val, dest, type);
} else {
nir_intrinsic_instr *copy =
nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
{
switch (opcode) {
case SpvOpVariable: {
- const struct glsl_type *type =
+ struct vtn_type *type =
vtn_value(b, w[1], vtn_value_type_type)->type;
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
nir_variable *var = ralloc(b->shader, nir_variable);
- var->type = type;
+ var->type = type->type;
var->name = ralloc_strdup(var, val->name);
switch ((SpvStorageClass)w[3]) {
case SpvStorageClassUniformConstant:
var->data.mode = nir_var_uniform;
var->data.read_only = true;
- var->interface_type = type;
+ var->interface_type = type->type;
break;
case SpvStorageClassInput:
var->data.mode = nir_var_shader_in;
}
val->deref = nir_deref_var_create(b, var);
+ val->deref_type = type;
vtn_foreach_decoration(b, val, var_decoration_cb, var);
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref;
val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
+ val->deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
nir_deref *tail = &val->deref->deref;
while (tail->child)
case GLSL_TYPE_BOOL:
case GLSL_TYPE_ARRAY: {
nir_deref_array *deref_arr = nir_deref_array_create(b);
- if (base_type == GLSL_TYPE_ARRAY) {
- deref_arr->deref.type = glsl_get_array_element(tail->type);
- } else if (glsl_type_is_matrix(tail->type)) {
- deref_arr->deref.type = glsl_get_column_type(tail->type);
+ if (base_type == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(tail->type)) {
+ val->deref_type = val->deref_type->array_element;
} else {
assert(glsl_type_is_vector(tail->type));
- deref_arr->deref.type = glsl_scalar_type(base_type);
+ val->deref_type = ralloc(b, struct vtn_type);
+ val->deref_type->type = glsl_scalar_type(base_type);
}
+ deref_arr->deref.type = val->deref_type->type;
+
if (idx_val->value_type == vtn_value_type_constant) {
unsigned idx = idx_val->constant->value.u[0];
deref_arr->deref_array_type = nir_deref_array_type_direct;
case GLSL_TYPE_STRUCT: {
assert(idx_val->value_type == vtn_value_type_constant);
unsigned idx = idx_val->constant->value.u[0];
+ val->deref_type = val->deref_type->members[idx];
nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
- deref_struct->deref.type = glsl_get_struct_field(tail->type, idx);
+ deref_struct->deref.type = val->deref_type->type;
tail->child = &deref_struct->deref;
break;
}
case SpvOpCopyMemory: {
nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
+ struct vtn_type *type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
- vtn_variable_copy(b, src, dest);
+ vtn_variable_copy(b, src, dest, type);
break;
}
case SpvOpLoad: {
nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
- const struct glsl_type *src_type = nir_deref_tail(&src->deref)->type;
+ struct vtn_type *src_type =
+ vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
- if (glsl_get_base_type(src_type) == GLSL_TYPE_SAMPLER) {
+ if (glsl_get_base_type(src_type->type) == GLSL_TYPE_SAMPLER) {
vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
return;
}
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_variable_load(b, src);
+ val->ssa = vtn_variable_load(b, src, src_type);
break;
}
case SpvOpStore: {
nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ struct vtn_type *dest_type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
- vtn_variable_store(b, src, dest);
+ vtn_variable_store(b, src, dest, dest_type);
break;
}
unreachable("Unhandled opcode");
}
+/* Recursively allocates a vtn_ssa_value tree mirroring the given GLSL type.
+ * Vectors/scalars are leaves (val->def is filled in later by the caller);
+ * matrices, arrays, and structs get one child vtn_ssa_value per
+ * column/element/field respectively.
+ */
+static struct vtn_ssa_value *
+vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+   val->type = type;
+
+   if (!glsl_type_is_vector_or_scalar(type)) {
+      unsigned elems = glsl_get_length(type);
+      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+      for (unsigned i = 0; i < elems; i++) {
+         const struct glsl_type *child_type;
+
+         switch (glsl_get_base_type(type)) {
+         case GLSL_TYPE_INT:
+         case GLSL_TYPE_UINT:
+         case GLSL_TYPE_BOOL:
+         case GLSL_TYPE_FLOAT:
+         case GLSL_TYPE_DOUBLE:
+            /* A non-vector/scalar numeric type must be a matrix; its
+             * children are column vectors.
+             */
+            child_type = glsl_get_column_type(type);
+            break;
+         case GLSL_TYPE_ARRAY:
+            child_type = glsl_get_array_element(type);
+            break;
+         case GLSL_TYPE_STRUCT:
+            child_type = glsl_get_struct_field(type, i);
+            break;
+         default:
+            unreachable("unknown base type");
+         }
+
+         val->elems[i] = vtn_create_ssa_value(b, child_type);
+      }
+   }
+
+   return val;
+}
+
static nir_tex_src
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
{
instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
+ val->ssa = vtn_create_ssa_value(b, glsl_vector_type(GLSL_TYPE_FLOAT, 4));
val->ssa->def = &instr->dest.ssa;
- val->ssa->type = val->type;
nir_builder_instr_insert(&b->nb, &instr->instr);
}
-static struct vtn_ssa_value *
-vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
-{
- struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
- val->type = type;
-
- if (!glsl_type_is_vector_or_scalar(type)) {
- unsigned elems = glsl_get_length(type);
- val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
- for (unsigned i = 0; i < elems; i++) {
- const struct glsl_type *child_type;
-
- switch (glsl_get_base_type(type)) {
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
- child_type = glsl_get_column_type(type);
- break;
- case GLSL_TYPE_ARRAY:
- child_type = glsl_get_array_element(type);
- break;
- case GLSL_TYPE_STRUCT:
- child_type = glsl_get_struct_field(type, i);
- break;
- default:
- unreachable("unkown base type");
- }
-
- val->elems[i] = vtn_create_ssa_value(b, child_type);
- }
- }
-
- return val;
-}
static nir_alu_instr *
create_vec(void *mem_ctx, unsigned num_components)
nir_alu_instr *vec = nir_alu_instr_create(mem_ctx, op);
nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
+ vec->dest.write_mask = (1 << num_components) - 1;
return vec;
}
const uint32_t *w, unsigned count)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
switch (opcode) {
case SpvOpTranspose: {
const uint32_t *w, unsigned count)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
- val->ssa = vtn_create_ssa_value(b, val->type);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
/* Collect the various SSA sources */
unsigned num_inputs = count - 3;
nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
- glsl_get_vector_elements(val->type), val->name);
+ glsl_get_vector_elements(type), val->name);
val->ssa->def = &instr->dest.dest.ssa;
for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
const uint32_t *w, unsigned count)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
switch (opcode) {
case SpvOpVectorExtractDynamic:
break;
case SpvOpVectorShuffle:
- val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(val->type),
+ val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
vtn_ssa_value(b, w[3])->def,
vtn_ssa_value(b, w[4])->def,
w + 5);
break;
case SpvOpCompositeConstruct: {
- val->ssa = rzalloc(b, struct vtn_ssa_value);
unsigned elems = count - 3;
- if (glsl_type_is_vector_or_scalar(val->type)) {
+ if (glsl_type_is_vector_or_scalar(type)) {
nir_ssa_def *srcs[4];
for (unsigned i = 0; i < elems; i++)
srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
val->ssa->def =
- vtn_vector_construct(b, glsl_get_vector_elements(val->type),
+ vtn_vector_construct(b, glsl_get_vector_elements(type),
elems, srcs);
} else {
val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
default:
unreachable("unknown composite operation");
}
-
- val->ssa->type = val->type;
}
static void
vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
{
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
- val->ssa = vtn_phi_node_create(b, val->type);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_phi_node_create(b, type);
}
static void
b->func = rzalloc(b, struct vtn_function);
const struct glsl_type *result_type =
- vtn_value(b, w[1], vtn_value_type_type)->type;
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
const struct glsl_type *func_type =
- vtn_value(b, w[4], vtn_value_type_type)->type;
+ vtn_value(b, w[4], vtn_value_type_type)->type->type;
assert(glsl_get_function_return_type(func_type) == result_type);