if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
- nir_ssa_undef_instr *undef =
- nir_ssa_undef_instr_create(b->shader, num_components);
-
- nir_instr_insert_before_cf_list(&b->impl->body, &undef->instr);
- val->def = &undef->def;
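+ /* Undefined values carry an explicit bit size; derive it from the value's type. */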
+ unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(val->type));
+ val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
} else {
unsigned elems = glsl_get_length(val->type);
val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
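+ /* Only 32-bit constants are handled here, hence the hard-coded bit size. */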
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, num_components);
+ nir_load_const_instr_create(b->shader, num_components, 32);
for (unsigned i = 0; i < num_components; i++)
- load->value.u[i] = constant->value.u[i];
+ load->value.u32[i] = constant->value.u[i];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
val->def = &load->def;
struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
col_val->type = glsl_get_column_type(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, rows);
+ nir_load_const_instr_create(b->shader, rows, 32);
for (unsigned j = 0; j < rows; j++)
- load->value.u[j] = constant->value.u[rows * i + j];
+ load->value.u32[j] = constant->value.u[rows * i + j];
nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
col_val->def = &load->def;
if (opcode == SpvOpGroupDecorate) {
dec->scope = VTN_DEC_DECORATION;
} else {
- dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
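+ /* OpGroupMemberDecorate targets are (<id>, member) pairs; the member
+ * index is the word after the <id>, hence the pre-increment. */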
+ dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
}
/* Link into the list */
}
struct member_decoration_ctx {
+ unsigned num_fields;
struct glsl_struct_field *fields;
struct vtn_type *type;
};
if (member < 0)
return;
+ assert(member < ctx->num_fields);
+
switch (dec->decoration) {
case SpvDecorationRelaxedPrecision:
break; /* FIXME: Do nothing with this for now. */
}
struct member_decoration_ctx ctx = {
+ .num_fields = num_fields,
.fields = fields,
.type = val->type
};
else
val->type->access_qualifier = SpvAccessQualifierReadWrite;
- assert(!multisampled && "FIXME: Handl multi-sampled textures");
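+ /* SPIR-V keeps multisampling as a separate flag while GLSL types fold it
+ * into the sampler dimension, so translate it here. */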
+ if (multisampled) {
+ assert(dim == GLSL_SAMPLER_DIM_2D);
+ dim = GLSL_SAMPLER_DIM_MS;
+ }
val->type->image_format = translate_image_format(format);
* thrown away the moment you combine it with an image. What really
* matters is that it's a sampler type as opposed to an integer type
* so the backend knows what to do.
- *
- * TODO: Eventually we should consider adding a "bare sampler" type
- * to glsl_types.
*/
- val->type->type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false,
- GLSL_TYPE_FLOAT);
+ val->type->type = glsl_bare_sampler_type();
break;
case SpvOpTypeOpaque:
nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
unsigned num_components = glsl_get_vector_elements(val->const_type);
+ unsigned bit_size =
+ glsl_get_bit_size(glsl_get_base_type(val->const_type));
nir_const_value src[3];
assert(count <= 7);
vtn_value(b, w[4 + i], vtn_value_type_constant)->constant;
unsigned j = swap ? 1 - i : i;
+ assert(bit_size == 32);
for (unsigned k = 0; k < num_components; k++)
- src[j].u[k] = c->value.u[k];
+ src[j].u32[k] = c->value.u[k];
}
- nir_const_value res = nir_eval_const_opcode(op, num_components, src);
+ nir_const_value res = nir_eval_const_opcode(op, num_components,
+ bit_size, src);
for (unsigned k = 0; k < num_components; k++)
- val->constant->value.u[k] = res.u[k];
+ val->constant->value.u[k] = res.u32[k];
return;
} /* default */
sampled.sampler = sampled_val->access_chain;
}
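+ /* Look up the image type up front: the opcode handling below needs to
+ * know whether the image is multisampled. */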
+ const struct glsl_type *image_type;
+ if (sampled.image) {
+ image_type = sampled.image->var->var->interface_type;
+ } else {
+ image_type = sampled.sampler->var->var->interface_type;
+ }
+
nir_tex_src srcs[8]; /* 8 should be enough */
nir_tex_src *p = srcs;
break;
case SpvOpImageFetch:
- texop = nir_texop_txf;
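+ /* Fetching from a multisampled image takes a sample index, which NIR
+ * models as a separate texop. */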
+ if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
+ texop = nir_texop_txf_ms;
+ } else {
+ texop = nir_texop_txf;
+ }
break;
case SpvOpImageGather:
if (operands & SpvImageOperandsLodMask) {
assert(texop == nir_texop_txl || texop == nir_texop_txf ||
- texop == nir_texop_txs);
+ texop == nir_texop_txf_ms || texop == nir_texop_txs);
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
}
assert(!"Constant offsets to texture gather not yet implemented");
if (operands & SpvImageOperandsSampleMask) {
- assert(texop == nir_texop_txf);
- texop = nir_texop_txf_ms;
+ assert(texop == nir_texop_txf_ms);
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
}
memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
- const struct glsl_type *image_type;
- if (sampled.image) {
- image_type = sampled.image->var->var->interface_type;
- } else {
- image_type = sampled.sampler->var->var->interface_type;
- }
-
instr->sampler_dim = glsl_get_sampler_dim(image_type);
instr->is_array = glsl_sampler_type_is_array(image_type);
instr->is_shadow = glsl_sampler_type_is_shadow(image_type);
break;
case GLSL_SAMPLER_DIM_2D:
case GLSL_SAMPLER_DIM_RECT:
+ case GLSL_SAMPLER_DIM_MS:
instr->coord_components = 2;
break;
case GLSL_SAMPLER_DIM_3D:
case GLSL_SAMPLER_DIM_CUBE:
- case GLSL_SAMPLER_DIM_MS:
instr->coord_components = 3;
break;
default:
}
nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
- instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
if (sampled.image) {
nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
} else {
- instr->texture = NULL;
+ instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ }
+
+ switch (instr->op) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ case nir_texop_txl:
+ case nir_texop_txd:
+ /* These operations require a sampler */
+ instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ break;
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ case nir_texop_txs:
+ case nir_texop_lod:
+ case nir_texop_tg4:
+ case nir_texop_query_levels:
+ case nir_texop_texture_samples:
+ case nir_texop_samples_identical:
+ /* These don't need a sampler */
+ instr->sampler = NULL;
+ break;
}
nir_ssa_dest_init(&instr->instr, &instr->dest,
- nir_tex_instr_dest_size(instr), NULL);
+ nir_tex_instr_dest_size(instr), 32, NULL);
assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
assert(w[5] == SpvImageOperandsSampleMask);
image.sample = vtn_ssa_value(b, w[6])->def;
} else {
- image.sample = nir_ssa_undef(&b->nb, 1);
+ image.sample = nir_ssa_undef(&b->nb, 1, 32);
}
break;
assert(w[4] == SpvImageOperandsSampleMask);
image.sample = vtn_ssa_value(b, w[5])->def;
} else {
- image.sample = nir_ssa_undef(&b->nb, 1);
+ image.sample = nir_ssa_undef(&b->nb, 1, 32);
}
break;
if (opcode != SpvOpImageWrite) {
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
- nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, NULL);
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
nir_builder_instr_insert(&b->nb, &intrin->instr);
fill_common_atomic_sources(b, opcode, w, &atomic->src[2]);
}
- nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, NULL);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest, 1, 32, NULL);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
}
static nir_alu_instr *
-create_vec(nir_shader *shader, unsigned num_components)
+create_vec(nir_shader *shader, unsigned num_components, unsigned bit_size)
{
nir_op op;
switch (num_components) {
}
nir_alu_instr *vec = nir_alu_instr_create(shader, op);
- nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
+ bit_size, NULL);
vec->dest.write_mask = (1 << num_components) - 1;
return vec;
for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
nir_alu_instr *vec = create_vec(b->shader,
- glsl_get_matrix_columns(src->type));
+ glsl_get_matrix_columns(src->type),
+ glsl_get_bit_size(glsl_get_base_type(src->type)));
if (glsl_type_is_vector_or_scalar(src->type)) {
vec->src[0].src = nir_src_for_ssa(src->def);
vec->src[0].swizzle[0] = i;
vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
unsigned index)
{
- nir_alu_instr *vec = create_vec(b->shader, src->num_components);
+ nir_alu_instr *vec = create_vec(b->shader, src->num_components,
+ src->bit_size);
for (unsigned i = 0; i < src->num_components; i++) {
if (i == index) {
nir_ssa_def *src0, nir_ssa_def *src1,
const uint32_t *indices)
{
- nir_alu_instr *vec = create_vec(b->shader, num_components);
-
- nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
- nir_builder_instr_insert(&b->nb, &undef->instr);
+ nir_alu_instr *vec = create_vec(b->shader, num_components, src0->bit_size);
for (unsigned i = 0; i < num_components; i++) {
uint32_t index = indices[i];
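+ /* A shuffle component of 0xFFFFFFFF has no source and is undefined
+ * per the SPIR-V spec, so feed it an undef. */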
if (index == 0xffffffff) {
- vec->src[i].src = nir_src_for_ssa(&undef->def);
+ vec->src[i].src =
+ nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
} else if (index < src0->num_components) {
vec->src[i].src = nir_src_for_ssa(src0);
vec->src[i].swizzle[0] = index;
vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
unsigned num_srcs, nir_ssa_def **srcs)
{
- nir_alu_instr *vec = create_vec(b->shader, num_components);
+ nir_alu_instr *vec = create_vec(b->shader, num_components,
+ srcs[0]->bit_size);
unsigned dest_idx = 0;
for (unsigned i = 0; i < num_srcs; i++) {
nir_intrinsic_instr_create(b->shader, intrinsic_op);
if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
- intrin->const_index[0] = w[1];
+ nir_intrinsic_set_stream_id(intrin, w[1]);
nir_builder_instr_insert(&b->nb, &intrin->instr);
}
/* Unhandled, but these are for debug so that's ok. */
break;
- case SpvOpCapability:
- switch ((SpvCapability)w[1]) {
+ case SpvOpCapability: {
+ SpvCapability cap = w[1];
+ switch (cap) {
case SpvCapabilityMatrix:
case SpvCapabilityShader:
case SpvCapabilityGeometry:
+ case SpvCapabilityTessellationPointSize:
+ case SpvCapabilityGeometryPointSize:
+ case SpvCapabilityUniformBufferArrayDynamicIndexing:
+ case SpvCapabilitySampledImageArrayDynamicIndexing:
+ case SpvCapabilityStorageBufferArrayDynamicIndexing:
+ case SpvCapabilityStorageImageArrayDynamicIndexing:
+ case SpvCapabilityImageRect:
+ case SpvCapabilitySampledRect:
+ case SpvCapabilitySampled1D:
+ case SpvCapabilityImage1D:
+ case SpvCapabilitySampledCubeArray:
+ case SpvCapabilitySampledBuffer:
+ case SpvCapabilityImageBuffer:
+ case SpvCapabilityImageQuery:
+ break;
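+ /* Claimed but not yet implemented: warn instead of aborting so shaders
+ * that declare these can still be translated. */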
+ case SpvCapabilityClipDistance:
+ case SpvCapabilityCullDistance:
+ case SpvCapabilityGeometryStreams:
+ fprintf(stderr, "WARNING: Unsupported SPIR-V Capability\n");
break;
default:
assert(!"Unsupported capability");
}
break;
+ }
case SpvOpExtInstImport:
vtn_handle_extension(b, opcode, w, count);
b->shader = nir_shader_create(NULL, stage, options);
+ /* Set shader info defaults */
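+ /* Overridden below if the shader declares an Invocations execution mode. */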
+ b->shader->info.gs.invocations = 1;
+
/* Parse execution modes */
vtn_foreach_execution_mode(b, b->entry_point,
vtn_handle_execution_mode, NULL);