ctx->fields[member].type = ctx->type->members[member]->type;
}
+/* Decoration callback that records Block / BufferBlock decorations on a
+ * struct type.  This runs before the struct's GLSL type is built so that
+ * the decoration can influence which GLSL type gets created (interface
+ * type for Block/BufferBlock vs. plain struct type).
+ *
+ * Only decorations on the struct itself are of interest here; member
+ * decorations (member != -1) are ignored.  The ctx parameter is unused.
+ */
+static void
+struct_block_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *ctx)
+{
+ if (member != -1)
+ return;
+
+ struct vtn_type *type = val->type;
+ if (dec->decoration == SpvDecorationBlock)
+ type->block = true;
+ else if (dec->decoration == SpvDecorationBufferBlock)
+ type->buffer_block = true;
+}
+
static void
type_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
break;
case SpvDecorationBlock:
vtn_assert(type->base_type == vtn_base_type_struct);
- type->block = true;
+ vtn_assert(type->block);
break;
case SpvDecorationBufferBlock:
vtn_assert(type->base_type == vtn_base_type_struct);
- type->buffer_block = true;
+ vtn_assert(type->buffer_block);
break;
case SpvDecorationGLSLShared:
case SpvDecorationGLSLPacked:
vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
- const char *name = val->name ? val->name : "struct";
+ vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);
- val->type->type = glsl_struct_type(fields, num_fields, name, false);
+ const char *name = val->name;
+
+ if (val->type->block || val->type->buffer_block) {
+ /* Packing will be ignored since types coming from SPIR-V are
+ * explicitly laid out.
+ */
+ val->type->type = glsl_interface_type(fields, num_fields,
+ /* packing */ 0, false,
+ name ? name : "block");
+ } else {
+ val->type->type = glsl_struct_type(fields, num_fields,
+ name ? name : "struct", false);
+ }
break;
}
* declaration.
*/
val = vtn_untyped_value(b, w[1]);
- struct vtn_type *deref_type = vtn_untyped_value(b, w[3])->type;
SpvStorageClass storage_class = w[2];
break;
case SpvStorageClassWorkgroup:
val->type->type = b->options->shared_ptr_type;
- if (b->physical_ptrs)
- val->type->stride = align(glsl_get_cl_size(deref_type->type), glsl_get_cl_alignment(deref_type->type));
break;
case SpvStorageClassCrossWorkgroup:
val->type->type = b->options->global_ptr_type;
- if (b->physical_ptrs)
- val->type->stride = align(glsl_get_cl_size(deref_type->type), glsl_get_cl_alignment(deref_type->type));
break;
case SpvStorageClassFunction:
- if (b->physical_ptrs) {
+ if (b->physical_ptrs)
val->type->type = b->options->temp_ptr_type;
- val->type->stride = align(glsl_get_cl_size(deref_type->type), glsl_get_cl_alignment(deref_type->type));
- }
break;
default:
/* In this case, no variable pointers are allowed so all deref
vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
+ if (b->physical_ptrs) {
+ switch (storage_class) {
+ case SpvStorageClassFunction:
+ case SpvStorageClassWorkgroup:
+ case SpvStorageClassCrossWorkgroup:
+ val->type->stride = align(glsl_get_cl_size(val->type->deref->type),
+ glsl_get_cl_alignment(val->type->deref->type));
+ break;
+ default:
+ break;
+ }
+ }
+
if (storage_class == SpvStorageClassWorkgroup &&
b->options->lower_workgroup_access_to_offsets) {
uint32_t size, align;
nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
nir_alu_type_get_type_size(src_alu_type),
nir_alu_type_get_type_size(dst_alu_type));
- nir_const_value src[4];
+ nir_const_value src[3];
for (unsigned i = 0; i < count - 4; i++) {
struct vtn_value *src_val =
child_type = glsl_get_array_element(type);
break;
case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
child_type = glsl_get_struct_field(type, i);
break;
default:
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
/* Now we need to handle some number of optional arguments */
- const struct vtn_ssa_value *gather_offsets = NULL;
+ struct vtn_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx++];
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
if (operands & SpvImageOperandsConstOffsetsMask) {
- nir_tex_src none = {0};
- gather_offsets = vtn_ssa_value(b, w[idx++]);
- (*p++) = none;
+ vtn_assert(texop == nir_texop_tg4);
+ gather_offsets = vtn_value(b, w[idx++], vtn_value_type_constant);
}
if (operands & SpvImageOperandsSampleMask) {
is_shadow && glsl_get_components(ret_type->type) == 1;
instr->component = gather_component;
+ if (sampled.image && (sampled.image->access & ACCESS_NON_UNIFORM))
+ instr->texture_non_uniform = true;
+
+ if (sampled.sampler && (sampled.sampler->access & ACCESS_NON_UNIFORM))
+ instr->sampler_non_uniform = true;
+
switch (glsl_get_sampler_result_type(image_type)) {
case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
vtn_assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
- nir_ssa_def *def;
- nir_instr *instruction;
if (gather_offsets) {
- vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
- vtn_assert(glsl_get_length(gather_offsets->type) == 4);
- nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
-
- /* Copy the current instruction 4x */
- for (uint32_t i = 1; i < 4; i++) {
- instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
- instrs[i]->op = instr->op;
- instrs[i]->coord_components = instr->coord_components;
- instrs[i]->sampler_dim = instr->sampler_dim;
- instrs[i]->is_array = instr->is_array;
- instrs[i]->is_shadow = instr->is_shadow;
- instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
- instrs[i]->component = instr->component;
- instrs[i]->dest_type = instr->dest_type;
-
- memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
-
- nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
- nir_tex_instr_dest_size(instr), 32, NULL);
- }
-
- /* Fill in the last argument with the offset from the passed in offsets
- * and insert the instruction into the stream.
- */
- for (uint32_t i = 0; i < 4; i++) {
- nir_tex_src src;
- src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
- src.src_type = nir_tex_src_offset;
- instrs[i]->src[instrs[i]->num_srcs - 1] = src;
- nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
- }
-
- /* Combine the results of the 4 instructions by taking their .w
- * components
- */
- nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
- nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
- vec4->dest.write_mask = 0xf;
+ vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
+ gather_offsets->type->length != 4,
+ "ConstOffsets must be an array of size four of vectors "
+ "of two integer components");
+
+ struct vtn_type *vec_type = gather_offsets->type->array_element;
+ vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
+ vec_type->length != 2 ||
+ !glsl_type_is_integer(vec_type->type),
+ "ConstOffsets must be an array of size four of vectors "
+ "of two integer components");
+
+ unsigned bit_size = glsl_get_bit_size(vec_type->type);
for (uint32_t i = 0; i < 4; i++) {
- vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
- vec4->src[i].swizzle[0] = 3;
+ const nir_const_value *cvec =
+ &gather_offsets->constant->elements[i]->values[0];
+ for (uint32_t j = 0; j < 2; j++) {
+ switch (bit_size) {
+ case 8: instr->tg4_offsets[i][j] = cvec->i8[j]; break;
+ case 16: instr->tg4_offsets[i][j] = cvec->i16[j]; break;
+ case 32: instr->tg4_offsets[i][j] = cvec->i32[j]; break;
+ case 64: instr->tg4_offsets[i][j] = cvec->i64[j]; break;
+ default:
+ vtn_fail("Unsupported bit size");
+ }
+ }
}
- def = &vec4->dest.dest.ssa;
- instruction = &vec4->instr;
- } else {
- def = &instr->dest.ssa;
- instruction = &instr->instr;
}
val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = def;
+ val->ssa->def = &instr->dest.ssa;
- nir_builder_instr_insert(&b->nb, instruction);
+ nir_builder_instr_insert(&b->nb, &instr->instr);
}
static void
spv_check_supported(physical_storage_buffer_address, cap);
break;
+ case SpvCapabilityComputeDerivativeGroupQuadsNV:
+ case SpvCapabilityComputeDerivativeGroupLinearNV:
+ spv_check_supported(derivative_group, cap);
+ break;
+
default:
vtn_fail("Unhandled capability");
}
vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
break;
+ case SpvExecutionModeDerivativeGroupQuadsNV:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
+ break;
+
+ case SpvExecutionModeDerivativeGroupLinearNV:
+ vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
+ break;
+
default:
vtn_fail("Unhandled execution mode");
}