case GLSL_TYPE_UINT:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_DOUBLE: {
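+ /* The load_const width must match the type's bit size; doubles need
+ * 64-bit immediates rather than the old hardcoded 32.
+ */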
+ int bit_size = glsl_get_bit_size(type);
if (glsl_type_is_vector_or_scalar(type)) {
unsigned num_components = glsl_get_vector_elements(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, num_components, 32);
+ nir_load_const_instr_create(b->shader, num_components, bit_size);
load->value = constant->values[0];
struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
col_val->type = glsl_get_column_type(val->type);
nir_load_const_instr *load =
- nir_load_const_instr_create(b->shader, rows, 32);
+ nir_load_const_instr_create(b->shader, rows, bit_size);
load->value = constant->values[i];
}
}
break;
+ }
case GLSL_TYPE_ARRAY: {
unsigned elems = glsl_get_length(val->type);
val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
break;
}
- case SpvOpTypeFloat:
- val->type->type = glsl_float_type();
+ case SpvOpTypeFloat: {
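+ /* Word 2 of OpTypeFloat is the bit width of the type. */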
+ int bit_size = w[2];
+ val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
break;
+ }
case SpvOpTypeVector: {
struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
}
static void
-spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
int member, const struct vtn_decoration *dec,
void *data)
{
get_specialization(struct vtn_builder *b, struct vtn_value *val,
uint32_t const_value)
{
- vtn_foreach_decoration(b, val, spec_constant_deocoration_cb, &const_value);
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
return const_value;
}
break;
}
- case SpvOpConstant:
+ case SpvOpConstant: {
assert(glsl_type_is_scalar(val->const_type));
- val->constant->values[0].u32[0] = w[3];
+ int bit_size = glsl_get_bit_size(val->const_type);
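+ /* SPIR-V packs a 64-bit literal into two words, low-order word first,
+ * so the halves go into u32[0] and u32[1] of the value union.
+ */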
+ if (bit_size == 64) {
+ val->constant->values[0].u32[0] = w[3];
+ val->constant->values[0].u32[1] = w[4];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[0] = w[3];
+ }
break;
+ }
case SpvOpSpecConstant:
assert(glsl_type_is_scalar(val->const_type));
val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
case GLSL_TYPE_INT:
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
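+ /* Copy composite members at the type's bit size so 64-bit components
+ * are not truncated to 32 bits.
+ */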
+ int bit_size = glsl_get_bit_size(val->const_type);
if (glsl_type_is_matrix(val->const_type)) {
assert(glsl_get_matrix_columns(val->const_type) == elem_count);
for (unsigned i = 0; i < elem_count; i++)
} else {
assert(glsl_type_is_vector(val->const_type));
assert(glsl_get_vector_elements(val->const_type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ for (unsigned i = 0; i < elem_count; i++) {
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ }
+ }
}
ralloc_free(elems);
break;
-
+ }
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_ARRAY:
ralloc_steal(val->constant, elems);
unsigned len0 = glsl_get_vector_elements(v0->const_type);
unsigned len1 = glsl_get_vector_elements(v1->const_type);
- uint32_t u[8];
- for (unsigned i = 0; i < len0; i++)
- u[i] = v0->constant->values[0].u32[i];
- for (unsigned i = 0; i < len1; i++)
- u[len0 + i] = v1->constant->values[0].u32[i];
-
- for (unsigned i = 0; i < count - 6; i++) {
- uint32_t comp = w[i + 6];
- if (comp == (uint32_t)-1) {
- val->constant->values[0].u32[i] = 0xdeadbeef;
- } else {
- val->constant->values[0].u32[i] = u[comp];
+ /* The scratch arrays below hold at most eight components. */
+ assert(len0 + len1 <= 8);
+
+ unsigned bit_size = glsl_get_bit_size(val->const_type);
+ assert(bit_size == glsl_get_bit_size(v0->const_type) &&
+ bit_size == glsl_get_bit_size(v1->const_type));
+
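+ /* Shuffle indices select whole elements, so copy them at the operands'
+ * bit size.
+ */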
+ if (bit_size == 64) {
+ uint64_t u64[8];
+ for (unsigned i = 0; i < len0; i++)
+ u64[i] = v0->constant->values[0].u64[i];
+ for (unsigned i = 0; i < len1; i++)
+ u64[len0 + i] = v1->constant->values[0].u64[i];
+
+ for (unsigned i = 0; i < count - 6; i++) {
+ uint32_t comp = w[i + 6];
+ /* If a component is not used, set it to a known constant so any
+ * wrong use is easy to spot.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u64[i] = 0xdeadbeefdeadbeef;
+ else
+ val->constant->values[0].u64[i] = u64[comp];
+ }
+ } else {
+ uint32_t u32[8];
+ for (unsigned i = 0; i < len0; i++)
+ u32[i] = v0->constant->values[0].u32[i];
+
+ for (unsigned i = 0; i < len1; i++)
+ u32[len0 + i] = v1->constant->values[0].u32[i];
+
+ for (unsigned i = 0; i < count - 6; i++) {
+ uint32_t comp = w[i + 6];
+ /* If a component is not used, set it to a known constant so any
+ * wrong use is easy to spot.
+ */
+ if (comp == (uint32_t)-1)
+ val->constant->values[0].u32[i] = 0xdeadbeef;
+ else
+ val->constant->values[0].u32[i] = u32[comp];
}
}
break;
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_BOOL:
/* If we hit this granularity, we're picking off an element */
if (glsl_type_is_matrix(type)) {
val->constant = *c;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
- for (unsigned i = 0; i < num_components; i++)
- val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ for (unsigned i = 0; i < num_components; i++) {
+ if (bit_size == 64) {
+ val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+ } else {
+ assert(bit_size == 32);
+ val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+ }
+ }
}
} else {
struct vtn_value *insert =
*c = insert->constant;
} else {
unsigned num_components = glsl_get_vector_elements(type);
+ unsigned bit_size = glsl_get_bit_size(type);
- for (unsigned i = 0; i < num_components; i++)
- (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ for (unsigned i = 0; i < num_components; i++) {
+ if (bit_size == 64) {
+ (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+ } else {
+ assert(bit_size == 32);
+ (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+ }
+ }
}
}
break;
default: {
bool swap;
- nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
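+ /* Conversion opcodes map to different nir_ops depending on the source
+ * and destination bit sizes, so the lookup needs both ALU types.
+ */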
+ nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+ nir_alu_type src_alu_type = dst_alu_type;
+ nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap,
+ src_alu_type, dst_alu_type);
unsigned num_components = glsl_get_vector_elements(val->const_type);
unsigned bit_size =
struct vtn_value *arg = vtn_untyped_value(b, arg_id);
if (arg->value_type == vtn_value_type_access_chain) {
nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
- call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+ call->params[i] = nir_deref_var_clone(d, call);
} else {
struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
/* Now we need to handle some number of optional arguments */
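+ /* The ConstOffsets operand of OpImageGather carries an array of four
+ * offset vectors; remember it so the gather can be expanded below.
+ */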
+ const struct vtn_ssa_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx++];
operands & SpvImageOperandsConstOffsetMask)
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
- if (operands & SpvImageOperandsConstOffsetsMask)
- assert(!"Constant offsets to texture gather not yet implemented");
+ if (operands & SpvImageOperandsConstOffsetsMask) {
+ gather_offsets = vtn_ssa_value(b, w[idx++]);
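+ /* Reserve a source slot now; each of the four gathers emitted below
+ * fills it in with its own offset.
+ */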
+ (*p++) = (nir_tex_src){};
+ }
if (operands & SpvImageOperandsSampleMask) {
assert(texop == nir_texop_txf_ms);
}
nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
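+ /* With OpSampledImage the texture comes from a separate image deref;
+ * for a combined sampler the sampler deref serves as both.
+ */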
+ nir_deref_var *texture;
if (sampled.image) {
nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+ texture = image;
} else {
- instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ texture = sampler;
}
+ instr->texture = nir_deref_var_clone(texture, instr);
+
switch (instr->op) {
case nir_texop_tex:
case nir_texop_txb:
case nir_texop_txl:
case nir_texop_txd:
/* These operations require a sampler */
- instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+ instr->sampler = nir_deref_var_clone(sampler, instr);
break;
case nir_texop_txf:
case nir_texop_txf_ms:
assert(glsl_get_vector_elements(ret_type->type) ==
nir_tex_instr_dest_size(instr));
+ nir_ssa_def *def;
+ nir_instr *instruction;
+ if (gather_offsets) {
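+ /* Each gathered texel takes its own offset, so emit four gather
+ * instructions, one per offset, and merge their results afterwards.
+ */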
+ assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
+ assert(glsl_get_length(gather_offsets->type) == 4);
+ nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
+
+ /* Copy the current instruction 4x */
+ for (uint32_t i = 1; i < 4; i++) {
+ instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
+ instrs[i]->op = instr->op;
+ instrs[i]->coord_components = instr->coord_components;
+ instrs[i]->sampler_dim = instr->sampler_dim;
+ instrs[i]->is_array = instr->is_array;
+ instrs[i]->is_shadow = instr->is_shadow;
+ instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
+ instrs[i]->component = instr->component;
+ instrs[i]->dest_type = instr->dest_type;
+ instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
+ instrs[i]->sampler = NULL;
+
+ memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+ nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
+ nir_tex_instr_dest_size(instr), 32, NULL);
+ }
+
+ /* Fill in the last source of each instruction with its offset from the
+ * passed-in offset array, then insert the instructions into the stream.
+ */
+ for (uint32_t i = 0; i < 4; i++) {
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
+ src.src_type = nir_tex_src_offset;
+ instrs[i]->src[instrs[i]->num_srcs - 1] = src;
+ nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
+ }
+
+ /* Combine the results of the 4 instructions by taking their .w
+ * components
+ */
+ nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
+ nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
+ vec4->dest.write_mask = 0xf;
+ for (uint32_t i = 0; i < 4; i++) {
+ vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
+ vec4->src[i].swizzle[0] = 3;
+ }
+ def = &vec4->dest.dest.ssa;
+ instruction = &vec4->instr;
+ } else {
+ def = &instr->dest.ssa;
+ instruction = &instr->instr;
+ }
+
val->ssa = vtn_create_ssa_value(b, ret_type->type);
- val->ssa->def = &instr->dest.ssa;
+ val->ssa->def = def;
- nir_builder_instr_insert(&b->nb, &instr->instr);
+ nir_builder_instr_insert(&b->nb, instruction);
}
static void
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
- intrin->variables[0] =
- nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+ intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
if (chain->var->mode == vtn_variable_mode_workgroup) {
struct vtn_type *type = chain->var->type;
- nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+ nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
+ atomic->variables[0] = nir_deref_var_clone(deref, atomic);
switch (opcode) {
case SpvOpAtomicLoad:
}
}
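+/* Warn, rather than fail, when a shader declares a capability the driver
+ * has not advertised in nir_spirv_supported_extensions.
+ */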
+#define spv_check_supported(name, cap) do { \
+ if (!(b->ext && b->ext->name)) \
+ vtn_warn("Unsupported SPIR-V capability: %s", \
+ spirv_capability_to_string(cap)); \
+ } while (0)
+
static bool
vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvCapabilityClipDistance:
case SpvCapabilityCullDistance:
case SpvCapabilityInputAttachment:
+ case SpvCapabilityImageGatherExtended:
+ case SpvCapabilityStorageImageExtendedFormats:
break;
case SpvCapabilityGeometryStreams:
case SpvCapabilityInt64Atomics:
case SpvCapabilityAtomicStorage:
case SpvCapabilityInt16:
- case SpvCapabilityImageGatherExtended:
case SpvCapabilityStorageImageMultisample:
case SpvCapabilityImageCubeArray:
case SpvCapabilityInt8:
case SpvCapabilitySparseResidency:
case SpvCapabilityMinLod:
- case SpvCapabilityImageMSArray:
- case SpvCapabilityStorageImageExtendedFormats:
case SpvCapabilityTransformFeedback:
case SpvCapabilityStorageImageReadWithoutFormat:
case SpvCapabilityStorageImageWriteWithoutFormat:
vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
spirv_capability_to_string(cap));
break;
+
+ case SpvCapabilityImageMSArray:
+ spv_check_supported(image_ms_array, cap);
+ break;
}
break;
}
spirv_to_nir(const uint32_t *words, size_t word_count,
struct nir_spirv_specialization *spec, unsigned num_spec,
gl_shader_stage stage, const char *entry_point_name,
+ const struct nir_spirv_supported_extensions *ext,
const nir_shader_compiler_options *options)
{
const uint32_t *word_end = words + word_count;
exec_list_make_empty(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
+ b->ext = ext;
/* Handle all the preamble instructions */
words = vtn_foreach_instruction(b, words, word_end,