struct vtn_access_chain *chain =
vtn_access_chain_extend(b, base->chain, deref_chain->length);
struct vtn_type *type = base->type;
+ enum gl_access_qualifier access = base->access;
/* OpPtrAccessChain is only allowed on things which support variable
* pointers. For everything else, the client is expected to just pass us
} else {
type = type->array_element;
}
+
+ access |= type->access;
}
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
ptr->var = base->var;
ptr->deref = base->deref;
ptr->chain = chain;
+ ptr->access = access;
return ptr;
}
nir_ssa_def *src0 = vtn_ssa_value(b, link.id)->def;
if (src0->bit_size != 32)
src0 = nir_u2u32(&b->nb, src0);
- return nir_imul(&b->nb, src0, nir_imm_int(&b->nb, stride));
+ return nir_imul_imm(&b->nb, src0, stride);
}
}
nir_ssa_def *block_index = base->block_index;
nir_ssa_def *offset = base->offset;
struct vtn_type *type = base->type;
+ enum gl_access_qualifier access = base->access;
unsigned idx = 0;
if (base->mode == vtn_variable_mode_ubo ||
idx++;
/* This consumes a level of type */
type = type->array_element;
+ access |= type->access;
} else {
/* This is annoying. We've been asked for a pointer to the
* array of UBOs/SSBOs and not a specific buffer. Return a
vtn_access_link_as_ssa(b, deref_chain->link[idx], type->stride);
offset = nir_iadd(&b->nb, offset, elem_offset);
type = type->array_element;
+ access |= type->access;
break;
}
case GLSL_TYPE_STRUCT: {
vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
unsigned member = deref_chain->link[idx].id;
- nir_ssa_def *mem_offset = nir_imm_int(&b->nb, type->offsets[member]);
- offset = nir_iadd(&b->nb, offset, mem_offset);
+ offset = nir_iadd_imm(&b->nb, offset, type->offsets[member]);
type = type->members[member];
+ access |= type->access;
break;
}
ptr->type = type;
ptr->block_index = block_index;
ptr->offset = offset;
+ ptr->access = access;
return ptr;
}
vtn_assert(ptr_type->deref->type == var->type->type);
pointer->ptr_type = ptr_type;
pointer->var = var;
+ pointer->access = var->access | var->type->access;
return pointer;
}
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_ssa_def *index, nir_ssa_def *offset,
unsigned access_offset, unsigned access_size,
- struct vtn_ssa_value **inout, const struct glsl_type *type)
+ struct vtn_ssa_value **inout, const struct glsl_type *type,
+ enum gl_access_qualifier access)
{
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
instr->num_components = glsl_get_vector_elements(type);
+ /* Booleans usually shouldn't show up in external memory in SPIR-V.
+ * However, they do for certain older GLSLang versions and can for shared
+ * memory when we lower access chains internally.
+ */
+ const unsigned data_bit_size = glsl_type_is_boolean(type) ? 32 :
+ glsl_get_bit_size(type);
+
int src = 0;
if (!load) {
nir_intrinsic_set_write_mask(instr, (1 << instr->num_components) - 1);
nir_intrinsic_set_range(instr, access_size);
}
+ if (op == nir_intrinsic_load_ssbo ||
+ op == nir_intrinsic_store_ssbo) {
+ nir_intrinsic_set_access(instr, access);
+ }
+
if (index)
instr->src[src++] = nir_src_for_ssa(index);
if (load) {
nir_ssa_dest_init(&instr->instr, &instr->dest,
- instr->num_components,
- glsl_get_bit_size(type), NULL);
+ instr->num_components, data_bit_size, NULL);
(*inout)->def = &instr->dest.ssa;
}
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_ssa_def *index, nir_ssa_def *offset,
unsigned access_offset, unsigned access_size,
- struct vtn_type *type, struct vtn_ssa_value **inout)
+ struct vtn_type *type, enum gl_access_qualifier access,
+ struct vtn_ssa_value **inout)
{
if (load && *inout == NULL)
*inout = vtn_create_ssa_value(b, type->type);
for (unsigned i = 0; i < num_ops; i++) {
nir_ssa_def *elem_offset =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * col_stride));
+ nir_iadd_imm(&b->nb, offset, i * col_stride);
_vtn_load_store_tail(b, op, load, index, elem_offset,
access_offset, access_size,
&(*inout)->elems[i],
- glsl_vector_type(base_type, vec_width));
+ glsl_vector_type(base_type, vec_width),
+ type->access | access);
}
if (load && type->row_major)
vtn_assert(glsl_type_is_vector_or_scalar(type->type));
_vtn_load_store_tail(b, op, load, index, offset,
access_offset, access_size,
- inout, type->type);
+ inout, type->type,
+ type->access | access);
} else {
/* This is a strided load. We have to load N things separately.
* This is the single column of a row-major matrix case.
nir_ssa_def *per_comp[4];
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_offset =
- nir_iadd(&b->nb, offset,
- nir_imm_int(&b->nb, i * type->stride));
+ nir_iadd_imm(&b->nb, offset, i * type->stride);
struct vtn_ssa_value *comp, temp_val;
if (!load) {
temp_val.def = nir_channel(&b->nb, (*inout)->def, i);
comp = &temp_val;
_vtn_load_store_tail(b, op, load, index, elem_offset,
access_offset, access_size,
- &comp, glsl_scalar_type(base_type));
+ &comp, glsl_scalar_type(base_type),
+ type->access | access);
per_comp[i] = comp->def;
}
unsigned elems = glsl_get_length(type->type);
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_off =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+ nir_iadd_imm(&b->nb, offset, i * type->stride);
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
- type->array_element, &(*inout)->elems[i]);
+ type->array_element,
+ type->array_element->access | access,
+ &(*inout)->elems[i]);
}
return;
}
unsigned elems = glsl_get_length(type->type);
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *elem_off =
- nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+ nir_iadd_imm(&b->nb, offset, type->offsets[i]);
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
- type->members[i], &(*inout)->elems[i]);
+ type->members[i],
+ type->members[i]->access | access,
+ &(*inout)->elems[i]);
}
return;
}
struct vtn_ssa_value *value = NULL;
_vtn_block_load_store(b, op, true, index, offset,
access_offset, access_size,
- src->type, &value);
+ src->type, src->access, &value);
return value;
}
offset = vtn_pointer_to_offset(b, dst, &index);
_vtn_block_load_store(b, op, false, index, offset,
- 0, 0, dst->type, &src);
+ 0, 0, dst->type, dst->access, &src);
}
static void
case SpvBuiltInCullDistance:
*location = VARYING_SLOT_CULL_DIST0;
break;
- case SpvBuiltInVertexIndex:
- *location = SYSTEM_VALUE_VERTEX_ID;
- set_mode_system_value(b, mode);
- break;
case SpvBuiltInVertexId:
- /* Vulkan defines VertexID to be zero-based and reserves the new
- * builtin keyword VertexIndex to indicate the non-zero-based value.
+ case SpvBuiltInVertexIndex:
+ /* The Vulkan spec defines VertexIndex to be non-zero-based and doesn't
+ * allow VertexId. The ARB_gl_spirv spec defines VertexId to be the
+ * same as gl_VertexID, which is non-zero-based, and removes
+ * VertexIndex. Since they're both defined to be non-zero-based, we use
+ * SYSTEM_VALUE_VERTEX_ID for both.
*/
- *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
+ *location = SYSTEM_VALUE_VERTEX_ID;
set_mode_system_value(b, mode);
break;
case SpvBuiltInInstanceIndex:
*location = FRAG_RESULT_STENCIL;
vtn_assert(*mode == nir_var_shader_out);
break;
+ case SpvBuiltInWorkDim:
+ *location = SYSTEM_VALUE_WORK_DIM;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInGlobalSize:
+ *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
+ set_mode_system_value(b, mode);
+ break;
default:
- vtn_fail("unsupported builtin");
+ vtn_fail("unsupported builtin: %u", builtin);
}
}
var_data->read_only = true;
break;
case SpvDecorationNonReadable:
- var_data->image.write_only = true;
+ var_data->image.access |= ACCESS_NON_READABLE;
break;
case SpvDecorationNonWritable:
var_data->read_only = true;
- var_data->image.read_only = true;
+ var_data->image.access |= ACCESS_NON_WRITEABLE;
break;
case SpvDecorationRestrict:
- var_data->image.restrict_flag = true;
+ var_data->image.access |= ACCESS_RESTRICT;
break;
case SpvDecorationVolatile:
- var_data->image._volatile = true;
+ var_data->image.access |= ACCESS_VOLATILE;
break;
case SpvDecorationCoherent:
- var_data->image.coherent = true;
+ var_data->image.access |= ACCESS_COHERENT;
break;
case SpvDecorationComponent:
var_data->location_frac = dec->literals[0];
case SpvDecorationMatrixStride:
case SpvDecorationAliased:
case SpvDecorationUniform:
- case SpvDecorationStream:
- case SpvDecorationOffset:
case SpvDecorationLinkageAttributes:
break; /* Do nothing with these here */
break;
case SpvDecorationXfbBuffer:
+ var_data->explicit_xfb_buffer = true;
+ var_data->xfb_buffer = dec->literals[0];
+ var_data->always_active_io = true;
+ break;
case SpvDecorationXfbStride:
- vtn_warn("Vulkan does not have transform feedback: %s",
- spirv_decoration_to_string(dec->decoration));
+ var_data->explicit_xfb_stride = true;
+ var_data->xfb_stride = dec->literals[0];
+ break;
+ case SpvDecorationOffset:
+ var_data->explicit_offset = true;
+ var_data->offset = dec->literals[0];
+ break;
+
+ case SpvDecorationStream:
+ var_data->stream = dec->literals[0];
break;
case SpvDecorationCPacked:
spirv_decoration_to_string(dec->decoration));
break;
+ case SpvDecorationHlslSemanticGOOGLE:
+ /* HLSL semantic decorations can safely be ignored by the driver. */
+ break;
+
default:
vtn_fail("Unhandled decoration");
}
case SpvDecorationOffset:
vtn_var->offset = dec->literals[0];
break;
+ case SpvDecorationNonWritable:
+ vtn_var->access |= ACCESS_NON_WRITEABLE;
+ break;
+ case SpvDecorationNonReadable:
+ vtn_var->access |= ACCESS_NON_READABLE;
+ break;
+ case SpvDecorationVolatile:
+ vtn_var->access |= ACCESS_VOLATILE;
+ break;
+ case SpvDecorationCoherent:
+ vtn_var->access |= ACCESS_COHERENT;
+ break;
+ case SpvDecorationHlslCounterBufferGOOGLE:
+ /* HLSL semantic decorations can safely be ignored by the driver. */
+ break;
default:
break;
}
*/
if (dec->decoration == SpvDecorationLocation) {
unsigned location = dec->literals[0];
- bool is_vertex_input;
+ bool is_vertex_input = false;
if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += FRAG_RESULT_DATA0;
} else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
location += VERT_ATTRIB_GENERIC0;
} else if (vtn_var->mode == vtn_variable_mode_input ||
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
} else if (vtn_var->mode != vtn_variable_mode_uniform) {
vtn_warn("Location must be on input, output, uniform, sampler or "