# Ask the driver for the size of a given buffer. It takes the buffer index
# as source.
-intrinsic("get_buffer_size", src_comp=[1], dest_comp=1,
+intrinsic("get_buffer_size", src_comp=[-1], dest_comp=1,
flags=[CAN_ELIMINATE, CAN_REORDER])
# The vulkan_resource_reindex intrinsic takes a resource index in src0
# (the result of a vulkan_resource_index or vulkan_resource_reindex) which
# corresponds to the tuple (set, binding, index) and computes an index
# corresponding to tuple (set, binding, idx + src1).
-intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=1,
+intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=0,
indices=[DESC_SET, BINDING, DESC_TYPE],
flags=[CAN_ELIMINATE, CAN_REORDER])
-intrinsic("vulkan_resource_reindex", src_comp=[1, 1], dest_comp=1,
+intrinsic("vulkan_resource_reindex", src_comp=[0, 1], dest_comp=0,
indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
-intrinsic("load_vulkan_descriptor", src_comp=[1], dest_comp=0,
+intrinsic("load_vulkan_descriptor", src_comp=[-1], dest_comp=0,
indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
# variable atomic intrinsics
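For context (not part of this patch): in nir_intrinsics.py a dest_comp of 0 means the destination width comes from the instruction's num_components, and a negative src_comp (as used here) lets a source carry however many components its SSA value has, which is what allows these intrinsics to grow from scalars to full (index, offset) pointers. Below is a minimal, hypothetical sketch of a driver pass reading the (set, binding, array index) tuple off a vulkan_resource_index, using the accessors generated from the DESC_SET/BINDING indices; the surface-index mapping is a made-up placeholder.

#include "nir_builder.h"

/* Illustrative sketch only: map a vulkan_resource_index intrinsic to a flat
 * surface index.  set_base_surface_index is a hypothetical per-set table.
 */
static nir_ssa_def *
lower_resource_index_sketch(nir_builder *b, nir_intrinsic_instr *intrin,
                            const uint32_t *set_base_surface_index)
{
   assert(intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

   unsigned set = nir_intrinsic_desc_set(intrin);
   unsigned binding = nir_intrinsic_binding(intrin);

   /* src[0] is the (possibly indirect) index into the binding's array. */
   assert(intrin->src[0].is_ssa);
   nir_ssa_def *array_index = intrin->src[0].ssa;

   /* Hypothetical (set, binding) -> surface index mapping. */
   uint32_t surface = set_base_surface_index[set] + binding;

   return nir_iadd_imm(b, array_index, surface);
}

Under the vec2 model this patch moves to, such a pass would instead build a (surface index, 0) pair, as the pipeline-layout lowering further down in this diff does.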
}
}
+static const struct glsl_type *
+vtn_ptr_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+   switch (mode) {
+   case vtn_variable_mode_ubo:
+      return b->options->ubo_ptr_type;
+   case vtn_variable_mode_ssbo:
+      return b->options->ssbo_ptr_type;
+   default:
+      vtn_fail("Invalid mode for vulkan_resource_index");
+   }
+}
+
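The helper above hands back whatever pointer type the driver advertised through its SPIR-V-to-NIR options. A hedged sketch of how a driver targeting nir_address_format_vk_index_offset might fill in those fields (field names taken from this diff, struct name assumed to be the usual spirv_to_nir_options, initializer shown at function scope since glsl_vector_type() is a runtime call):

   /* uvec2 pointers: (binding table index, byte offset) */
   const struct spirv_to_nir_options spirv_options = {
      .lower_ubo_ssbo_access_to_offsets = false,
      .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
      .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
   };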
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
nir_ssa_def *desc_array_index)
nir_intrinsic_set_binding(instr, var->binding);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : vtn_ptr_type_for_mode(b, var->mode);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
instr->src[1] = nir_src_for_ssa(offset_index);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : vtn_ptr_type_for_mode(b, mode);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
- const struct glsl_type *desc_type, nir_ssa_def *desc_index)
+ nir_ssa_def *desc_index)
{
nir_intrinsic_instr *desc_load =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_load_vulkan_descriptor);
desc_load->src[0] = nir_src_for_ssa(desc_index);
- desc_load->num_components = glsl_get_vector_elements(desc_type);
nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
+
+ const struct glsl_type *ptr_type = vtn_ptr_type_for_mode(b, mode);
+
+ desc_load->num_components = glsl_get_vector_elements(ptr_type);
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
desc_load->num_components,
- glsl_get_bit_size(desc_type), NULL);
+ glsl_get_bit_size(ptr_type), NULL);
nir_builder_instr_insert(&b->nb, &desc_load->instr);
return &desc_load->dest.ssa;
* final block index. Insert a descriptor load and cast to a deref to
* start the deref chain.
*/
- nir_ssa_def *desc =
- vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);
+ nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
assert(base->mode == vtn_variable_mode_ssbo ||
base->mode == vtn_variable_mode_ubo);
if (vtn_pointer_is_external_block(b, ptr) &&
vtn_type_contains_block(b, ptr->type) &&
ptr->mode != vtn_variable_mode_phys_ssbo) {
- const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
- const unsigned num_components =
- glsl_get_vector_elements(ptr->ptr_type->type);
-
/* In this case, we're looking for a block index and not an actual
* deref.
*
ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
}
- /* A block index is just a 32-bit value but the pointer has some
- * other dimensionality. Cram it in there and we'll unpack it later
- * in vtn_pointer_from_ssa.
- */
- const unsigned swiz[4] = { 0, };
- return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
- swiz, num_components, false);
+ return ptr->block_index;
} else {
return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
}
} else if (vtn_type_contains_block(b, ptr->type) &&
ptr->mode != vtn_variable_mode_phys_ssbo) {
/* This is a pointer to somewhere in an array of blocks, not a
- * pointer to somewhere inside the block. We squashed it into a
- * random vector type before so just pick off the first channel and
- * cast it back to 32 bits.
+ * pointer to somewhere inside the block. Set the block index
+ * instead of making a cast.
*/
- ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
+ ptr->block_index = ssa;
} else {
/* This is a pointer to something internal or a pointer inside a
* block. It's just a regular cast.
if (nir_src_is_const(intrin->src[0]) || state->add_bounds_checks)
array_index = nir_umin(b, array_index, nir_imm_int(b, array_size - 1));
- nir_ssa_def *block_index = nir_iadd_imm(b, array_index, surface_index);
+ /* We're using nir_address_format_vk_index_offset */
+ nir_ssa_def *index =
+ nir_vec2(b, nir_iadd_imm(b, array_index, surface_index),
+ nir_imm_int(b, 0));
assert(intrin->dest.is_ssa);
- nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
nir_instr_remove(&intrin->instr);
}
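Under nir_address_format_vk_index_offset the value flowing out of these intrinsics is a uvec2: component 0 is the binding table index, component 1 a byte offset. A small sketch (hypothetical helper, not part of this patch) of how a consumer would take such an address back apart:

/* Split a vk_index_offset address into its surface index and byte offset. */
static void
split_index_offset_addr(nir_builder *b, nir_ssa_def *addr,
                        nir_ssa_def **index, nir_ssa_def **offset)
{
   *index = nir_channel(b, addr, 0);   /* binding table index */
   *offset = nir_channel(b, addr, 1);  /* byte offset */
}

The reindex lowering just below keeps that shape: the delta is added to component 0 and component 1 is carried through unchanged.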
* add of the two indices.
*/
assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
- nir_ssa_def *new_index = nir_iadd(b, intrin->src[0].ssa,
- intrin->src[1].ssa);
+ nir_ssa_def *old_index = intrin->src[0].ssa;
+ nir_ssa_def *offset = intrin->src[1].ssa;
+
+ nir_ssa_def *new_index =
+ nir_vec2(b, nir_iadd(b, nir_channel(b, old_index, 0), offset),
+ nir_channel(b, old_index, 1));
assert(intrin->dest.is_ssa);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(new_index));
/* We follow the nir_address_format_vk_index_offset model */
assert(intrin->src[0].is_ssa);
- nir_ssa_def *vec2 = nir_vec2(b, intrin->src[0].ssa, nir_imm_int(b, 0));
+ nir_ssa_def *index = intrin->src[0].ssa;
assert(intrin->dest.is_ssa);
- nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(vec2));
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(index));
nir_instr_remove(&intrin->instr);
}
+static void
+lower_get_buffer_size(nir_intrinsic_instr *intrin,
+                      struct apply_pipeline_layout_state *state)
+{
+   nir_builder *b = &state->builder;
+
+   b->cursor = nir_before_instr(&intrin->instr);
+
+   assert(intrin->src[0].is_ssa);
+   nir_ssa_def *index = intrin->src[0].ssa;
+
+   /* We're following the nir_address_format_vk_index_offset model so the
+    * binding table index is the first component of the address. The
+    * back-end wants a scalar binding table index source.
+    */
+   nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
+                         nir_src_for_ssa(nir_channel(b, index, 0)));
+}
+
static void
lower_image_intrinsic(nir_intrinsic_instr *intrin,
struct apply_pipeline_layout_state *state)
case nir_intrinsic_load_vulkan_descriptor:
lower_load_vulkan_descriptor(intrin, state);
break;
+ case nir_intrinsic_get_buffer_size:
+ lower_get_buffer_size(intrin, state);
+ break;
case nir_intrinsic_image_deref_load:
case nir_intrinsic_image_deref_store:
case nir_intrinsic_image_deref_atomic_add: