+ const VkDescriptorType desc_type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+
+ assert(intrin->src[0].is_ssa);
+ nir_ssa_def *index = intrin->src[0].ssa;
+
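+ /* With A64 buffer access, the SSBO descriptor itself contains the buffer
+ * size (its third component), so we can answer the query by loading the
+ * descriptor directly. Otherwise, leave the intrinsic for the back-end
+ * and just scalarize its binding table index source below.
+ */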
+ if (state->pdevice->has_a64_buffer_access) {
+ nir_ssa_def *desc = build_ssbo_descriptor_load(desc_type, index, state);
+ nir_ssa_def *size = nir_channel(b, desc, 2);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(size));
+ nir_instr_remove(&intrin->instr);
+ } else {
+ /* We're following the nir_address_format_32bit_index_offset model so
+ * the binding table index is the first component of the address. The
+ * back-end wants a scalar binding table index source.
+ */
+ nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
+ nir_src_for_ssa(nir_channel(b, index, 0)));
+ }
+}
+
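+/* Loads the descriptor for the binding referenced by the given deref from
+ * the descriptor set's backing buffer. The buffer is visible to the shader
+ * as a UBO at the set's desc_offset, so the load is emitted as an indirect
+ * load_ubo at the binding's descriptor_offset plus the caller's byte
+ * offset.
+ */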
+static nir_ssa_def *
+build_descriptor_load(nir_deref_instr *deref, unsigned offset,
+ unsigned num_components, unsigned bit_size,
+ struct apply_pipeline_layout_state *state)
+{
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+
+ unsigned set = var->data.descriptor_set;
+ unsigned binding = var->data.binding;
+ const struct anv_descriptor_set_binding_layout *bind_layout =
+ &state->layout->set[set].layout->binding[binding];
+
+ unsigned array_size = bind_layout->array_size;
+
+ nir_builder *b = &state->builder;
+
+ nir_ssa_def *desc_buffer_index =
+ nir_imm_int(b, state->set[set].desc_offset);
+
+ nir_ssa_def *desc_offset =
+ nir_imm_int(b, bind_layout->descriptor_offset + offset);
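+ /* For arrayed bindings, fold the (optionally bounds-checked) array index
+ * into the offset at descriptor-size granularity.
+ */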
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->deref_type == nir_deref_type_array);
+
+ const unsigned descriptor_size = anv_descriptor_size(bind_layout);
+ nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+ if (state->add_bounds_checks)
+ arr_index = nir_umin(b, arr_index, nir_imm_int(b, array_size - 1));
+
+ desc_offset = nir_iadd(b, desc_offset,
+ nir_imul_imm(b, arr_index, descriptor_size));
+ }
+
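+ /* The actual descriptor fetch is just an indirect UBO load from the
+ * descriptor buffer.
+ */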
+ nir_intrinsic_instr *desc_load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ desc_load->src[0] = nir_src_for_ssa(desc_buffer_index);
+ desc_load->src[1] = nir_src_for_ssa(desc_offset);
+ desc_load->num_components = num_components;
+ nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
+ num_components, bit_size, NULL);
+ nir_builder_instr_insert(b, &desc_load->instr);
+
+ return &desc_load->dest.ssa;
+}
+
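+/* Lowers an image intrinsic to one of three forms: image param queries are
+ * answered directly from the descriptor, bindless images get a handle
+ * loaded from the descriptor, and everything else gets a binding table
+ * index.
+ */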
+static void
+lower_image_intrinsic(nir_intrinsic_instr *intrin,
+ struct apply_pipeline_layout_state *state)
+{
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+
+ unsigned set = var->data.descriptor_set;
+ unsigned binding = var->data.binding;
+ unsigned binding_offset = state->set[set].surface_offsets[binding];
+
+ nir_builder *b = &state->builder;
+ b->cursor = nir_before_instr(&intrin->instr);
+
+ ASSERTED const bool use_bindless = state->pdevice->has_bindless_images;
+
+ if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
+ b->cursor = nir_instr_remove(&intrin->instr);
+
+ assert(!use_bindless); /* Otherwise our offsets would be wrong */
+ const unsigned param = nir_intrinsic_base(intrin);
+
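+ /* Each image param is stored as a vec4 in the descriptor, hence the
+ * 16-byte stride.
+ */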
+ nir_ssa_def *desc =
+ build_descriptor_load(deref, param * 16,
+ intrin->dest.ssa.num_components,
+ intrin->dest.ssa.bit_size, state);
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
+ } else if (binding_offset > MAX_BINDING_TABLE_SIZE) {
+ const bool write_only =
+ (var->data.image.access & ACCESS_NON_READABLE) != 0;
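+ /* The bindless image descriptor packs two 32-bit handles: read/write in
+ * component 0 and write-only in component 1; pick the one matching the
+ * image's access.
+ */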
+ nir_ssa_def *desc =
+ build_descriptor_load(deref, 0, 2, 32, state);
+ nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
+ nir_rewrite_image_intrinsic(intrin, handle, true);
+ } else {
+ unsigned array_size =
+ state->layout->set[set].layout->binding[binding].array_size;
+
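+ /* Binding table path: compute a (clamped) array index and add the
+ * binding's surface offset to form the binding table index.
+ */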
+ nir_ssa_def *index = NULL;
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->deref_type == nir_deref_type_array);
+ index = nir_ssa_for_src(b, deref->arr.index, 1);
+ if (state->add_bounds_checks)
+ index = nir_umin(b, index, nir_imm_int(b, array_size - 1));
+ } else {
+ index = nir_imm_int(b, 0);
+ }
+
+ index = nir_iadd_imm(b, index, binding_offset);
+ nir_rewrite_image_intrinsic(intrin, index, false);
+ }
+}
+
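+/* Lowers load_constant to a UBO load from the shader constant data buffer
+ * that lives at state->constants_offset in the binding table.
+ */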
+static void
+lower_load_constant(nir_intrinsic_instr *intrin,
+ struct apply_pipeline_layout_state *state)
+{
+ nir_builder *b = &state->builder;
+
+ b->cursor = nir_before_instr(&intrin->instr);
+
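+ /* The intrinsic's base is a fixed byte offset into the constant data
+ * buffer; add it to the variable offset source.
+ */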
+ nir_ssa_def *index = nir_imm_int(b, state->constants_offset);
+ nir_ssa_def *offset = nir_iadd(b, nir_ssa_for_src(b, intrin->src[0], 1),
+ nir_imm_int(b, nir_intrinsic_base(intrin)));
+
+ nir_intrinsic_instr *load_ubo =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ load_ubo->num_components = intrin->num_components;
+ load_ubo->src[0] = nir_src_for_ssa(index);
+ load_ubo->src[1] = nir_src_for_ssa(offset);
+ nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
+ intrin->dest.ssa.num_components,
+ intrin->dest.ssa.bit_size, NULL);
+ nir_builder_instr_insert(b, &load_ubo->instr);
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+ nir_src_for_ssa(&load_ubo->dest.ssa));
+ nir_instr_remove(&intrin->instr);
+}
+
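+/* Lowers a texture or sampler deref source on the given nir_tex_instr,
+ * if present, for the selected plane.
+ */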
+static void
+lower_tex_deref(nir_tex_instr *tex, nir_tex_src_type deref_src_type,
+ unsigned *base_index, unsigned plane,
+ struct apply_pipeline_layout_state *state)
+{
+ int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
+ if (deref_src_idx < 0)