+ assert(nir->info.stage == stage);
+ nir_validate_shader(nir, "after spirv_to_nir");
+
+ return nir;
+}
+
+/* Lower nir_intrinsic_load_push_constant to nir_intrinsic_load_uniform.
+ *
+ * Push constants are sourced from the ir3 constant file, so the
+ * byte-based push-constant offset is rebased against the shader's push
+ * constant window and converted to dword units for load_uniform.
+ */
+static void
+lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *instr,
+                         struct tu_shader *shader)
+{
+   nir_intrinsic_instr *load =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
+   load->num_components = instr->num_components;
+   /* base is a byte offset; push_consts.lo appears to be in 16-byte
+    * (vec4) units given the *16 scaling — TODO confirm against the
+    * definition of tu_push_constant_range.  The load must fall inside
+    * the shader's push-constant window.
+    */
+   uint32_t base = nir_intrinsic_base(instr);
+   assert(base % 4 == 0);
+   assert(base >= shader->push_consts.lo * 16);
+   base -= shader->push_consts.lo * 16;
+   /* Convert bytes to dwords: both the static base and the dynamic
+    * offset (right-shifted by 2 below).
+    */
+   nir_intrinsic_set_base(load, base / 4);
+   load->src[0] =
+      nir_src_for_ssa(nir_ushr(b, instr->src[0].ssa, nir_imm_int(b, 2)));
+   nir_ssa_dest_init(&load->instr, &load->dest,
+                     load->num_components, instr->dest.ssa.bit_size,
+                     instr->dest.ssa.name);
+   nir_builder_instr_insert(b, &load->instr);
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+   nir_instr_remove(&instr->instr);
+}
+
+/* Lower nir_intrinsic_vulkan_resource_index to the ir3 bindless model.
+ *
+ * The (set, binding) pair plus the dynamic array index are folded into a
+ * bindless_resource_ir3 intrinsic that carries a descriptor-set index
+ * and a descriptor offset within that set.  Dynamic UBO/SSBO
+ * descriptors are redirected to the reserved MAX_SETS set, placed after
+ * the input attachments.
+ */
+static void
+lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
+                            struct tu_shader *shader,
+                            const struct tu_pipeline_layout *layout)
+{
+   /* src[0] is the array index within the binding. */
+   nir_ssa_def *vulkan_idx = instr->src[0].ssa;
+
+   unsigned set = nir_intrinsic_desc_set(instr);
+   unsigned binding = nir_intrinsic_binding(instr);
+   struct tu_descriptor_set_layout *set_layout = layout->set[set].layout;
+   struct tu_descriptor_set_binding_layout *binding_layout =
+      &set_layout->binding[binding];
+   uint32_t base;
+
+   switch (binding_layout->type) {
+   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+      /* Dynamic descriptors live in the extra MAX_SETS set, after the
+       * input attachments.
+       */
+      base = layout->set[set].dynamic_offset_start +
+         binding_layout->dynamic_offset_offset +
+         layout->input_attachment_count;
+      set = MAX_SETS;
+      break;
+   default:
+      /* binding_layout->offset is presumably in bytes; dividing by
+       * 4 * A6XX_TEX_CONST_DWORDS (bytes per descriptor slot) yields a
+       * descriptor index — TODO confirm against descriptor set layout
+       * construction.
+       */
+      base = binding_layout->offset / (4 * A6XX_TEX_CONST_DWORDS);
+      break;
+   }
+
+   nir_intrinsic_instr *bindless =
+      nir_intrinsic_instr_create(b->shader,
+                                 nir_intrinsic_bindless_resource_ir3);
+   bindless->num_components = 1;
+   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+                     1, 32, NULL);
+   nir_intrinsic_set_desc_set(bindless, set);
+   /* Final descriptor index = static base + dynamic array index. */
+   bindless->src[0] = nir_src_for_ssa(nir_iadd(b, nir_imm_int(b, base), vulkan_idx));
+   nir_builder_instr_insert(b, &bindless->instr);
+
+   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                            nir_src_for_ssa(&bindless->dest.ssa));
+   nir_instr_remove(&instr->instr);
+}
+
+/* Build a bindless_resource_ir3 handle for a texture/sampler/image
+ * deref.
+ *
+ * Computes the descriptor offset of the dereferenced variable within
+ * its descriptor set (input attachments are redirected to the reserved
+ * MAX_SETS set), adds any array index, and emits the bindless
+ * intrinsic.  Returns the handle's SSA def.
+ */
+static nir_ssa_def *
+build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
+               struct tu_shader *shader,
+               const struct tu_pipeline_layout *layout)
+{
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+
+   unsigned set = var->data.descriptor_set;
+   unsigned binding = var->data.binding;
+   const struct tu_descriptor_set_binding_layout *bind_layout =
+      &layout->set[set].layout->binding[binding];
+
+   nir_ssa_def *desc_offset;
+   unsigned descriptor_stride;
+   if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
+      /* Input attachments are packed one descriptor apart in the
+       * reserved MAX_SETS set.
+       */
+      unsigned offset =
+         layout->set[set].input_attachment_start +
+         bind_layout->input_attachment_offset;
+      desc_offset = nir_imm_int(b, offset);
+      set = MAX_SETS;
+      descriptor_stride = 1;
+   } else {
+      unsigned offset = 0;
+      /* Samplers come second in combined image/sampler descriptors, see
+       * write_combined_image_sampler_descriptor().
+       */
+      if (is_sampler && bind_layout->type ==
+          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+         offset = 1;
+      }
+      /* Offsets and strides are in units of one descriptor slot,
+       * presumably 4 * A6XX_TEX_CONST_DWORDS bytes each — TODO confirm
+       * against descriptor set layout construction.
+       */
+      desc_offset =
+         nir_imm_int(b, (bind_layout->offset / (4 * A6XX_TEX_CONST_DWORDS)) +
+                     offset);
+      descriptor_stride = bind_layout->size / (4 * A6XX_TEX_CONST_DWORDS);
+   }
+
+   if (deref->deref_type != nir_deref_type_var) {
+      assert(deref->deref_type == nir_deref_type_array);
+
+      /* Arrayed bindings: step by the per-element descriptor stride. */
+      nir_ssa_def *arr_index = nir_ssa_for_src(b, deref->arr.index, 1);
+      desc_offset = nir_iadd(b, desc_offset,
+                             nir_imul_imm(b, arr_index, descriptor_stride));
+   }
+
+   nir_intrinsic_instr *bindless =
+      nir_intrinsic_instr_create(b->shader,
+                                 nir_intrinsic_bindless_resource_ir3);
+   bindless->num_components = 1;
+   nir_ssa_dest_init(&bindless->instr, &bindless->dest,
+                     1, 32, NULL);
+   nir_intrinsic_set_desc_set(bindless, set);
+   bindless->src[0] = nir_src_for_ssa(desc_offset);
+   nir_builder_instr_insert(b, &bindless->instr);
+
+   return &bindless->dest.ssa;
+}
+
+/* Replace an image intrinsic's deref source (src[0]) with an ir3
+ * bindless handle.
+ */
+static void
+lower_image_deref(nir_builder *b,
+                  nir_intrinsic_instr *instr, struct tu_shader *shader,
+                  const struct tu_pipeline_layout *layout)
+{
+   nir_deref_instr *image_deref = nir_src_as_deref(instr->src[0]);
+   nir_ssa_def *handle =
+      build_bindless(b, image_deref, false, shader, layout);
+   nir_rewrite_image_intrinsic(instr, handle, true);
+}
+
+/* Dispatch the descriptor-related intrinsics this pass rewrites.
+ * Returns true when the instruction was lowered and removed/replaced.
+ */
+static bool
+lower_intrinsic(nir_builder *b, nir_intrinsic_instr *instr,
+                struct tu_shader *shader,
+                const struct tu_pipeline_layout *layout)
+{
+   const nir_intrinsic_op op = instr->intrinsic;
+
+   if (op == nir_intrinsic_load_layer_id) {
+      /* TODO: remove this when layered rendering is implemented */
+      nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+                               nir_src_for_ssa(nir_imm_int(b, 0)));
+      nir_instr_remove(&instr->instr);
+      return true;
+   }
+
+   if (op == nir_intrinsic_load_push_constant) {
+      lower_load_push_constant(b, instr, shader);
+      return true;
+   }
+
+   if (op == nir_intrinsic_vulkan_resource_index) {
+      lower_vulkan_resource_index(b, instr, shader, layout);
+      return true;
+   }
+
+   /* Everything that takes an image deref in src[0]. */
+   switch (op) {
+   case nir_intrinsic_image_deref_load:
+   case nir_intrinsic_image_deref_store:
+   case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_image_deref_atomic_xor:
+   case nir_intrinsic_image_deref_atomic_exchange:
+   case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_image_deref_size:
+   case nir_intrinsic_image_deref_samples:
+      lower_image_deref(b, instr, shader, layout);
+      return true;
+
+   default:
+      return false;
+   }
+}
+
+/* Apply YCbCr-to-RGB conversion after a texture fetch when the sampled
+ * binding has an immutable VkSamplerYcbcrConversion attached.
+ *
+ * No-op when the binding has no ycbcr samplers, for query ops
+ * (txs/query_levels/lod), or for the RGB-identity model.  The builder
+ * cursor is restored to before the tex instruction on exit.
+ */
+static void
+lower_tex_ycbcr(const struct tu_pipeline_layout *layout,
+                nir_builder *builder,
+                nir_tex_instr *tex)
+{
+   int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
+   assert(deref_src_idx >= 0);
+   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);
+
+   nir_variable *var = nir_deref_instr_get_variable(deref);
+   const struct tu_descriptor_set_layout *set_layout =
+      layout->set[var->data.descriptor_set].layout;
+   const struct tu_descriptor_set_binding_layout *binding =
+      &set_layout->binding[var->data.binding];
+   const struct tu_sampler_ycbcr_conversion *ycbcr_samplers =
+      tu_immutable_ycbcr_samplers(set_layout, binding);
+
+   if (!ycbcr_samplers)
+      return;
+
+   /* For the following instructions, we don't apply any change */
+   if (tex->op == nir_texop_txs ||
+       tex->op == nir_texop_query_levels ||
+       tex->op == nir_texop_lod)
+      return;
+
+   assert(tex->texture_index == 0);
+   unsigned array_index = 0;
+   if (deref->deref_type != nir_deref_type_var) {
+      assert(deref->deref_type == nir_deref_type_array);
+      /* A non-constant index could select any conversion in the
+       * binding, so nothing can be done statically.
+       */
+      if (!nir_src_is_const(deref->arr.index))
+         return;
+      array_index = nir_src_as_uint(deref->arr.index);
+      array_index = MIN2(array_index, binding->array_size - 1);
+   }
+   const struct tu_sampler_ycbcr_conversion *ycbcr_sampler = ycbcr_samplers + array_index;
+
+   if (ycbcr_sampler->ycbcr_model == VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY)
+      return;
+
+   /* Insert the conversion after the fetch so it consumes the raw
+    * sampled value.
+    */
+   builder->cursor = nir_after_instr(&tex->instr);
+
+   uint8_t bits = vk_format_get_component_bits(ycbcr_sampler->format,
+                                               UTIL_FORMAT_COLORSPACE_RGB,
+                                               PIPE_SWIZZLE_X);
+   uint32_t bpcs[3] = {bits, bits, bits}; /* TODO: use right bpc for each channel ? */
+   nir_ssa_def *result = nir_convert_ycbcr_to_rgb(builder,
+                                                  ycbcr_sampler->ycbcr_model,
+                                                  ycbcr_sampler->ycbcr_range,
+                                                  &tex->dest.ssa,
+                                                  bpcs);
+   /* Rewrite only uses after the conversion, so the conversion itself
+    * still reads the original tex result.
+    */
+   nir_ssa_def_rewrite_uses_after(&tex->dest.ssa, nir_src_for_ssa(result),
+                                  result->parent_instr);
+
+   /* Put the cursor back where the caller expects it. */
+   builder->cursor = nir_before_instr(&tex->instr);
+}
+
+/* Lower a texture instruction: apply YCbCr conversion if needed, then
+ * replace its sampler/texture deref sources with ir3 bindless handles.
+ *
+ * The ycbcr lowering runs first because it needs the texture deref,
+ * which is rewritten to a handle below.  Always returns true.
+ */
+static bool
+lower_tex(nir_builder *b, nir_tex_instr *tex,
+          struct tu_shader *shader, const struct tu_pipeline_layout *layout)
+{
+   lower_tex_ycbcr(layout, b, tex);
+
+   int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
+   if (sampler_src_idx >= 0) {
+      nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
+      /* is_sampler = true: selects the sampler half of a combined
+       * image/sampler descriptor.
+       */
+      nir_ssa_def *bindless = build_bindless(b, deref, true, shader, layout);
+      nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
+                            nir_src_for_ssa(bindless));
+      tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
+   }
+
+   int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
+   if (tex_src_idx >= 0) {
+      nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
+      nir_ssa_def *bindless = build_bindless(b, deref, false, shader, layout);
+      nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
+                            nir_src_for_ssa(bindless));
+      tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
+   }
+
+   return true;
+}
+
+static bool
+lower_impl(nir_function_impl *impl, struct tu_shader *shader,
+ const struct tu_pipeline_layout *layout)
+{
+ nir_builder b;
+ nir_builder_init(&b, impl);
+ bool progress = false;
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr_safe(instr, block) {
+ b.cursor = nir_before_instr(instr);
+ switch (instr->type) {
+ case nir_instr_type_tex:
+ progress |= lower_tex(&b, nir_instr_as_tex(instr), shader, layout);
+ break;
+ case nir_instr_type_intrinsic:
+ progress |= lower_intrinsic(&b, nir_instr_as_intrinsic(instr), shader, layout);
+ break;
+ default:
+ break;
+ }
+ }
+ }