+static unsigned
+type_scalar_size_bytes(const struct glsl_type *type)
+{
+ assert(glsl_type_is_vector_or_scalar(type) ||
+ glsl_type_is_matrix(type));
+ return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
+}
+
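+/* Adds a scalar byte offset to an address, respecting the layout of the
+ * given address format: the offset lands in the single component for the
+ * plain global/offset formats, in the last component for the index+offset
+ * and bounded-global formats, and in the low 32 bits for the pack64 format.
+ */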
+static nir_ssa_def *
+build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, nir_ssa_def *offset)
+{
+ assert(offset->num_components == 1);
+
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ case nir_address_format_32bit_offset:
+ assert(addr->bit_size == offset->bit_size);
+ assert(addr->num_components == 1);
+ return nir_iadd(b, addr, offset);
+
+ case nir_address_format_32bit_offset_as_64bit:
+ assert(addr->num_components == 1);
+ assert(offset->bit_size == 32);
+ return nir_u2u64(b, nir_iadd(b, nir_u2u32(b, addr), offset));
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
+ assert(addr->bit_size == offset->bit_size);
+ return nir_vec4(b, nir_channel(b, addr, 0),
+ nir_channel(b, addr, 1),
+ nir_channel(b, addr, 2),
+ nir_iadd(b, nir_channel(b, addr, 3), offset));
+
+ case nir_address_format_32bit_index_offset:
+ assert(addr->num_components == 2);
+ assert(addr->bit_size == offset->bit_size);
+ return nir_vec2(b, nir_channel(b, addr, 0),
+ nir_iadd(b, nir_channel(b, addr, 1), offset));
+
+ case nir_address_format_32bit_index_offset_pack64:
+ assert(addr->num_components == 1);
+ assert(offset->bit_size == 32);
+ return nir_pack_64_2x32_split(b,
+ nir_iadd(b, nir_unpack_64_2x32_split_x(b, addr), offset),
+ nir_unpack_64_2x32_split_y(b, addr));
+
+ case nir_address_format_vec2_index_32bit_offset:
+ assert(addr->num_components == 3);
+ assert(offset->bit_size == 32);
+ return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
+ nir_iadd(b, nir_channel(b, addr, 2), offset));
+
+ case nir_address_format_logical:
+ unreachable("Unsupported address format");
+ }
+ unreachable("Invalid address format");
+}
+
+static unsigned
+addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
+{
+ if (addr_format == nir_address_format_32bit_offset_as_64bit ||
+ addr_format == nir_address_format_32bit_index_offset_pack64)
+ return 32;
+ return addr->bit_size;
+}
+
+static nir_ssa_def *
+build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, int64_t offset)
+{
+ return build_addr_iadd(b, addr, addr_format,
+ nir_imm_intN_t(b, offset,
+ addr_get_offset_bit_size(addr, addr_format)));
+}
+
+static nir_ssa_def *
+addr_to_index(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_index_offset:
+ assert(addr->num_components == 2);
+ return nir_channel(b, addr, 0);
+ case nir_address_format_32bit_index_offset_pack64:
+ return nir_unpack_64_2x32_split_y(b, addr);
+ case nir_address_format_vec2_index_32bit_offset:
+ assert(addr->num_components == 3);
+ return nir_channels(b, addr, 0x3);
+ default:
+ unreachable("Invalid address format");
+ }
+}
+
+static nir_ssa_def *
+addr_to_offset(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_index_offset:
+ assert(addr->num_components == 2);
+ return nir_channel(b, addr, 1);
+ case nir_address_format_32bit_index_offset_pack64:
+ return nir_unpack_64_2x32_split_x(b, addr);
+ case nir_address_format_vec2_index_32bit_offset:
+ assert(addr->num_components == 3);
+ return nir_channel(b, addr, 2);
+ case nir_address_format_32bit_offset:
+ return addr;
+ case nir_address_format_32bit_offset_as_64bit:
+ return nir_u2u32(b, addr);
+ default:
+ unreachable("Invalid address format");
+ }
+}
+
+/** Returns true if the given address format resolves to a global address */
+static bool
+addr_format_is_global(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_32bit_global ||
+ addr_format == nir_address_format_64bit_global ||
+ addr_format == nir_address_format_64bit_bounded_global;
+}
+
+static bool
+addr_format_is_offset(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_32bit_offset ||
+ addr_format == nir_address_format_32bit_offset_as_64bit;
+}
+
+static nir_ssa_def *
+addr_to_global(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ assert(addr->num_components == 1);
+ return addr;
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
+ return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
+ nir_u2u64(b, nir_channel(b, addr, 3)));
+
+ case nir_address_format_32bit_index_offset:
+ case nir_address_format_32bit_index_offset_pack64:
+ case nir_address_format_vec2_index_32bit_offset:
+ case nir_address_format_32bit_offset:
+ case nir_address_format_32bit_offset_as_64bit:
+ case nir_address_format_logical:
+ unreachable("Cannot get a 64-bit address with this address format");
+ }
+
+ unreachable("Invalid address format");
+}
+
+static bool
+addr_format_needs_bounds_check(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_64bit_bounded_global;
+}
+
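+/* For nir_address_format_64bit_bounded_global the address is a uvec4 of
+ * {addr_lo, addr_hi, bound, offset}. An access of the given size is in
+ * bounds if offset + size <= bound.
+ */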
+static nir_ssa_def *
+addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, unsigned size)
+{
+ assert(addr_format == nir_address_format_64bit_bounded_global);
+ assert(addr->num_components == 4);
+ return nir_ige(b, nir_channel(b, addr, 2),
+ nir_iadd_imm(b, nir_channel(b, addr, 3), size));
+}
+
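+/* Emits the load intrinsic matching the deref's mode and address format and
+ * returns the loaded value. When the format requires bounds checking, the
+ * load is emitted under an if and out-of-bounds reads return zero.
+ */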
+static nir_ssa_def *
+build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format,
+ unsigned num_components)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ubo:
+ op = nir_intrinsic_load_ubo;
+ break;
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_load_global;
+ else
+ op = nir_intrinsic_load_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_global;
+ break;
+ case nir_var_shader_in:
+ assert(addr_format_is_offset(addr_format));
+ op = nir_intrinsic_load_kernel_input;
+ break;
+ case nir_var_mem_shared:
+ assert(addr_format_is_offset(addr_format));
+ op = nir_intrinsic_load_shared;
+ break;
+ case nir_var_shader_temp:
+ case nir_var_function_temp:
+ if (addr_format_is_offset(addr_format)) {
+ op = nir_intrinsic_load_scratch;
+ } else {
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_global;
+ }
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
+
+ if (addr_format_is_global(addr_format)) {
+ load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else if (addr_format_is_offset(addr_format)) {
+ assert(addr->num_components == 1);
+ load->src[0] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ } else {
+ load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+
+ if (nir_intrinsic_has_access(load))
+ nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
+
+ unsigned bit_size = intrin->dest.ssa.bit_size;
+ if (bit_size == 1) {
+ /* TODO: Make the native bool bit_size an option. */
+ bit_size = 32;
+ }
+
+ /* TODO: We should try and provide a better alignment. For OpenCL, we need
+ * to plumb the alignment through from SPIR-V when we have one.
+ */
+ nir_intrinsic_set_align(load, bit_size / 8, 0);
+
+ assert(intrin->dest.is_ssa);
+ load->num_components = num_components;
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components,
+ bit_size, intrin->dest.ssa.name);
+
+ assert(bit_size % 8 == 0);
+
+ nir_ssa_def *result;
+ if (addr_format_needs_bounds_check(addr_format)) {
+ /* The Vulkan spec for robustBufferAccess gives us quite a few options
+ * as to what we can do with an OOB read. Unfortunately, returning
+ * undefined values isn't one of them so we return an actual zero.
+ */
+ nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
+
+ const unsigned load_size = (bit_size / 8) * load->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
+
+ nir_builder_instr_insert(b, &load->instr);
+
+ nir_pop_if(b, NULL);
+
+ result = nir_if_phi(b, &load->dest.ssa, zero);
+ } else {
+ nir_builder_instr_insert(b, &load->instr);
+ result = &load->dest.ssa;
+ }
+
+ if (intrin->dest.ssa.bit_size == 1) {
+ /* For shared memory and shader/function temporaries, we can use NIR's
+ * and/or the back-end's standard encoding for booleans rather than
+ * forcing a 0/1 boolean. This should save an instruction or two.
+ */
+ if (mode == nir_var_mem_shared ||
+ mode == nir_var_shader_temp ||
+ mode == nir_var_function_temp)
+ result = nir_b2b1(b, result);
+ else
+ result = nir_i2b(b, result);
+ }
+
+ return result;
+}
+
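+/* Emits the store intrinsic matching the deref's mode and address format.
+ * When the format requires bounds checking, out-of-bounds stores are
+ * silently dropped.
+ */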
+static void
+build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format,
+ nir_ssa_def *value, nir_component_mask_t write_mask)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_store_global;
+ else
+ op = nir_intrinsic_store_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_store_global;
+ break;
+ case nir_var_mem_shared:
+ assert(addr_format_is_offset(addr_format));
+ op = nir_intrinsic_store_shared;
+ break;
+ case nir_var_shader_temp:
+ case nir_var_function_temp:
+ if (addr_format_is_offset(addr_format)) {
+ op = nir_intrinsic_store_scratch;
+ } else {
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_store_global;
+ }
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
+
+ if (value->bit_size == 1) {
+ /* For shared memory and shader/function temporaries, we can use NIR's
+ * and/or the back-end's standard encoding for booleans rather than
+ * forcing a 0/1 boolean. This should save an instruction or two.
+ *
+ * TODO: Make the native bool bit_size an option.
+ */
+ if (mode == nir_var_mem_shared ||
+ mode == nir_var_shader_temp ||
+ mode == nir_var_function_temp)
+ value = nir_b2b32(b, value);
+ else
+ value = nir_b2i(b, value, 32);
+ }
+
+ store->src[0] = nir_src_for_ssa(value);
+ if (addr_format_is_global(addr_format)) {
+ store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else if (addr_format_is_offset(addr_format)) {
+ assert(addr->num_components == 1);
+ store->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ } else {
+ store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+
+ nir_intrinsic_set_write_mask(store, write_mask);
+
+ if (nir_intrinsic_has_access(store))
+ nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
+
+ /* TODO: We should try and provide a better alignment. For OpenCL, we need
+ * to plumb the alignment through from SPIR-V when we have one.
+ */
+ nir_intrinsic_set_align(store, value->bit_size / 8, 0);
+
+ assert(value->num_components == 1 ||
+ value->num_components == intrin->num_components);
+ store->num_components = value->num_components;
+
+ assert(value->bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ const unsigned store_size = (value->bit_size / 8) * store->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
+
+ nir_builder_instr_insert(b, &store->instr);
+
+ nir_pop_if(b, NULL);
+ } else {
+ nir_builder_instr_insert(b, &store->instr);
+ }
+}
+
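+/* Emits the SSBO, shared, or global atomic matching the deref atomic and
+ * returns its result. When the format requires bounds checking,
+ * out-of-bounds atomics yield an undefined value.
+ */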
+static nir_ssa_def *
+build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+ const unsigned num_data_srcs =
+ nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = global_atomic_for_deref(intrin->intrinsic);
+ else
+ op = ssbo_atomic_for_deref(intrin->intrinsic);
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = global_atomic_for_deref(intrin->intrinsic);
+ break;
+ case nir_var_mem_shared:
+ assert(addr_format_is_offset(addr_format));
+ op = shared_atomic_for_deref(intrin->intrinsic);
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
+
+ unsigned src = 0;
+ if (addr_format_is_global(addr_format)) {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else if (addr_format_is_offset(addr_format)) {
+ assert(addr->num_components == 1);
+ atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ } else {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+ for (unsigned i = 0; i < num_data_srcs; i++) {
+ atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
+ }
+
+ /* Global atomics don't have access flags because they assume that the
+ * address may be non-uniform.
+ */
+ if (nir_intrinsic_has_access(atomic))
+ nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
+
+ assert(intrin->dest.ssa.num_components == 1);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest,
+ 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
+
+ assert(atomic->dest.ssa.bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
+
+ nir_builder_instr_insert(b, &atomic->instr);
+
+ nir_pop_if(b, NULL);
+ return nir_if_phi(b, &atomic->dest.ssa,
+ nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
+ } else {
+ nir_builder_instr_insert(b, &atomic->instr);
+ return &atomic->dest.ssa;
+ }
+}
+
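+/** Computes the address of a deref in the given address format
+ *
+ * base_addr is the address of the deref's parent; it is ignored for
+ * nir_deref_type_var derefs, whose address comes from the variable's
+ * driver_location (plus the scratch base pointer for global formats).
+ */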
+nir_ssa_def *
+nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
+ nir_ssa_def *base_addr,
+ nir_address_format addr_format)
+{
+ assert(deref->dest.is_ssa);
+ switch (deref->deref_type) {
+ case nir_deref_type_var:
+ assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
+ nir_var_shader_temp | nir_var_function_temp));
+ if (addr_format_is_global(addr_format)) {
+ assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
+ base_addr =
+ nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
+ nir_address_format_num_components(addr_format),
+ nir_address_format_bit_size(addr_format));
+ return build_addr_iadd_imm(b, base_addr, addr_format,
+ deref->var->data.driver_location);
+ } else {
+ assert(deref->var->data.driver_location <= UINT32_MAX);
+ return nir_imm_intN_t(b, deref->var->data.driver_location,
+ deref->dest.ssa.bit_size);
+ }
+
+ case nir_deref_type_array: {
+ nir_deref_instr *parent = nir_deref_instr_parent(deref);
+
+ unsigned stride = glsl_get_explicit_stride(parent->type);
+ if ((glsl_type_is_matrix(parent->type) &&
+ glsl_matrix_type_is_row_major(parent->type)) ||
+ (glsl_type_is_vector(parent->type) && stride == 0))
+ stride = type_scalar_size_bytes(parent->type);
+
+ assert(stride > 0);
+
+ nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
+ return build_addr_iadd(b, base_addr, addr_format,
+ nir_amul_imm(b, index, stride));
+ }
+
+ case nir_deref_type_ptr_as_array: {
+ nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+ index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
+ unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
+ return build_addr_iadd(b, base_addr, addr_format,
+ nir_amul_imm(b, index, stride));
+ }
+
+ case nir_deref_type_array_wildcard:
+ unreachable("Wildcards should be lowered by now");
+ break;
+
+ case nir_deref_type_struct: {
+ nir_deref_instr *parent = nir_deref_instr_parent(deref);
+ int offset = glsl_get_struct_field_offset(parent->type,
+ deref->strct.index);
+ assert(offset >= 0);
+ return build_addr_iadd_imm(b, base_addr, addr_format, offset);
+ }
+
+ case nir_deref_type_cast:
+ /* Nothing to do here */
+ return base_addr;
+ }
+
+ unreachable("Invalid NIR deref type");
+}
+
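+/** Lowers a single load/store/atomic deref intrinsic to explicit I/O
+ *
+ * addr is the already-lowered address of the deref being accessed. Vectors
+ * whose explicit stride is larger than their scalar size are split into
+ * per-component loads and stores.
+ */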
+void
+nir_lower_explicit_io_instr(nir_builder *b,
+ nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ b->cursor = nir_after_instr(&intrin->instr);
+
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ unsigned vec_stride = glsl_get_explicit_stride(deref->type);
+ unsigned scalar_size = type_scalar_size_bytes(deref->type);
+ assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
+ assert(vec_stride == 0 || vec_stride >= scalar_size);
+
+ if (intrin->intrinsic == nir_intrinsic_load_deref) {
+ nir_ssa_def *value;
+ if (vec_stride > scalar_size) {
+ nir_ssa_def *comps[4] = { NULL, };
+ for (unsigned i = 0; i < intrin->num_components; i++) {
+ nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
+ vec_stride * i);
+ comps[i] = build_explicit_io_load(b, intrin, comp_addr,
+ addr_format, 1);
+ }
+ value = nir_vec(b, comps, intrin->num_components);
+ } else {
+ value = build_explicit_io_load(b, intrin, addr, addr_format,
+ intrin->num_components);
+ }
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
+ } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
+ assert(intrin->src[1].is_ssa);
+ nir_ssa_def *value = intrin->src[1].ssa;
+ nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
+ if (vec_stride > scalar_size) {
+ for (unsigned i = 0; i < intrin->num_components; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
+
+ nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
+ vec_stride * i);
+ build_explicit_io_store(b, intrin, comp_addr, addr_format,
+ nir_channel(b, value, i), 1);
+ }
+ } else {
+ build_explicit_io_store(b, intrin, addr, addr_format,
+ value, write_mask);
+ }
+ } else {
+ nir_ssa_def *value =
+ build_explicit_io_atomic(b, intrin, addr, addr_format);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
+ }
+
+ nir_instr_remove(&intrin->instr);
+}
+
+static void
+lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
+ nir_address_format addr_format)
+{
+ /* Just delete the deref if it's not used. We can't use
+ * nir_deref_instr_remove_if_unused here because it may remove more than
+ * one deref which could break our list walking since we walk the list
+ * backwards.
+ */
+ assert(list_is_empty(&deref->dest.ssa.if_uses));
+ if (list_is_empty(&deref->dest.ssa.uses)) {
+ nir_instr_remove(&deref->instr);
+ return;
+ }
+
+ b->cursor = nir_after_instr(&deref->instr);
+
+ nir_ssa_def *base_addr = NULL;
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->parent.is_ssa);
+ base_addr = deref->parent.ssa;
+ }
+
+ nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
+ addr_format);
+
+ nir_instr_remove(&deref->instr);
+ nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
+}
+
+static void
+lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_address_format addr_format)
+{
+ assert(intrin->src[0].is_ssa);
+ nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
+}
+
+static void
+lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_address_format addr_format)
+{
+ b->cursor = nir_after_instr(&intrin->instr);
+
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+
+ assert(glsl_type_is_array(deref->type));
+ assert(glsl_get_length(deref->type) == 0);
+ unsigned stride = glsl_get_explicit_stride(deref->type);
+ assert(stride > 0);
+
+ nir_ssa_def *addr = &deref->dest.ssa;
+ nir_ssa_def *index = addr_to_index(b, addr, addr_format);
+ nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
+
+ nir_intrinsic_instr *bsize =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
+ bsize->src[0] = nir_src_for_ssa(index);
+ nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
+ nir_builder_instr_insert(b, &bsize->instr);
+
+ nir_ssa_def *arr_size =
+ nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
+ nir_imm_int(b, stride));
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
+ nir_instr_remove(&intrin->instr);
+}
+
+static bool
+nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
+ nir_address_format addr_format)
+{
+ bool progress = false;
+
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ /* Walk in reverse order so that we can see the full deref chain when we
+ * lower the access operations. We lower them assuming that the derefs
+ * will be turned into address calculations later.
+ */
+ nir_foreach_block_reverse(block, impl) {
+ nir_foreach_instr_reverse_safe(instr, block) {
+ switch (instr->type) {
+ case nir_instr_type_deref: {
+ nir_deref_instr *deref = nir_instr_as_deref(instr);
+ if (deref->mode & modes) {
+ lower_explicit_io_deref(&b, deref, addr_format);
+ progress = true;
+ }
+ break;
+ }
+
+ case nir_instr_type_intrinsic: {
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_deref:
+ case nir_intrinsic_store_deref:
+ case nir_intrinsic_deref_atomic_add:
+ case nir_intrinsic_deref_atomic_imin:
+ case nir_intrinsic_deref_atomic_umin:
+ case nir_intrinsic_deref_atomic_imax:
+ case nir_intrinsic_deref_atomic_umax:
+ case nir_intrinsic_deref_atomic_and:
+ case nir_intrinsic_deref_atomic_or:
+ case nir_intrinsic_deref_atomic_xor:
+ case nir_intrinsic_deref_atomic_exchange:
+ case nir_intrinsic_deref_atomic_comp_swap:
+ case nir_intrinsic_deref_atomic_fadd:
+ case nir_intrinsic_deref_atomic_fmin:
+ case nir_intrinsic_deref_atomic_fmax:
+ case nir_intrinsic_deref_atomic_fcomp_swap: {
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ if (deref->mode & modes) {
+ lower_explicit_io_access(&b, intrin, addr_format);
+ progress = true;
+ }
+ break;
+ }
+
+ case nir_intrinsic_deref_buffer_array_length: {
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ if (deref->mode & modes) {
+ lower_explicit_io_array_length(&b, intrin, addr_format);
+ progress = true;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ default:
+ /* Nothing to do */
+ break;
+ }
+ }
+ }
+
+ if (progress) {
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+
+ return progress;
+}
+
+/** Lower explicitly laid out I/O access to byte offset/address intrinsics
+ *
+ * This pass is intended to be used for any I/O which touches memory external
+ * to the shader or which is directly visible to the client. It requires that
+ * all data types in the given modes have explicit stride/offset decorations
+ * to tell it exactly how to calculate the offset/address for the given load,
+ * store, or atomic operation. If the offset/stride information does not come
+ * from the client explicitly (as with shared variables in GL or Vulkan),
+ * nir_lower_vars_to_explicit_types() can be used to add them.
+ *
+ * Unlike nir_lower_io, this pass is fully capable of handling incomplete
+ * pointer chains which may contain cast derefs. It does so by walking the
+ * deref chain backwards and simply replacing each deref, one at a time, with
+ * the appropriate address calculation. The pass takes a nir_address_format
+ * parameter which describes how the offset or address is to be represented
+ * during calculations. By ensuring that the address is always in a
+ * consistent format, pointers can safely be conjured from thin air by the
+ * driver, stored to variables, passed through phis, etc.
+ *
+ * The one exception to the simple algorithm described above is the handling
+ * of row-major matrices, in which case we may look one additional level down
+ * the deref chain.
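+ *
+ * As a rough usage sketch (the address format and the size/align callback
+ * are driver choices; shared_type_info below is only a stand-in name for
+ * whatever glsl_type_size_align_func the driver provides):
+ *
+ *    nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
+ *                                     shared_type_info);
+ *    nir_lower_explicit_io(shader, nir_var_mem_shared,
+ *                          nir_address_format_32bit_offset);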
+ */
+bool
+nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
+ nir_address_format addr_format)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl &&
+ nir_lower_explicit_io_impl(function->impl, modes, addr_format))
+ progress = true;
+ }
+
+ return progress;
+}
+
+static bool
+nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
+ nir_variable_mode modes,
+ glsl_type_size_align_func type_info)
+{
+ bool progress = false;
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_deref)
+ continue;
+
+ nir_deref_instr *deref = nir_instr_as_deref(instr);
+ if (!(deref->mode & modes))
+ continue;
+
+ unsigned size, alignment;
+ const struct glsl_type *new_type =
+ glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
+ if (new_type != deref->type) {
+ progress = true;
+ deref->type = new_type;
+ }
+ if (deref->deref_type == nir_deref_type_cast) {
+ /* See also glsl_type::get_explicit_type_for_size_align() */
+ unsigned new_stride = align(size, alignment);
+ if (new_stride != deref->cast.ptr_stride) {
+ deref->cast.ptr_stride = new_stride;
+ progress = true;
+ }
+ }
+ }
+ }
+
+ if (progress) {
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance |
+ nir_metadata_live_ssa_defs |
+ nir_metadata_loop_analysis);
+ }
+
+ return progress;
+}
+
+static bool
+lower_vars_to_explicit(nir_shader *shader,
+ struct exec_list *vars, nir_variable_mode mode,
+ glsl_type_size_align_func type_info)
+{
+ bool progress = false;
+ unsigned offset;
+ switch (mode) {
+ case nir_var_function_temp:
+ case nir_var_shader_temp:
+ offset = shader->scratch_size;
+ break;
+ case nir_var_mem_shared:
+ offset = 0;
+ break;
+ default:
+ unreachable("Unsupported mode");
+ }
+ nir_foreach_variable_in_list(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
+ unsigned size, align;
+ const struct glsl_type *explicit_type =
+ glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
+
+ if (explicit_type != var->type) {
+ progress = true;
+ var->type = explicit_type;
+ }
+
+ var->data.driver_location = ALIGN_POT(offset, align);
+ offset = var->data.driver_location + size;
+ }
+
+ switch (mode) {
+ case nir_var_shader_temp:
+ case nir_var_function_temp:
+ shader->scratch_size = offset;
+ break;
+ case nir_var_mem_shared:
+ shader->info.cs.shared_size = offset;
+ shader->num_shared = offset;
+ break;
+ default:
+ unreachable("Unsupported mode");
+ }
+
+ return progress;
+}
+
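+/** Assigns explicit types and byte locations to variables in the given modes
+ *
+ * Using the provided size/align callback, variable types are rewritten with
+ * explicit strides and offsets, driver_locations are assigned as byte
+ * offsets, and the shader's scratch_size or shared_size is updated. Cast
+ * deref ptr_strides are adjusted to match the new types.
+ */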
+bool
+nir_lower_vars_to_explicit_types(nir_shader *shader,
+ nir_variable_mode modes,
+ glsl_type_size_align_func type_info)
+{
+ /* TODO: Situations which need to be handled to support more modes:
+ * - row-major matrices
+ * - compact shader inputs/outputs
+ * - interface types
+ */
+ ASSERTED nir_variable_mode supported = nir_var_mem_shared |
+ nir_var_shader_temp | nir_var_function_temp;
+ assert(!(modes & ~supported) && "unsupported");
+
+ bool progress = false;
+
+ if (modes & nir_var_mem_shared)
+ progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_mem_shared, type_info);
+ if (modes & nir_var_shader_temp)
+ progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_shader_temp, type_info);
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ if (modes & nir_var_function_temp)
+ progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
+
+ progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
+ }
+ }
+
+ return progress;
+}
+