+ int (*type_size)(const struct glsl_type *),
+ nir_lower_io_options options)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ progress |= nir_lower_io_impl(function->impl, modes,
+ type_size, options);
+ }
+ }
+
+ return progress;
+}
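+
+/* Example driver usage (an illustrative sketch, not part of this patch;
+ * the my_type_size_vec4 callback name is hypothetical):
+ *
+ *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
+ *                 my_type_size_vec4, (nir_lower_io_options)0);
+ */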
+
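+/* Returns the size in bytes of a single scalar component of the given
+ * type. Booleans are counted as 32-bit (4-byte) values.
+ */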
+static unsigned
+type_scalar_size_bytes(const struct glsl_type *type)
+{
+ assert(glsl_type_is_vector_or_scalar(type) ||
+ glsl_type_is_matrix(type));
+ return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
+}
+
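+/* Adds a scalar byte offset to an address. Channel layout per format:
+ *
+ *    32bit_global / 64bit_global:  scalar pointer
+ *    64bit_bounded_global:         vec4(base_lo, base_hi, bound, offset)
+ *    32bit_index_offset:           vec2(index, offset)
+ *
+ * Only the offset channel is modified for the multi-component formats.
+ */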
+static nir_ssa_def *
+build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, nir_ssa_def *offset)
+{
+ assert(offset->num_components == 1);
+ assert(addr->bit_size == offset->bit_size);
+
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ assert(addr->num_components == 1);
+ return nir_iadd(b, addr, offset);
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
+ return nir_vec4(b, nir_channel(b, addr, 0),
+ nir_channel(b, addr, 1),
+ nir_channel(b, addr, 2),
+ nir_iadd(b, nir_channel(b, addr, 3), offset));
+
+ case nir_address_format_32bit_index_offset:
+ assert(addr->num_components == 2);
+ return nir_vec2(b, nir_channel(b, addr, 0),
+ nir_iadd(b, nir_channel(b, addr, 1), offset));
+ }
+ unreachable("Invalid address format");
+}
+
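+/* Convenience wrapper that adds a constant byte offset to an address. */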
+static nir_ssa_def *
+build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, int64_t offset)
+{
+ return build_addr_iadd(b, addr, addr_format,
+ nir_imm_intN_t(b, offset, addr->bit_size));
+}
+
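+/* Extracts the buffer index (channel 0) of a vec2(index, offset) address. */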
+static nir_ssa_def *
+addr_to_index(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ assert(addr_format == nir_address_format_32bit_index_offset);
+ assert(addr->num_components == 2);
+ return nir_channel(b, addr, 0);
+}
+
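+/* Extracts the byte offset (channel 1) of a vec2(index, offset) address. */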
+static nir_ssa_def *
+addr_to_offset(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ assert(addr_format == nir_address_format_32bit_index_offset);
+ assert(addr->num_components == 2);
+ return nir_channel(b, addr, 1);
+}
+
+/** Returns true if the given address format resolves to a global address */
+static bool
+addr_format_is_global(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_32bit_global ||
+ addr_format == nir_address_format_64bit_global ||
+ addr_format == nir_address_format_64bit_bounded_global;
+}
+
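+/* Flattens an address to a single global pointer. For the bounded
+ * format, the 64-bit base is reassembled from channels 0-1 and the
+ * 32-bit offset in channel 3 is added on; the index/offset format has
+ * no global equivalent.
+ */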
+static nir_ssa_def *
+addr_to_global(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ assert(addr->num_components == 1);
+ return addr;
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
+ return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
+ nir_u2u64(b, nir_channel(b, addr, 3)));
+
+ case nir_address_format_32bit_index_offset:
+ unreachable("Cannot get a 64-bit address with this address format");
+ }
+
+ unreachable("Invalid address format");
+}
+
+static bool
+addr_format_needs_bounds_check(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_64bit_bounded_global;
+}
+
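+/* Emits the robustness check for the bounded format: an access of `size`
+ * bytes is in bounds iff offset + size <= bound, implemented as a signed
+ * >= compare of channel 2 (the bound) against channel 3 (the offset)
+ * plus size.
+ */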
+static nir_ssa_def *
+addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, unsigned size)
+{
+ assert(addr_format == nir_address_format_64bit_bounded_global);
+ assert(addr->num_components == 4);
+ return nir_ige(b, nir_channel(b, addr, 2),
+ nir_iadd_imm(b, nir_channel(b, addr, 3), size));
+}
+
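+/* Builds the lowered load for an explicit IO deref. The intrinsic opcode
+ * is picked from the variable mode and address format; with the bounded
+ * format, the load is wrapped in an if so that out-of-bounds reads yield
+ * zero.
+ */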
+static nir_ssa_def *
+build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format,
+ unsigned num_components)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ubo:
+ op = nir_intrinsic_load_ubo;
+ break;
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_load_global;
+ else
+ op = nir_intrinsic_load_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_global;
+ break;
+ case nir_var_shader_in:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_kernel_input;
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
+
+ if (addr_format_is_global(addr_format)) {
+ load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+
+ if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
+ nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
+
+ /* TODO: We should try to provide a better alignment. For OpenCL, we
+ * need to plumb the alignment through from SPIR-V when we have one.
+ */
+ nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
+
+ assert(intrin->dest.is_ssa);
+ load->num_components = num_components;
+ nir_ssa_dest_init(&load->instr, &load->dest, num_components,
+ intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
+
+ assert(load->dest.ssa.bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ /* The Vulkan spec for robustBufferAccess gives us quite a few options
+ * as to what we can do with an OOB read. Unfortunately, returning
+ * undefined values isn't one of them, so we return an actual zero.
+ */
+ nir_const_value zero_val;
+ memset(&zero_val, 0, sizeof(zero_val));
+ nir_ssa_def *zero = nir_build_imm(b, load->num_components,
+ load->dest.ssa.bit_size, zero_val);
+
+ const unsigned load_size =
+ (load->dest.ssa.bit_size / 8) * load->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
+
+ nir_builder_instr_insert(b, &load->instr);
+
+ nir_pop_if(b, NULL);
+
+ return nir_if_phi(b, &load->dest.ssa, zero);
+ } else {
+ nir_builder_instr_insert(b, &load->instr);
+ return &load->dest.ssa;
+ }
+}
+
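+/* Builds the lowered store for an explicit IO deref. With the bounded
+ * format, out-of-bounds stores are silently dropped.
+ */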
+static void
+build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format,
+ nir_ssa_def *value, nir_component_mask_t write_mask)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_store_global;
+ else
+ op = nir_intrinsic_store_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_store_global;
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
+
+ store->src[0] = nir_src_for_ssa(value);
+ if (addr_format_is_global(addr_format)) {
+ store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+
+ nir_intrinsic_set_write_mask(store, write_mask);
+
+ nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
+
+ /* TODO: We should try to provide a better alignment. For OpenCL, we
+ * need to plumb the alignment through from SPIR-V when we have one.
+ */
+ nir_intrinsic_set_align(store, value->bit_size / 8, 0);
+
+ assert(value->num_components == 1 ||
+ value->num_components == intrin->num_components);
+ store->num_components = value->num_components;
+
+ assert(value->bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ const unsigned store_size = (value->bit_size / 8) * store->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
+
+ nir_builder_instr_insert(b, &store->instr);
+
+ nir_pop_if(b, NULL);
+ } else {
+ nir_builder_instr_insert(b, &store->instr);
+ }
+}
+
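+/* Builds the lowered atomic for an explicit IO deref. The data sources
+ * are forwarded from the deref intrinsic starting at src[1]; with the
+ * bounded format, an out-of-bounds atomic yields an undef value.
+ */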
+static nir_ssa_def *
+build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr, nir_address_format addr_format)
+{
+ nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+ const unsigned num_data_srcs =
+ nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
+
+ nir_intrinsic_op op;
+ switch (mode) {
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = global_atomic_for_deref(intrin->intrinsic);
+ else
+ op = ssbo_atomic_for_deref(intrin->intrinsic);
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = global_atomic_for_deref(intrin->intrinsic);
+ break;
+ default:
+ unreachable("Unsupported explicit IO variable mode");
+ }
+
+ nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
+
+ unsigned src = 0;
+ if (addr_format_is_global(addr_format)) {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
+ for (unsigned i = 0; i < num_data_srcs; i++) {
+ atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
+ }
+
+ /* Global atomics don't have access flags because they assume that the
+ * address may be non-uniform.
+ */
+ if (!addr_format_is_global(addr_format))
+ nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
+
+ assert(intrin->dest.ssa.num_components == 1);
+ nir_ssa_dest_init(&atomic->instr, &atomic->dest,
+ 1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
+
+ assert(atomic->dest.ssa.bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
+
+ nir_builder_instr_insert(b, &atomic->instr);
+
+ nir_pop_if(b, NULL);
+ return nir_if_phi(b, &atomic->dest.ssa,
+ nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
+ } else {
+ nir_builder_instr_insert(b, &atomic->instr);
+ return &atomic->dest.ssa;
+ }
+}
+
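+/* Lowers a single deref instruction to an explicit address computation
+ * in the given format.
+ */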
+static void
+lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
+ nir_address_format addr_format)