+static nir_ssa_def *
+addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
+                  nir_address_format addr_format, unsigned size)
+{
+   assert(addr_format == nir_address_format_64bit_bounded_global);
+   assert(addr->num_components == 4);
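+   /* For nir_address_format_64bit_bounded_global the address is a vec4:
+    * .xy hold the 64-bit base address, .z holds a size bound, and .w holds
+    * an offset.  An access of `size` bytes is in bounds iff
+    * offset + size <= bound.
+    */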
+   return nir_ige(b, nir_channel(b, addr, 2),
+                  nir_iadd_imm(b, nir_channel(b, addr, 3), size));
+}
+
+static nir_ssa_def *
+build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
+                       nir_ssa_def *addr, nir_address_format addr_format,
+                       unsigned num_components)
+{
+   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+   nir_intrinsic_op op;
+   switch (mode) {
+   case nir_var_mem_ubo:
+      op = nir_intrinsic_load_ubo;
+      break;
+   case nir_var_mem_ssbo:
+      if (addr_format_is_global(addr_format))
+         op = nir_intrinsic_load_global;
+      else
+         op = nir_intrinsic_load_ssbo;
+      break;
+   case nir_var_mem_global:
+      assert(addr_format_is_global(addr_format));
+      op = nir_intrinsic_load_global;
+      break;
+   case nir_var_shader_in:
+      assert(addr_format_is_global(addr_format));
+      op = nir_intrinsic_load_kernel_input;
+      break;
+   case nir_var_mem_shared:
+      assert(addr_format == nir_address_format_32bit_offset);
+      op = nir_intrinsic_load_shared;
+      break;
+   default:
+      unreachable("Unsupported explicit IO variable mode");
+   }
+
+   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
+
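+   /* The address sources depend on the target intrinsic: global formats
+    * pass a single 64-bit address, nir_address_format_32bit_offset passes
+    * the offset directly, and descriptor-based formats split the address
+    * into a buffer index and an offset.
+    */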
+   if (addr_format_is_global(addr_format)) {
+      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+   } else if (addr_format == nir_address_format_32bit_offset) {
+      assert(addr->num_components == 1);
+      load->src[0] = nir_src_for_ssa(addr);
+   } else {
+      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+   }
+
+   if (mode != nir_var_mem_ubo && mode != nir_var_shader_in &&
+       mode != nir_var_mem_shared)
+      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
+
+   unsigned bit_size = intrin->dest.ssa.bit_size;
+   if (bit_size == 1) {
+      /* TODO: Make the native bool bit_size an option. */
+      bit_size = 32;
+   }
+
+   /* TODO: We should try to provide a better alignment.  For OpenCL, we
+    * need to plumb the alignment through from SPIR-V when we have one.
+    */
+   nir_intrinsic_set_align(load, bit_size / 8, 0);
+
+   assert(intrin->dest.is_ssa);
+   load->num_components = num_components;
+   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
+                     bit_size, intrin->dest.ssa.name);
+
+   assert(bit_size % 8 == 0);
+
+   nir_ssa_def *result;
+   if (addr_format_needs_bounds_check(addr_format)) {
+      /* The Vulkan spec for robustBufferAccess gives us quite a few options
+       * as to what we can do with an OOB read.  Unfortunately, returning
+       * undefined values isn't one of them, so we return an actual zero.
+       */
+      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
+
+      const unsigned load_size = (bit_size / 8) * load->num_components;
+      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
+
+      nir_builder_instr_insert(b, &load->instr);
+
+      nir_pop_if(b, NULL);
+
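+      /* Merge the bounds-checked load (then-branch) with the zero value
+       * from the else-branch.
+       */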
+      result = nir_if_phi(b, &load->dest.ssa, zero);
+   } else {
+      nir_builder_instr_insert(b, &load->instr);
+      result = &load->dest.ssa;
+   }
+
+   if (intrin->dest.ssa.bit_size == 1)
+      result = nir_i2b(b, result);
+
+   return result;
+}
+
+static void
+build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
+                        nir_ssa_def *addr, nir_address_format addr_format,
+                        nir_ssa_def *value, nir_component_mask_t write_mask)
+{
+   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
+
+   nir_intrinsic_op op;
+   switch (mode) {
+   case nir_var_mem_ssbo:
+      if (addr_format_is_global(addr_format))
+         op = nir_intrinsic_store_global;
+      else
+         op = nir_intrinsic_store_ssbo;
+      break;
+   case nir_var_mem_global:
+      assert(addr_format_is_global(addr_format));
+      op = nir_intrinsic_store_global;
+      break;
+   case nir_var_mem_shared:
+      assert(addr_format == nir_address_format_32bit_offset);
+      op = nir_intrinsic_store_shared;
+      break;
+   default:
+      unreachable("Unsupported explicit IO variable mode");
+   }
+
+   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
+
+   if (value->bit_size == 1) {
+      /* TODO: Make the native bool bit_size an option. */
+      value = nir_b2i(b, value, 32);
+   }
+
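+   /* Stores take the value in src[0]; the address sources follow,
+    * mirroring the load case above.
+    */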
+   store->src[0] = nir_src_for_ssa(value);
+   if (addr_format_is_global(addr_format)) {
+      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+   } else if (addr_format == nir_address_format_32bit_offset) {
+      assert(addr->num_components == 1);
+      store->src[1] = nir_src_for_ssa(addr);
+   } else {
+      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+   }
+
+   nir_intrinsic_set_write_mask(store, write_mask);
+
+   if (mode != nir_var_mem_shared)
+      nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));
+
+   /* TODO: We should try to provide a better alignment.  For OpenCL, we
+    * need to plumb the alignment through from SPIR-V when we have one.
+    */
+   nir_intrinsic_set_align(store, value->bit_size / 8, 0);
+
+   assert(value->num_components == 1 ||
+          value->num_components == intrin->num_components);
+   store->num_components = value->num_components;
+
+   assert(value->bit_size % 8 == 0);
+
+   if (addr_format_needs_bounds_check(addr_format)) {
+      const unsigned store_size = (value->bit_size / 8) * store->num_components;
+      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
+
+      nir_builder_instr_insert(b, &store->instr);
+
+      nir_pop_if(b, NULL);
+   } else {
+      nir_builder_instr_insert(b, &store->instr);
+   }
+}
+
+static nir_ssa_def *
+build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
+                         nir_ssa_def *addr, nir_address_format addr_format)
+{
+   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
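+   /* Every source after the deref is a data source to carry over: one
+    * operand for most atomics, two for compare-and-swap.
+    */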
+   const unsigned num_data_srcs =
+      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;
+
+   nir_intrinsic_op op;
+   switch (mode) {
+   case nir_var_mem_ssbo:
+      if (addr_format_is_global(addr_format))
+         op = global_atomic_for_deref(intrin->intrinsic);
+      else
+         op = ssbo_atomic_for_deref(intrin->intrinsic);
+      break;
+   case nir_var_mem_global:
+      assert(addr_format_is_global(addr_format));
+      op = global_atomic_for_deref(intrin->intrinsic);
+      break;
+   case nir_var_mem_shared:
+      assert(addr_format == nir_address_format_32bit_offset);
+      op = shared_atomic_for_deref(intrin->intrinsic);
+      break;
+   default:
+      unreachable("Unsupported explicit IO variable mode");
+   }
+
+   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
+
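+   /* The address source(s) come first, followed by the data sources carried
+    * over from the deref-based intrinsic.
+    */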
+   unsigned src = 0;
+   if (addr_format_is_global(addr_format)) {
+      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+   } else if (addr_format == nir_address_format_32bit_offset) {
+      assert(addr->num_components == 1);
+      atomic->src[src++] = nir_src_for_ssa(addr);
+   } else {
+      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+   }
+   for (unsigned i = 0; i < num_data_srcs; i++) {
+      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
+   }
+
+   /* Global atomics don't have access flags because they assume that the
+    * address may be non-uniform.
+    */
+   if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
+      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
+
+   assert(intrin->dest.ssa.num_components == 1);
+   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
+                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
+
+   assert(atomic->dest.ssa.bit_size % 8 == 0);
+
+   if (addr_format_needs_bounds_check(addr_format)) {
+      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
+
+      nir_builder_instr_insert(b, &atomic->instr);
+
+      nir_pop_if(b, NULL);
+      return nir_if_phi(b, &atomic->dest.ssa,
+                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
+   } else {
+      nir_builder_instr_insert(b, &atomic->instr);
+      return &atomic->dest.ssa;
+   }
+}
+
+nir_ssa_def *
+nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
+                                   nir_ssa_def *base_addr,
+                                   nir_address_format addr_format)
+{
+   assert(deref->dest.is_ssa);
+   switch (deref->deref_type) {
+   case nir_deref_type_var:
+      assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
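+      /* For shared memory and kernel inputs, the variable's byte offset
+       * within its memory space is assumed to have been stored in
+       * driver_location when the layout was assigned.
+       */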
+      return nir_imm_intN_t(b, deref->var->data.driver_location,
+                            deref->dest.ssa.bit_size);
+
+   case nir_deref_type_array: {
+      nir_deref_instr *parent = nir_deref_instr_parent(deref);
+
+      unsigned stride = glsl_get_explicit_stride(parent->type);
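+      /* Indexing a row-major matrix selects a column, and consecutive
+       * columns start one scalar apart; a vector with no explicit stride is
+       * tightly packed.  In both cases the element stride is the scalar
+       * size.
+       */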
+      if ((glsl_type_is_matrix(parent->type) &&
+           glsl_matrix_type_is_row_major(parent->type)) ||
+          (glsl_type_is_vector(parent->type) && stride == 0))
+         stride = type_scalar_size_bytes(parent->type);
+
+      assert(stride > 0);
+
+      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      index = nir_i2i(b, index, base_addr->bit_size);
+      return build_addr_iadd(b, base_addr, addr_format,
+                             nir_amul_imm(b, index, stride));
+   }
+
+   case nir_deref_type_ptr_as_array: {
+      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
+      index = nir_i2i(b, index, base_addr->bit_size);
+      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
+      return build_addr_iadd(b, base_addr, addr_format,
+                             nir_amul_imm(b, index, stride));
+   }
+
+   case nir_deref_type_array_wildcard:
+      unreachable("Wildcards should be lowered by now");
+      break;
+
+   case nir_deref_type_struct: {
+      nir_deref_instr *parent = nir_deref_instr_parent(deref);
+      int offset = glsl_get_struct_field_offset(parent->type,
+                                                deref->strct.index);
+      assert(offset >= 0);
+      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
+   }
+
+   case nir_deref_type_cast:
+      /* Nothing to do here */
+      return base_addr;
+   }
+
+   unreachable("Invalid NIR deref type");
+}
+
+void
+nir_lower_explicit_io_instr(nir_builder *b,
+                            nir_intrinsic_instr *intrin,
+                            nir_ssa_def *addr,
+                            nir_address_format addr_format)
+{
+   b->cursor = nir_after_instr(&intrin->instr);
+
+   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
+   unsigned scalar_size = type_scalar_size_bytes(deref->type);
+   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
+   assert(vec_stride == 0 || vec_stride >= scalar_size);
+
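+   /* If the vector has an explicit stride larger than its scalar size, its
+    * components are not tightly packed, so each component is loaded or
+    * stored with its own I/O instruction below.
+    */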
+   if (intrin->intrinsic == nir_intrinsic_load_deref) {
+      nir_ssa_def *value;
+      if (vec_stride > scalar_size) {
+         nir_ssa_def *comps[4] = { NULL, };
+         for (unsigned i = 0; i < intrin->num_components; i++) {
+            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
+                                                         vec_stride * i);
+            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
+                                              addr_format, 1);
+         }
+         value = nir_vec(b, comps, intrin->num_components);
+      } else {
+         value = build_explicit_io_load(b, intrin, addr, addr_format,
+                                        intrin->num_components);
+      }
+      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
+   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
+      assert(intrin->src[1].is_ssa);
+      nir_ssa_def *value = intrin->src[1].ssa;
+      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
+      if (vec_stride > scalar_size) {
+         for (unsigned i = 0; i < intrin->num_components; i++) {
+            if (!(write_mask & (1 << i)))
+               continue;
+
+            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
+                                                         vec_stride * i);
+            build_explicit_io_store(b, intrin, comp_addr, addr_format,
+                                    nir_channel(b, value, i), 1);
+         }
+      } else {
+         build_explicit_io_store(b, intrin, addr, addr_format,
+                                 value, write_mask);