struct lower_io_state {
void *dead_ctx;
nir_builder builder;
- int (*type_size)(const struct glsl_type *type);
+ int (*type_size)(const struct glsl_type *type, bool);
nir_variable_mode modes;
nir_lower_io_options options;
};
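+/* Translate a deref-based atomic intrinsic to its SSBO counterpart. */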
+static nir_intrinsic_op
+ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
+{
+ switch (deref_op) {
+#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
+ OP(atomic_exchange)
+ OP(atomic_comp_swap)
+ OP(atomic_add)
+ OP(atomic_imin)
+ OP(atomic_umin)
+ OP(atomic_imax)
+ OP(atomic_umax)
+ OP(atomic_and)
+ OP(atomic_or)
+ OP(atomic_xor)
+ OP(atomic_fadd)
+ OP(atomic_fmin)
+ OP(atomic_fmax)
+ OP(atomic_fcomp_swap)
+#undef OP
+ default:
+ unreachable("Invalid SSBO atomic");
+ }
+}
+
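+/* Translate a deref-based atomic intrinsic to its global-memory
+ * counterpart.
+ */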
+static nir_intrinsic_op
+global_atomic_for_deref(nir_intrinsic_op deref_op)
+{
+ switch (deref_op) {
+#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
+ OP(atomic_exchange)
+ OP(atomic_comp_swap)
+ OP(atomic_add)
+ OP(atomic_imin)
+ OP(atomic_umin)
+ OP(atomic_imax)
+ OP(atomic_umax)
+ OP(atomic_and)
+ OP(atomic_or)
+ OP(atomic_xor)
+ OP(atomic_fadd)
+ OP(atomic_fmin)
+ OP(atomic_fmax)
+ OP(atomic_fcomp_swap)
+#undef OP
+ default:
+      unreachable("Invalid global atomic");
+ }
+}
+
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *, bool))
{
unsigned location = 0;
* UBOs have their own address spaces, so don't count them towards the
* number of global uniforms
*/
- if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_ssbo)
+ if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
continue;
var->data.driver_location = location;
- location += type_size(var->type);
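+      /* Inputs, outputs, and bindless variables use the bindless size of
+       * the type.
+       */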
+ bool bindless_type_size = var->data.mode == nir_var_shader_in ||
+ var->data.mode == nir_var_shader_out ||
+ var->data.bindless;
+ location += type_size(var->type, bindless_type_size);
}
*size = location;
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def **vertex_index,
- int (*type_size)(const struct glsl_type *),
- unsigned *component)
+ int (*type_size)(const struct glsl_type *, bool),
+ unsigned *component, bool bts)
{
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
const unsigned total_offset = *component + index;
const unsigned slot_offset = total_offset / 4;
*component = total_offset % 4;
- return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset);
+ return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
}
/* Just emit code and let constant-folding go to town */
for (; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
- unsigned size = type_size((*p)->type);
+ unsigned size = type_size((*p)->type, bts);
nir_ssa_def *mul =
nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
unsigned field_offset = 0;
for (unsigned i = 0; i < (*p)->strct.index; i++) {
- field_offset += type_size(glsl_get_struct_field(parent->type, i));
+ field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
}
offset = nir_iadd_imm(b, offset, field_offset);
} else {
static nir_intrinsic_instr *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
- unsigned component)
+ unsigned component, const struct glsl_type *type)
{
const nir_shader *nir = state->builder.shader;
nir_variable_mode mode = var->data.mode;
case nir_var_uniform:
op = nir_intrinsic_load_uniform;
break;
- case nir_var_shared:
+ case nir_var_mem_shared:
op = nir_intrinsic_load_shared;
break;
default:
nir_intrinsic_set_component(load, component);
if (load->intrinsic == nir_intrinsic_load_uniform)
- nir_intrinsic_set_range(load, state->type_size(var->type));
+ nir_intrinsic_set_range(load,
+ state->type_size(var->type, var->data.bindless));
+
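+   /* Record the ALU type (base type and bit size) of the loaded value. */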
+ if (load->intrinsic == nir_intrinsic_load_input ||
+ load->intrinsic == nir_intrinsic_load_uniform)
+ nir_intrinsic_set_type(load, nir_get_nir_type_for_glsl_type(type));
if (vertex_index) {
load->src[0] = nir_src_for_ssa(vertex_index);
static nir_intrinsic_instr *
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
- unsigned component)
+ unsigned component, const struct glsl_type *type)
{
nir_variable_mode mode = var->data.mode;
nir_intrinsic_op op;
- if (mode == nir_var_shared) {
+ if (mode == nir_var_mem_shared) {
op = nir_intrinsic_store_shared;
} else {
assert(mode == nir_var_shader_out);
if (mode == nir_var_shader_out)
nir_intrinsic_set_component(store, component);
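+
+   /* Likewise, record the type of the data being written. */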
+ if (store->intrinsic == nir_intrinsic_store_output)
+ nir_intrinsic_set_type(store, nir_get_nir_type_for_glsl_type(type));
+
nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
if (vertex_index)
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
nir_variable *var, nir_ssa_def *offset)
{
- assert(var->data.mode == nir_var_shared);
+ assert(var->data.mode == nir_var_mem_shared);
nir_intrinsic_op op;
switch (intrin->intrinsic) {
static nir_intrinsic_instr *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
- nir_variable *var, nir_ssa_def *offset, unsigned component)
+ nir_variable *var, nir_ssa_def *offset, unsigned component,
+ const struct glsl_type *type)
{
assert(var->data.mode == nir_var_shader_in);
/* Ignore interpolateAt() for flat variables - flat is flat. */
if (var->data.interpolation == INTERP_MODE_FLAT)
- return lower_load(intrin, state, NULL, var, offset, component);
+ return lower_load(intrin, state, NULL, var, offset, component, type);
nir_intrinsic_op bary_op;
switch (intrin->intrinsic) {
if (mode != nir_var_shader_in &&
mode != nir_var_shader_out &&
- mode != nir_var_shared &&
+ mode != nir_var_mem_shared &&
mode != nir_var_uniform)
continue;
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
unsigned component_offset = var->data.location_frac;
+ bool bindless_type_size = mode == nir_var_shader_in ||
+ mode == nir_var_shader_out ||
+ var->data.bindless;
offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
- state->type_size, &component_offset);
+ state->type_size, &component_offset,
+ bindless_type_size);
nir_intrinsic_instr *replacement;
switch (intrin->intrinsic) {
case nir_intrinsic_load_deref:
replacement = lower_load(intrin, state, vertex_index, var, offset,
- component_offset);
+ component_offset, deref->type);
break;
case nir_intrinsic_store_deref:
replacement = lower_store(intrin, state, vertex_index, var, offset,
- component_offset);
+ component_offset, deref->type);
break;
case nir_intrinsic_deref_atomic_add:
case nir_intrinsic_interp_deref_at_offset:
assert(vertex_index == NULL);
replacement = lower_interpolate_at(intrin, state, var, offset,
- component_offset);
+ component_offset, deref->type);
break;
default:
static bool
nir_lower_io_impl(nir_function_impl *impl,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
struct lower_io_state state;
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
bool progress = false;
assert(addr->bit_size == offset->bit_size);
switch (addr_format) {
- case nir_address_format_vk_index_offset:
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ case nir_address_format_32bit_offset:
+ assert(addr->num_components == 1);
+ return nir_iadd(b, addr, offset);
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
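+      /* The bounded-global format is a uvec4: .xy hold the 64-bit base
+       * address, .z the buffer size, and .w the offset.  Only the offset
+       * component advances.
+       */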
+ return nir_vec4(b, nir_channel(b, addr, 0),
+ nir_channel(b, addr, 1),
+ nir_channel(b, addr, 2),
+ nir_iadd(b, nir_channel(b, addr, 3), offset));
+
+ case nir_address_format_32bit_index_offset:
assert(addr->num_components == 2);
return nir_vec2(b, nir_channel(b, addr, 0),
nir_iadd(b, nir_channel(b, addr, 1), offset));
+ case nir_address_format_logical:
+ unreachable("Unsupported address format");
}
unreachable("Invalid address format");
}
addr_to_index(nir_builder *b, nir_ssa_def *addr,
nir_address_format addr_format)
{
- assert(addr_format == nir_address_format_vk_index_offset);
+ assert(addr_format == nir_address_format_32bit_index_offset);
assert(addr->num_components == 2);
return nir_channel(b, addr, 0);
}
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
nir_address_format addr_format)
{
- assert(addr_format == nir_address_format_vk_index_offset);
+ assert(addr_format == nir_address_format_32bit_index_offset);
assert(addr->num_components == 2);
return nir_channel(b, addr, 1);
}
+/** Returns true if the given address format resolves to a global address */
+static bool
+addr_format_is_global(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_32bit_global ||
+ addr_format == nir_address_format_64bit_global ||
+ addr_format == nir_address_format_64bit_bounded_global;
+}
+
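+/* Collapse an address to the flat scalar pointer expected by the global
+ * intrinsics.
+ */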
+static nir_ssa_def *
+addr_to_global(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global:
+ assert(addr->num_components == 1);
+ return addr;
+
+ case nir_address_format_64bit_bounded_global:
+ assert(addr->num_components == 4);
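+      /* Rebuild the 64-bit base from .xy and add the offset from .w. */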
+ return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
+ nir_u2u64(b, nir_channel(b, addr, 3)));
+
+ case nir_address_format_32bit_index_offset:
+ case nir_address_format_32bit_offset:
+ case nir_address_format_logical:
+ unreachable("Cannot get a 64-bit address with this address format");
+ }
+
+ unreachable("Invalid address format");
+}
+
+static bool
+addr_format_needs_bounds_check(nir_address_format addr_format)
+{
+ return addr_format == nir_address_format_64bit_bounded_global;
+}
+
+static nir_ssa_def *
+addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
+ nir_address_format addr_format, unsigned size)
+{
+ assert(addr_format == nir_address_format_64bit_bounded_global);
+ assert(addr->num_components == 4);
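+   /* In bounds iff the offset (.w) plus the access size does not exceed
+    * the buffer size (.z).
+    */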
+ return nir_ige(b, nir_channel(b, addr, 2),
+ nir_iadd_imm(b, nir_channel(b, addr, 3), size));
+}
+
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
nir_ssa_def *addr, nir_address_format addr_format,
case nir_var_mem_ubo:
op = nir_intrinsic_load_ubo;
break;
- case nir_var_ssbo:
- op = nir_intrinsic_load_ssbo;
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_load_global;
+ else
+ op = nir_intrinsic_load_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_global;
+ break;
+ case nir_var_shader_in:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_kernel_input;
break;
default:
unreachable("Unsupported explicit IO variable mode");
nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
- load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
- load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ if (addr_format_is_global(addr_format)) {
+ load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
- if (mode != nir_var_mem_ubo)
+ if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
/* TODO: We should try and provide a better alignment. For OpenCL, we need
load->num_components = num_components;
nir_ssa_dest_init(&load->instr, &load->dest, num_components,
intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
- nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ assert(load->dest.ssa.bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
+ /* The Vulkan spec for robustBufferAccess gives us quite a few options
+ * as to what we can do with an OOB read. Unfortunately, returning
+       * undefined values isn't one of them, so we return an actual zero.
+ */
+ nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
+ load->dest.ssa.bit_size);
+
+ const unsigned load_size =
+ (load->dest.ssa.bit_size / 8) * load->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
+
+ nir_builder_instr_insert(b, &load->instr);
+
+ nir_pop_if(b, NULL);
+
+ return nir_if_phi(b, &load->dest.ssa, zero);
+ } else {
+ nir_builder_instr_insert(b, &load->instr);
+ return &load->dest.ssa;
+ }
}
static void
nir_intrinsic_op op;
switch (mode) {
- case nir_var_ssbo:
- op = nir_intrinsic_store_ssbo;
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = nir_intrinsic_store_global;
+ else
+ op = nir_intrinsic_store_ssbo;
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_store_global;
break;
default:
unreachable("Unsupported explicit IO variable mode");
nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
store->src[0] = nir_src_for_ssa(value);
- store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
- store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ if (addr_format_is_global(addr_format)) {
+ store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
nir_intrinsic_set_write_mask(store, write_mask);
assert(value->num_components == 1 ||
value->num_components == intrin->num_components);
store->num_components = value->num_components;
- nir_builder_instr_insert(b, &store->instr);
+
+ assert(value->bit_size % 8 == 0);
+
+ if (addr_format_needs_bounds_check(addr_format)) {
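+      /* An out-of-bounds store is simply discarded. */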
+ const unsigned store_size = (value->bit_size / 8) * store->num_components;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));
+
+ nir_builder_instr_insert(b, &store->instr);
+
+ nir_pop_if(b, NULL);
+ } else {
+ nir_builder_instr_insert(b, &store->instr);
+ }
}
static nir_ssa_def *
nir_intrinsic_op op;
switch (mode) {
- case nir_var_ssbo:
- switch (intrin->intrinsic) {
-#define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_ssbo_##O; break;
- OP(atomic_exchange)
- OP(atomic_comp_swap)
- OP(atomic_add)
- OP(atomic_imin)
- OP(atomic_umin)
- OP(atomic_imax)
- OP(atomic_umax)
- OP(atomic_and)
- OP(atomic_or)
- OP(atomic_xor)
- OP(atomic_fadd)
- OP(atomic_fmin)
- OP(atomic_fmax)
- OP(atomic_fcomp_swap)
-#undef OP
- default:
- unreachable("Invalid SSBO atomic");
- }
+ case nir_var_mem_ssbo:
+ if (addr_format_is_global(addr_format))
+ op = global_atomic_for_deref(intrin->intrinsic);
+ else
+ op = ssbo_atomic_for_deref(intrin->intrinsic);
+ break;
+ case nir_var_mem_global:
+ assert(addr_format_is_global(addr_format));
+ op = global_atomic_for_deref(intrin->intrinsic);
break;
default:
unreachable("Unsupported explicit IO variable mode");
nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);
- atomic->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
- atomic->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ unsigned src = 0;
+ if (addr_format_is_global(addr_format)) {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
+ } else {
+ atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
+ atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
+ }
for (unsigned i = 0; i < num_data_srcs; i++) {
- assert(intrin->src[1 + i].is_ssa);
- atomic->src[2 + i] = nir_src_for_ssa(intrin->src[1 + i].ssa);
+ atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
}
+ /* Global atomics don't have access flags because they assume that the
+ * address may be non-uniform.
+ */
+ if (!addr_format_is_global(addr_format))
+ nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));
+
assert(intrin->dest.ssa.num_components == 1);
nir_ssa_dest_init(&atomic->instr, &atomic->dest,
1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
- nir_builder_instr_insert(b, &atomic->instr);
- return &atomic->dest.ssa;
-}
+ assert(atomic->dest.ssa.bit_size % 8 == 0);
-static void
-lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
- nir_address_format addr_format)
-{
- /* Just delete the deref if it's not used. We can't use
- * nir_deref_instr_remove_if_unused here because it may remove more than
- * one deref which could break our list walking since we walk the list
- * backwards.
- */
- assert(list_empty(&deref->dest.ssa.if_uses));
- if (list_empty(&deref->dest.ssa.uses)) {
- nir_instr_remove(&deref->instr);
- return;
- }
+ if (addr_format_needs_bounds_check(addr_format)) {
+ const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
+ nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));
- b->cursor = nir_after_instr(&deref->instr);
+ nir_builder_instr_insert(b, &atomic->instr);
- /* Var derefs must be lowered away by the driver */
- assert(deref->deref_type != nir_deref_type_var);
-
- assert(deref->parent.is_ssa);
- nir_ssa_def *parent_addr = deref->parent.ssa;
+ nir_pop_if(b, NULL);
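+
+      /* An out-of-bounds atomic returns an undefined value. */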
+ return nir_if_phi(b, &atomic->dest.ssa,
+ nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
+ } else {
+ nir_builder_instr_insert(b, &atomic->instr);
+ return &atomic->dest.ssa;
+ }
+}
- nir_ssa_def *addr = NULL;
+nir_ssa_def *
+nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
+ nir_ssa_def *base_addr,
+ nir_address_format addr_format)
+{
assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
- unreachable("Must be lowered by the driver");
- break;
+ assert(deref->mode == nir_var_shader_in);
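+      /* Var derefs only occur here for kernel inputs; the address is the
+       * byte offset stored in driver_location.
+       */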
+ return nir_imm_intN_t(b, deref->var->data.driver_location,
+ deref->dest.ssa.bit_size);
case nir_deref_type_array: {
nir_deref_instr *parent = nir_deref_instr_parent(deref);
assert(stride > 0);
nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
- index = nir_i2i(b, index, parent_addr->bit_size);
- addr = build_addr_iadd(b, parent_addr, addr_format,
+ index = nir_i2i(b, index, base_addr->bit_size);
+ return build_addr_iadd(b, base_addr, addr_format,
nir_imul_imm(b, index, stride));
- break;
}
case nir_deref_type_ptr_as_array: {
nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
- index = nir_i2i(b, index, parent_addr->bit_size);
+ index = nir_i2i(b, index, base_addr->bit_size);
unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
- addr = build_addr_iadd(b, parent_addr, addr_format,
+ return build_addr_iadd(b, base_addr, addr_format,
nir_imul_imm(b, index, stride));
- break;
}
case nir_deref_type_array_wildcard:
int offset = glsl_get_struct_field_offset(parent->type,
deref->strct.index);
assert(offset >= 0);
- addr = build_addr_iadd_imm(b, parent_addr, addr_format, offset);
- break;
+ return build_addr_iadd_imm(b, base_addr, addr_format, offset);
}
case nir_deref_type_cast:
/* Nothing to do here */
- addr = parent_addr;
- break;
+ return base_addr;
}
- nir_instr_remove(&deref->instr);
- nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
+ unreachable("Invalid NIR deref type");
}
-static void
-lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
- nir_address_format addr_format)
+void
+nir_lower_explicit_io_instr(nir_builder *b,
+ nir_intrinsic_instr *intrin,
+ nir_ssa_def *addr,
+ nir_address_format addr_format)
{
b->cursor = nir_after_instr(&intrin->instr);
assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
assert(vec_stride == 0 || vec_stride >= scalar_size);
- nir_ssa_def *addr = &deref->dest.ssa;
if (intrin->intrinsic == nir_intrinsic_load_deref) {
nir_ssa_def *value;
if (vec_stride > scalar_size) {
nir_instr_remove(&intrin->instr);
}
+static void
+lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
+ nir_address_format addr_format)
+{
+ /* Just delete the deref if it's not used. We can't use
+ * nir_deref_instr_remove_if_unused here because it may remove more than
+    * one deref, which could break our list walking since we walk the list
+ * backwards.
+ */
+ assert(list_empty(&deref->dest.ssa.if_uses));
+ if (list_empty(&deref->dest.ssa.uses)) {
+ nir_instr_remove(&deref->instr);
+ return;
+ }
+
+ b->cursor = nir_after_instr(&deref->instr);
+
+ nir_ssa_def *base_addr = NULL;
+ if (deref->deref_type != nir_deref_type_var) {
+ assert(deref->parent.is_ssa);
+ base_addr = deref->parent.ssa;
+ }
+
+ nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
+ addr_format);
+
+ nir_instr_remove(&deref->instr);
+ nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
+}
+
+static void
+lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_address_format addr_format)
+{
+ assert(intrin->src[0].is_ssa);
+ nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
+}
+
+static void
+lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
+ nir_address_format addr_format)
+{
+ b->cursor = nir_after_instr(&intrin->instr);
+
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+
+ assert(glsl_type_is_array(deref->type));
+ assert(glsl_get_length(deref->type) == 0);
+ unsigned stride = glsl_get_explicit_stride(deref->type);
+ assert(stride > 0);
+
+ assert(addr_format == nir_address_format_32bit_index_offset);
+ nir_ssa_def *addr = &deref->dest.ssa;
+ nir_ssa_def *index = addr_to_index(b, addr, addr_format);
+ nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
+
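+   /* Ask the back-end for the buffer size, then compute
+    * (buffer size - array offset) / stride.
+    */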
+ nir_intrinsic_instr *bsize =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
+ bsize->src[0] = nir_src_for_ssa(index);
+ nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
+ nir_builder_instr_insert(b, &bsize->instr);
+
+ nir_ssa_def *arr_size =
+ nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
+ nir_imm_int(b, stride));
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
+ nir_instr_remove(&intrin->instr);
+}
+
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
nir_address_format addr_format)
break;
}
+ case nir_intrinsic_deref_buffer_array_length: {
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ if (deref->mode & modes) {
+ lower_explicit_io_array_length(&b, intrin, addr_format);
+ progress = true;
+ }
+ break;
+ }
+
default:
break;
}
case nir_intrinsic_load_output:
case nir_intrinsic_load_shared:
case nir_intrinsic_load_uniform:
+ case nir_intrinsic_load_global:
+ case nir_intrinsic_load_scratch:
return &instr->src[0];
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
case nir_intrinsic_load_interpolated_input:
case nir_intrinsic_store_output:
case nir_intrinsic_store_shared:
+ case nir_intrinsic_store_global:
+ case nir_intrinsic_store_scratch:
return &instr->src[1];
case nir_intrinsic_store_ssbo:
case nir_intrinsic_store_per_vertex_output:
return NULL;
}
}
+
+/**
+ * Return the numeric constant that identifies a NULL pointer for each
+ * address format.
+ */
+const nir_const_value *
+nir_address_format_null_value(nir_address_format addr_format)
+{
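+   /* Zero is NULL for the global formats; the index/offset formats
+    * reserve ~0 instead.
+    */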
+   static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
+ [nir_address_format_32bit_global] = {{0}},
+ [nir_address_format_64bit_global] = {{0}},
+ [nir_address_format_64bit_bounded_global] = {{0}},
+ [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
+ [nir_address_format_32bit_offset] = {{.u32 = ~0}},
+ [nir_address_format_logical] = {{.u32 = ~0}},
+ };
+
+ assert(addr_format < ARRAY_SIZE(null_values));
+ return null_values[addr_format];
+}