#include "nir_builder.h"
#include "nir_deref.h"
+#include "util/u_math.h"
+
struct lower_io_state {
void *dead_ctx;
nir_builder builder;
unsigned size = type_size((*p)->type, bts);
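+ /* amul is a multiply intended for address calculations; back-ends may
+ * lower it to a cheaper (e.g. 24-bit) multiply when the addressable
+ * range allows.
+ */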
nir_ssa_def *mul =
- nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
+ nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
offset = nir_iadd(b, offset, mul);
} else if ((*p)->deref_type == nir_deref_type_struct) {
if (nir->info.stage == MESA_SHADER_FRAGMENT &&
nir->options->use_interpolated_input_intrinsics &&
var->data.interpolation != INTERP_MODE_FLAT) {
- assert(vertex_index == NULL);
-
- nir_intrinsic_op bary_op;
- if (var->data.sample ||
- (state->options & nir_lower_io_force_sample_interpolation))
- bary_op = nir_intrinsic_load_barycentric_sample;
- else if (var->data.centroid)
- bary_op = nir_intrinsic_load_barycentric_centroid;
- else
- bary_op = nir_intrinsic_load_barycentric_pixel;
-
- barycentric = nir_load_barycentric(&state->builder, bary_op,
- var->data.interpolation);
- op = nir_intrinsic_load_interpolated_input;
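+ /* INTERP_MODE_EXPLICIT inputs (e.g. interpolateAtVertex()) fetch a
+ * single vertex's value directly, so they lower to load_input_vertex
+ * and require a vertex index instead of a barycentric.
+ */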
+ if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
+ assert(vertex_index != NULL);
+ op = nir_intrinsic_load_input_vertex;
+ } else {
+ assert(vertex_index == NULL);
+
+ nir_intrinsic_op bary_op;
+ if (var->data.sample ||
+ (state->options & nir_lower_io_force_sample_interpolation))
+ bary_op = nir_intrinsic_load_barycentric_sample;
+ else if (var->data.centroid)
+ bary_op = nir_intrinsic_load_barycentric_centroid;
+ else
+ bary_op = nir_intrinsic_load_barycentric_pixel;
+
+ barycentric = nir_load_barycentric(&state->builder, bary_op,
+ var->data.interpolation);
+ op = nir_intrinsic_load_interpolated_input;
+ }
} else {
op = vertex_index ? nir_intrinsic_load_per_vertex_input :
nir_intrinsic_load_input;
state->type_size(var->type, var->data.bindless));
if (load->intrinsic == nir_intrinsic_load_input ||
+ load->intrinsic == nir_intrinsic_load_input_vertex ||
load->intrinsic == nir_intrinsic_load_uniform)
nir_intrinsic_set_type(load, type);
}
return nir_vec(b, comp64, intrin->dest.ssa.num_components);
+ } else if (intrin->dest.ssa.bit_size == 1) {
+ /* Booleans are stored as 32-bit; load 32 bits and convert the result
+ * back to 1-bit with b2b1.
+ */
+ assert(glsl_type_is_boolean(type));
+ return nir_b2b1(&state->builder,
+ emit_load(state, vertex_index, var, offset, component,
+ intrin->dest.ssa.num_components, 32,
+ nir_type_bool32));
} else {
return emit_load(state, vertex_index, var, offset, component,
intrin->dest.ssa.num_components,
write_mask >>= num_comps;
offset = nir_iadd_imm(b, offset, slot_size);
}
+ } else if (intrin->src[1].ssa->bit_size == 1) {
+ /* Booleans are stored as 32-bit; convert the value to 32-bit with
+ * b2b32 before storing.
+ */
+ assert(glsl_type_is_boolean(type));
+ nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
+ emit_store(state, b32_val, vertex_index, var, offset,
+ component, intrin->num_components,
+ nir_intrinsic_write_mask(intrin),
+ nir_type_bool32);
} else {
emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
component, intrin->num_components,
nir_builder *b = &state->builder;
assert(var->data.mode == nir_var_shader_in);
- /* Ignore interpolateAt() for flat variables - flat is flat. */
- if (var->data.interpolation == INTERP_MODE_FLAT)
- return lower_load(intrin, state, NULL, var, offset, component, type);
+ /* Ignore interpolateAt() for flat variables - flat is flat. Lower
+ * interpolateAtVertex() for explicit variables.
+ */
+ if (var->data.interpolation == INTERP_MODE_FLAT ||
+ var->data.interpolation == INTERP_MODE_EXPLICIT) {
+ nir_ssa_def *vertex_index = NULL;
+
+ if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
+ assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
+ vertex_index = intrin->src[1].ssa;
+ }
+
+ return lower_load(intrin, state, vertex_index, var, offset, component, type);
+ }
/* None of the supported APIs allow interpolation on 64-bit things */
assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);
nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
- intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
+ intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
+ intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
nir_builder_instr_insert(b, &bary_setup->instr);
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
case nir_intrinsic_interp_deref_at_offset:
+ case nir_intrinsic_interp_deref_at_vertex:
/* We can optionally lower these to load_interpolated_input */
if (options->use_interpolated_input_intrinsics)
break;
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
case nir_intrinsic_interp_deref_at_offset:
+ case nir_intrinsic_interp_deref_at_vertex:
assert(vertex_index == NULL);
replacement = lower_interpolate_at(intrin, state, var, offset,
component_offset, deref->type);
if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
+ unsigned bit_size = intrin->dest.ssa.bit_size;
+ if (bit_size == 1) {
+ /* TODO: Make the native bool bit_size an option. */
+ bit_size = 32;
+ }
+
/* TODO: We should try and provide a better alignment. For OpenCL, we need
* to plumb the alignment through from SPIR-V when we have one.
*/
- nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);
+ nir_intrinsic_set_align(load, bit_size / 8, 0);
assert(intrin->dest.is_ssa);
load->num_components = num_components;
nir_ssa_dest_init(&load->instr, &load->dest, num_components,
- intrin->dest.ssa.bit_size, intrin->dest.ssa.name);
+ bit_size, intrin->dest.ssa.name);
- assert(load->dest.ssa.bit_size % 8 == 0);
+ assert(bit_size % 8 == 0);
+ nir_ssa_def *result;
if (addr_format_needs_bounds_check(addr_format)) {
/* The Vulkan spec for robustBufferAccess gives us quite a few options
* as to what we can do with an OOB read. Unfortunately, returning
* undefined values isn't one of them, so we return an actual zero.
*/
- nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
- load->dest.ssa.bit_size);
+ nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);
- const unsigned load_size =
- (load->dest.ssa.bit_size / 8) * load->num_components;
+ const unsigned load_size = (bit_size / 8) * load->num_components;
nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));
nir_builder_instr_insert(b, &load->instr);
nir_pop_if(b, NULL);
- return nir_if_phi(b, &load->dest.ssa, zero);
+ result = nir_if_phi(b, &load->dest.ssa, zero);
} else {
nir_builder_instr_insert(b, &load->instr);
- return &load->dest.ssa;
+ result = &load->dest.ssa;
}
+
+ if (intrin->dest.ssa.bit_size == 1) {
+ /* For shared memory, we can use NIR's (and hence the back-end's)
+ * standard encoding for booleans rather than forcing a 0/1 value;
+ * this should save an instruction or two.
+ */
+ if (mode == nir_var_mem_shared)
+ result = nir_b2b1(b, result);
+ else
+ result = nir_i2b(b, result);
+ }
+
+ return result;
}
static void
nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
+ if (value->bit_size == 1) {
+ /* For shared memory, we can use NIR's (and hence the back-end's)
+ * standard encoding for booleans rather than forcing a 0/1 value;
+ * this should save an instruction or two.
+ *
+ * TODO: Make the native bool bit_size an option.
+ */
+ if (mode == nir_var_mem_shared)
+ value = nir_b2b32(b, value);
+ else
+ value = nir_b2i(b, value, 32);
+ }
+
store->src[0] = nir_src_for_ssa(value);
if (addr_format_is_global(addr_format)) {
store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
index = nir_i2i(b, index, base_addr->bit_size);
return build_addr_iadd(b, base_addr, addr_format,
- nir_imul_imm(b, index, stride));
+ nir_amul_imm(b, index, stride));
}
case nir_deref_type_ptr_as_array: {
index = nir_i2i(b, index, base_addr->bit_size);
unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
return build_addr_iadd(b, base_addr, addr_format,
- nir_imul_imm(b, index, stride));
+ nir_amul_imm(b, index, stride));
}
case nir_deref_type_array_wildcard:
* one deref which could break our list walking since we walk the list
* backwards.
*/
- assert(list_empty(&deref->dest.ssa.if_uses));
- if (list_empty(&deref->dest.ssa.uses)) {
+ assert(list_is_empty(&deref->dest.ssa.if_uses));
+ if (list_is_empty(&deref->dest.ssa.uses)) {
nir_instr_remove(&deref->instr);
return;
}
return progress;
}
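+/* Rewrites deref types in the given modes to the explicitly laid out
+ * types returned by type_info and updates ptr_as_array cast strides to
+ * the matching align(size, alignment). Returns true on any change.
+ */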
+static bool
+nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
+ nir_variable_mode modes,
+ glsl_type_size_align_func type_info)
+{
+ bool progress = false;
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_deref)
+ continue;
+
+ nir_deref_instr *deref = nir_instr_as_deref(instr);
+ if (!(deref->mode & modes))
+ continue;
+
+ unsigned size, alignment;
+ const struct glsl_type *new_type =
+ glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
+ if (new_type != deref->type) {
+ progress = true;
+ deref->type = new_type;
+ }
+ if (deref->deref_type == nir_deref_type_cast) {
+ /* See also glsl_type::get_explicit_type_for_size_align() */
+ unsigned new_stride = align(size, alignment);
+ if (new_stride != deref->cast.ptr_stride) {
+ deref->cast.ptr_stride = new_stride;
+ progress = true;
+ }
+ }
+ }
+ }
+
+ if (progress) {
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance |
+ nir_metadata_live_ssa_defs |
+ nir_metadata_loop_analysis);
+ }
+
+ return progress;
+}
+
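+/* Lays out each variable in the list: the type is replaced by its
+ * explicit equivalent and data.driver_location receives the variable's
+ * aligned byte offset. For shared variables, the final offset is also
+ * recorded as the shader's shared size.
+ */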
+static bool
+lower_vars_to_explicit(nir_shader *shader,
+ struct exec_list *vars, nir_variable_mode mode,
+ glsl_type_size_align_func type_info)
+{
+ bool progress = false;
+ unsigned offset = 0;
+ nir_foreach_variable(var, vars) {
+ unsigned size, align;
+ const struct glsl_type *explicit_type =
+ glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
+
+ if (explicit_type != var->type) {
+ progress = true;
+ var->type = explicit_type;
+ }
+
+ var->data.driver_location = ALIGN_POT(offset, align);
+ offset = var->data.driver_location + size;
+ }
+
+ if (mode == nir_var_mem_shared) {
+ shader->info.cs.shared_size = offset;
+ shader->num_shared = offset;
+ }
+
+ return progress;
+}
+
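+/* Assigns explicit types and data.driver_location byte offsets to all
+ * variables in the given modes and propagates the new types through the
+ * deref chains. A typical caller runs this before nir_lower_explicit_io;
+ * a minimal sketch for shared memory, where shared_type_info stands in
+ * for a driver-provided glsl_type_size_align_func:
+ *
+ *    nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared,
+ *                                     shared_type_info);
+ *    nir_lower_explicit_io(nir, nir_var_mem_shared,
+ *                          nir_address_format_32bit_offset);
+ */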
+bool
+nir_lower_vars_to_explicit_types(nir_shader *shader,
+ nir_variable_mode modes,
+ glsl_type_size_align_func type_info)
+{
+ /* TODO: Situations which need to be handled to support more modes:
+ * - row-major matrices
+ * - compact shader inputs/outputs
+ * - interface types
+ */
+ ASSERTED nir_variable_mode supported = nir_var_mem_shared |
+ nir_var_shader_temp | nir_var_function_temp;
+ assert(!(modes & ~supported) && "unsupported");
+
+ bool progress = false;
+
+ if (modes & nir_var_mem_shared)
+ progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
+ if (modes & nir_var_shader_temp)
+ progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ if (modes & nir_var_function_temp)
+ progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);
+
+ progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
+ }
+ }
+
+ return progress;
+}
+
/**
- * Return the offset source for a load/store intrinsic.
+ * Return the offset source for a load/store/atomic intrinsic.
*/
case nir_intrinsic_store_shared:
case nir_intrinsic_store_global:
case nir_intrinsic_store_scratch:
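+ /* SSBO atomics take the buffer index in src[0] and the offset in
+ * src[1], so they share the src[1] return with the stores above.
+ */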
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_fadd:
+ case nir_intrinsic_ssbo_atomic_fmin:
+ case nir_intrinsic_ssbo_atomic_fmax:
+ case nir_intrinsic_ssbo_atomic_fcomp_swap:
return &instr->src[1];
case nir_intrinsic_store_ssbo:
case nir_intrinsic_store_per_vertex_output: