return false;
}

+static unsigned get_number_of_slots(struct lower_io_state *state,
+ const nir_variable *var)
+{
+ const struct glsl_type *type = var->type;
+
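+   /* Per-vertex IO variables are declared as arrays indexed by vertex;
+    * peel off the outer array dimension before measuring how many slots
+    * the underlying type occupies.
+    */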
+ if (nir_is_per_vertex_io(var, state->builder.shader->info.stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ return state->type_size(type, var->data.bindless);
+}
+
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def **vertex_index,
load->intrinsic == nir_intrinsic_load_uniform)
nir_intrinsic_set_type(load, type);
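+
+   /* Uniform loads address the default uniform block rather than a
+    * varying slot, so they carry no IO semantics.
+    */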
+ if (load->intrinsic != nir_intrinsic_load_uniform) {
+ nir_io_semantics semantics = {0};
+ semantics.location = var->data.location;
+ semantics.num_slots = get_number_of_slots(state, var);
+ semantics.fb_fetch_output = var->data.fb_fetch_output;
+ nir_intrinsic_set_io_semantics(load, semantics);
+ }
+
if (vertex_index) {
load->src[0] = nir_src_for_ssa(vertex_index);
load->src[1] = nir_src_for_ssa(offset);
store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);
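+
+   /* nir_io_semantics::gs_streams packs the GS vertex stream of each
+    * component as 2 bits per component. var->data.stream either holds
+    * that packed form already (NIR_STREAM_PACKED) or a single stream
+    * index that applies to every component.
+    */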
+ unsigned gs_streams = 0;
+ if (state->builder.shader->info.stage == MESA_SHADER_GEOMETRY) {
+ if (var->data.stream & NIR_STREAM_PACKED) {
+ gs_streams = var->data.stream & ~NIR_STREAM_PACKED;
+ } else {
+ assert(var->data.stream < 4);
+ gs_streams = 0;
+ for (unsigned i = 0; i < num_components; ++i)
+ gs_streams |= var->data.stream << (2 * i);
+ }
+ }
+
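+   /* For fragment shader outputs, data.index selects the dual-source
+    * blending output (0 or 1); it is 0 for all other stages.
+    */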
+ nir_io_semantics semantics = {0};
+ semantics.location = var->data.location;
+ semantics.num_slots = get_number_of_slots(state, var);
+ semantics.dual_source_blend_index = var->data.index;
+ semantics.gs_streams = gs_streams;
+ nir_intrinsic_set_io_semantics(store, semantics);
+
nir_builder_instr_insert(b, &store->instr);
}
nir_intrinsic_set_base(load, var->data.driver_location);
nir_intrinsic_set_component(load, component);
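+
+   /* Interpolated FS inputs need the same location information as
+    * plain input loads so the driver can pack and link varyings.
+    */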
+ nir_io_semantics semantics = {0};
+ semantics.location = var->data.location;
+ semantics.num_slots = get_number_of_slots(state, var);
+ nir_intrinsic_set_io_semantics(load, semantics);
+
load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
load->src[1] = nir_src_for_ssa(offset);
assert(addr_format_is_global(addr_format));
op = nir_intrinsic_load_global;
break;
- case nir_var_shader_in:
+ case nir_var_uniform:
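+      /* Uniforms take this path only for OpenCL kernel arguments, which
+       * are loaded from the kernel input buffer.
+       */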
assert(addr_format_is_offset(addr_format));
+ assert(b->shader->info.stage == MESA_SHADER_KERNEL);
op = nir_intrinsic_load_kernel_input;
break;
case nir_var_mem_shared:
assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
- assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
+ assert(deref->mode & (nir_var_uniform | nir_var_mem_shared |
nir_var_shader_temp | nir_var_function_temp));
if (addr_format_is_global(addr_format)) {
assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
case nir_intrinsic_load_global:
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_fs_input_interp_deltas:
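+   /* Shared and global atomics keep their address in src[0], just like
+    * the loads above.
+    */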
+ case nir_intrinsic_shared_atomic_add:
+ case nir_intrinsic_shared_atomic_and:
+ case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_exchange:
+ case nir_intrinsic_shared_atomic_fadd:
+ case nir_intrinsic_shared_atomic_fcomp_swap:
+ case nir_intrinsic_shared_atomic_fmax:
+ case nir_intrinsic_shared_atomic_fmin:
+ case nir_intrinsic_shared_atomic_imax:
+ case nir_intrinsic_shared_atomic_imin:
+ case nir_intrinsic_shared_atomic_or:
+ case nir_intrinsic_shared_atomic_umax:
+ case nir_intrinsic_shared_atomic_umin:
+ case nir_intrinsic_shared_atomic_xor:
+ case nir_intrinsic_global_atomic_add:
+ case nir_intrinsic_global_atomic_and:
+ case nir_intrinsic_global_atomic_comp_swap:
+ case nir_intrinsic_global_atomic_exchange:
+ case nir_intrinsic_global_atomic_fadd:
+ case nir_intrinsic_global_atomic_fcomp_swap:
+ case nir_intrinsic_global_atomic_fmax:
+ case nir_intrinsic_global_atomic_fmin:
+ case nir_intrinsic_global_atomic_imax:
+ case nir_intrinsic_global_atomic_imin:
+ case nir_intrinsic_global_atomic_or:
+ case nir_intrinsic_global_atomic_umax:
+ case nir_intrinsic_global_atomic_umin:
+ case nir_intrinsic_global_atomic_xor:
return &instr->src[0];
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
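
+/* A 64-bit value with 3 or more components (e.g. a dvec3 or dvec4) does
+ * not fit in a single vec4 varying slot and therefore occupies two.
+ */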
+static bool is_dual_slot(nir_intrinsic_instr *intrin)
+{
+ if (intrin->intrinsic == nir_intrinsic_store_output ||
+ intrin->intrinsic == nir_intrinsic_store_per_vertex_output) {
+ return nir_src_bit_size(intrin->src[0]) == 64 &&
+ nir_src_num_components(intrin->src[0]) >= 3;
+ }
+
+   return nir_dest_bit_size(intrin->dest) == 64 &&
+          nir_dest_num_components(intrin->dest) >= 3;
+}
+
/**
 * This pass adds constant offsets to the base (const_index[0]) of input/output
nir_src *offset = nir_get_io_offset_src(intrin);
if (nir_src_is_const(*offset)) {
- intrin->const_index[0] += nir_src_as_uint(*offset);
+ unsigned off = nir_src_as_uint(*offset);
+
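+      /* Fold the constant offset into both the driver-visible base and
+       * the semantic location so the two stay consistent.
+       */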
+ nir_intrinsic_set_base(intrin, nir_intrinsic_base(intrin) + off);
+
+ nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
+ sem.location += off;
+      /* Indexing is now direct, so the access covers exactly one slot
+       * (or two for dual-slot types); shrink num_slots to match.
+       */
+      sem.num_slots = is_dual_slot(intrin) ? 2 : 1;
+ nir_intrinsic_set_io_semantics(intrin, sem);
+
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));