struct lower_io_state {
void *dead_ctx;
nir_builder builder;
- int (*type_size)(const struct glsl_type *type);
+ int (*type_size)(const struct glsl_type *type, bool);
nir_variable_mode modes;
nir_lower_io_options options;
};
/*
 * NOTE(review): this region is an unresolved unified-diff hunk, not
 * compilable C.  '-' lines are the old code, '+' lines the replacement,
 * and unchanged context has been elided — in particular the loop over
 * var_list that encloses the dangling `continue;` and defines `var`.
 * The change threads a bool through the type_size callback: shader
 * in/out variables, and variables with data.bindless set, are sized
 * with bindless sizing.
 *
 * Assigns packed driver_location offsets to every variable in var_list
 * and writes the total size consumed to *size.
 */
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *, bool))
{
unsigned location = 0;
/* Elided: nir_foreach_variable-style loop header and the condition
 * guarding this `continue;` — restore from the full source. */
continue;
var->data.driver_location = location;
- location += type_size(var->type);
+ bool bindless_type_size = var->data.mode == nir_var_shader_in ||
+ var->data.mode == nir_var_shader_out ||
+ var->data.bindless;
+ location += type_size(var->type, bindless_type_size);
}
*size = location;
/*
 * NOTE(review): several unified-diff hunks of one function with the
 * context between them elided; not compilable as-is.  The change adds a
 * `bts` (bindless type size) bool parameter and forwards it to every
 * type_size() call so array strides, struct field offsets, and the
 * vec4-slot size are computed consistently with the caller's sizing mode.
 *
 * Builds an SSA offset expression for an I/O deref chain; also returns
 * the per-vertex index through *vertex_index and the starting component
 * through *component.
 */
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
nir_ssa_def **vertex_index,
- int (*type_size)(const struct glsl_type *),
- unsigned *component)
+ int (*type_size)(const struct glsl_type *, bool),
+ unsigned *component, bool bts)
{
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
/* Elided hunk boundary: `index` comes from code not shown here. */
const unsigned total_offset = *component + index;
const unsigned slot_offset = total_offset / 4;
*component = total_offset % 4;
/* Offset is measured in units of a vec4 slot. */
- return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset);
+ return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
}
/* Just emit code and let constant-folding go to town */
for (; *p; p++) {
if ((*p)->deref_type == nir_deref_type_array) {
/* Array step = size of one element in location slots. */
- unsigned size = type_size((*p)->type);
+ unsigned size = type_size((*p)->type, bts);
nir_ssa_def *mul =
nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);
/* Struct member: accumulate the sizes of all preceding fields. */
unsigned field_offset = 0;
for (unsigned i = 0; i < (*p)->strct.index; i++) {
- field_offset += type_size(glsl_get_struct_field(parent->type, i));
+ field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
}
offset = nir_iadd_imm(b, offset, field_offset);
} else {
/*
 * NOTE(review): unresolved diff hunks from the intrinsic-lowering
 * helper(s) of this pass; the enclosing function header(s) and most
 * context are elided.  Two changes are visible:
 *   1. load_uniform's range is now sized with the two-argument
 *      type_size, passing var->data.bindless.
 *   2. get_io_offset() gains a trailing bindless_type_size argument,
 *      true for shader in/out modes or bindless variables — mirroring
 *      the computation in nir_assign_var_locations.
 */
nir_intrinsic_set_component(load, component);
if (load->intrinsic == nir_intrinsic_load_uniform)
- nir_intrinsic_set_range(load, state->type_size(var->type));
+ nir_intrinsic_set_range(load,
+ state->type_size(var->type, var->data.bindless));
if (vertex_index) {
load->src[0] = nir_src_for_ssa(vertex_index);
/* Elided hunk boundary: the lines below belong to a different spot
 * (presumably the caller that computes the offset) — confirm against
 * the full source. */
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
unsigned component_offset = var->data.location_frac;
+ bool bindless_type_size = mode == nir_var_shader_in ||
+ mode == nir_var_shader_out ||
+ var->data.bindless;
offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
- state->type_size, &component_offset);
+ state->type_size, &component_offset,
+ bindless_type_size);
nir_intrinsic_instr *replacement;
/*
 * NOTE(review): diff hunk; the function body after the first declaration
 * is elided.  Only the signature changes: the type_size callback gains
 * the bindless bool parameter, matching struct lower_io_state.
 *
 * Per-function-impl entry point of the pass; returns whether progress
 * was made (presumably — the return statement is not visible here).
 */
static bool
nir_lower_io_impl(nir_function_impl *impl,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
struct lower_io_state state;
/*
 * NOTE(review): diff hunk; the body continues past the end of this
 * fragment.  Only the signature changes here: the public type_size
 * callback now takes the bindless bool, so every caller of
 * nir_lower_io must be updated to the two-argument callback type.
 *
 * Shader-level entry point; presumably iterates the shader's functions
 * and calls nir_lower_io_impl, accumulating `progress`.
 */
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options options)
{
bool progress = false;