addr_get_offset_bit_size(addr, addr_format)));
}
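+/* Build the address of a variable in the given address format.  Global
+ * formats add the variable's driver_location to a per-mode base pointer;
+ * offset formats return the driver_location as the offset directly.
+ */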
+static nir_ssa_def *
+build_addr_for_var(nir_builder *b, nir_variable *var,
+ nir_address_format addr_format)
+{
+ assert(var->data.mode & (nir_var_uniform | nir_var_mem_shared |
+ nir_var_shader_temp | nir_var_function_temp |
+ nir_var_mem_constant));
+
+ const unsigned num_comps = nir_address_format_num_components(addr_format);
+ const unsigned bit_size = nir_address_format_bit_size(addr_format);
+
+ switch (addr_format) {
+ case nir_address_format_32bit_global:
+ case nir_address_format_64bit_global: {
+ nir_ssa_def *base_addr;
+ switch (var->data.mode) {
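+ /* For scratch, BASE = 0 selects the shader-wide scratch base (used for
+ * shader_temp) while BASE = 1 selects the base of the current stack
+ * frame (used for function_temp).
+ */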
+ case nir_var_shader_temp:
+ base_addr = nir_load_scratch_base_ptr(b, num_comps, bit_size, 0);
+ break;
+
+ case nir_var_function_temp:
+ base_addr = nir_load_scratch_base_ptr(b, num_comps, bit_size, 1);
+ break;
+
+ case nir_var_mem_constant:
+ base_addr = nir_load_constant_base_ptr(b, num_comps, bit_size);
+ break;
+
+ default:
+ unreachable("Unsupported variable mode");
+ }
+
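+ /* driver_location was assigned by the explicit-type lowering below and
+ * holds the variable's byte offset from its mode's base pointer.
+ */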
+ return build_addr_iadd_imm(b, base_addr, addr_format,
+ var->data.driver_location);
+ }
+
+ case nir_address_format_32bit_offset:
+ assert(var->data.driver_location <= UINT32_MAX);
+ return nir_imm_int(b, var->data.driver_location);
+
+ case nir_address_format_32bit_offset_as_64bit:
+ assert(var->data.driver_location <= UINT32_MAX);
+ return nir_imm_int64(b, var->data.driver_location);
+
+ default:
+ unreachable("Unsupported address format");
+ }
+}
+
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
nir_address_format addr_format)
assert(addr_format_is_global(addr_format));
op = nir_intrinsic_load_global;
break;
- case nir_var_shader_in:
+ case nir_var_uniform:
assert(addr_format_is_offset(addr_format));
+ assert(b->shader->info.stage == MESA_SHADER_KERNEL);
op = nir_intrinsic_load_kernel_input;
break;
case nir_var_mem_shared:
op = nir_intrinsic_load_global;
}
break;
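+ /* Constant memory is either a 32-bit offset into the shader's
+ * constant_data blob or a driver-provided global pointer.
+ */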
+ case nir_var_mem_constant:
+ if (addr_format_is_offset(addr_format)) {
+ op = nir_intrinsic_load_constant;
+ } else {
+ assert(addr_format_is_global(addr_format));
+ op = nir_intrinsic_load_global_constant;
+ }
+ break;
default:
unreachable("Unsupported explicit IO variable mode");
}
if (nir_intrinsic_has_access(load))
nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
+ if (op == nir_intrinsic_load_constant) {
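+ /* The offset source already includes the variable's driver_location,
+ * so BASE stays 0; RANGE covers the entire constant_data blob.
+ */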
+ nir_intrinsic_set_base(load, 0);
+ nir_intrinsic_set_range(load, b->shader->constant_data_size);
+ }
+
unsigned bit_size = intrin->dest.ssa.bit_size;
if (bit_size == 1) {
/* TODO: Make the native bool bit_size an option. */
assert(deref->dest.is_ssa);
switch (deref->deref_type) {
case nir_deref_type_var:
- assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
- nir_var_shader_temp | nir_var_function_temp));
- if (addr_format_is_global(addr_format)) {
- assert(nir_var_shader_temp | nir_var_function_temp);
- base_addr =
- nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
- nir_address_format_num_components(addr_format),
- nir_address_format_bit_size(addr_format));
- return build_addr_iadd_imm(b, base_addr, addr_format,
- deref->var->data.driver_location);
- } else {
- assert(deref->var->data.driver_location <= UINT32_MAX);
- return nir_imm_intN_t(b, deref->var->data.driver_location,
- deref->dest.ssa.bit_size);
- }
+ return build_addr_for_var(b, deref->var, addr_format);
case nir_deref_type_array: {
nir_deref_instr *parent = nir_deref_instr_parent(deref);
nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
addr_format);
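+ /* The new address must exactly match the deref's SSA destination since
+ * we rewrite all of its uses below.
+ */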
+ assert(addr->bit_size == deref->dest.ssa.bit_size);
+ assert(addr->num_components == deref->dest.ssa.num_components);
nir_instr_remove(&deref->instr);
nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
case nir_var_mem_shared:
offset = 0;
break;
+ case nir_var_mem_constant:
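+ /* Start after any constant data that already exists so we never
+ * clobber it.
+ */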
+ offset = shader->constant_data_size;
+ break;
default:
unreachable("Unsupported mode");
}
const struct glsl_type *explicit_type =
glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);
- if (explicit_type != var->type) {
- progress = true;
+ if (explicit_type != var->type)
var->type = explicit_type;
- }
var->data.driver_location = ALIGN_POT(offset, align);
offset = var->data.driver_location + size;
+ progress = true;
}
switch (mode) {
break;
case nir_var_mem_shared:
shader->info.cs.shared_size = offset;
- shader->num_shared = offset;
+ shader->shared_size = offset;
+ break;
+ case nir_var_mem_constant:
+ shader->constant_data_size = offset;
break;
default:
unreachable("Unsupported mode");
return progress;
}
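+/* Recursively write a nir_constant into the constant_data blob, honoring
+ * the explicit strides and field offsets assigned to the type.
+ */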
+static void
+write_constant(void *dst, const nir_constant *c, const struct glsl_type *type)
+{
+ if (glsl_type_is_vector_or_scalar(type)) {
+ const unsigned num_components = glsl_get_vector_elements(type);
+ const unsigned bit_size = glsl_get_bit_size(type);
+ if (bit_size == 1) {
+ /* Booleans are special-cased to be 32-bit
+ *
+ * TODO: Make the native bool bit_size an option.
+ */
+ for (unsigned i = 0; i < num_components; i++) {
+ int32_t b32 = -(int)c->values[i].b;
+ memcpy((char *)dst + i * 4, &b32, 4);
+ }
+ } else {
+ assert(bit_size >= 8 && bit_size % 8 == 0);
+ const unsigned byte_size = bit_size / 8;
+ for (unsigned i = 0; i < num_components; i++) {
+ /* Annoyingly, thanks to packed structs, we can't make any
+ * assumptions about the alignment of dst. To avoid any strange
+ * issues with unaligned writes, we always use memcpy.
+ */
+ memcpy((char *)dst + i * byte_size, &c->values[i], byte_size);
+ }
+ }
+ } else if (glsl_type_is_array_or_matrix(type)) {
+ const unsigned array_len = glsl_get_length(type);
+ const unsigned stride = glsl_get_explicit_stride(type);
+ assert(stride > 0);
+ const struct glsl_type *elem_type = glsl_get_array_element(type);
+ for (unsigned i = 0; i < array_len; i++)
+ write_constant((char *)dst + i * stride, c->elements[i], elem_type);
+ } else {
+ assert(glsl_type_is_struct_or_ifc(type));
+ const unsigned num_fields = glsl_get_length(type);
+ for (unsigned i = 0; i < num_fields; i++) {
+ const int field_offset = glsl_get_struct_field_offset(type, i);
+ assert(field_offset >= 0);
+ const struct glsl_type *field_type = glsl_get_struct_field(type, i);
+ write_constant((char *)dst + field_offset, c->elements[i], field_type);
+ }
+ }
+}
+
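+/* Assign explicit offsets to all nir_var_mem_constant variables, serialize
+ * their initializers into shader->constant_data, and lower deref types in
+ * every function.
+ */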
+bool
+nir_lower_mem_constant_vars(nir_shader *shader,
+ glsl_type_size_align_func type_info)
+{
+ unsigned old_constant_data_size = shader->constant_data_size;
+ if (!lower_vars_to_explicit(shader, &shader->variables,
+ nir_var_mem_constant, type_info)) {
+ nir_shader_preserve_all_metadata(shader);
+ return false;
+ }
+
+ shader->constant_data = rerzalloc_size(shader, shader->constant_data,
+ old_constant_data_size,
+ shader->constant_data_size);
+
+ nir_foreach_variable_with_modes(var, shader, nir_var_mem_constant) {
+ write_constant((char *)shader->constant_data + var->data.driver_location,
+ var->constant_initializer, var->type);
+ }
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ lower_vars_to_explicit_types_impl(function->impl,
+ nir_var_mem_constant,
+ type_info);
+ }
+
+ return true;
+}
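+
+/* A typical caller runs this pass once sizes are final and then uploads the
+ * blob to the device, e.g. (hypothetical driver code; upload_constants is
+ * not a real helper):
+ *
+ *    if (nir_lower_mem_constant_vars(nir, glsl_get_cl_type_size_align))
+ *       upload_constants(dev, nir->constant_data, nir->constant_data_size);
+ */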
+
/**
* Return the offset source for a load/store intrinsic.
*/
case nir_intrinsic_load_shared:
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_global:
+ case nir_intrinsic_load_global_constant:
case nir_intrinsic_load_scratch:
case nir_intrinsic_load_fs_input_interp_deltas:
case nir_intrinsic_shared_atomic_add:
nir_src_num_components(intrin->src[0]) >= 3;
}
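+ /* Only 64-bit values with at least three components qualify; the old
+ * check was true for any nonzero bit size.
+ */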
- return nir_dest_bit_size(intrin->dest) &&
+ return nir_dest_bit_size(intrin->dest) == 64 &&
nir_dest_num_components(intrin->dest) >= 3;
}