}
return nir_vec(b, comp64, intrin->dest.ssa.num_components);
+ } else if (intrin->dest.ssa.bit_size == 1) {
+ /* Booleans are 32-bit */
+ assert(glsl_type_is_boolean(type));
+ return nir_b2b1(&state->builder,
+ emit_load(state, vertex_index, var, offset, component,
+ intrin->dest.ssa.num_components, 32,
+ nir_type_bool32));
} else {
return emit_load(state, vertex_index, var, offset, component,
intrin->dest.ssa.num_components,
write_mask >>= num_comps;
offset = nir_iadd_imm(b, offset, slot_size);
}
+ } else if (intrin->dest.ssa.bit_size == 1) {
+ /* Booleans are 32-bit */
+ assert(glsl_type_is_boolean(type));
+ nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
+ emit_store(state, b32_val, vertex_index, var, offset,
+ component, intrin->num_components,
+ nir_intrinsic_write_mask(intrin),
+ nir_type_bool32);
} else {
emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
component, intrin->num_components,
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
nir_variable_mode mode = deref->mode;
-
+ assert(util_is_power_of_two_nonzero(mode));
if ((state->modes & mode) == 0)
continue;
- if (mode != nir_var_shader_in &&
- mode != nir_var_shader_out &&
- mode != nir_var_mem_shared &&
- mode != nir_var_uniform)
- continue;
-
nir_variable *var = nir_deref_instr_get_variable(deref);
b->cursor = nir_before_instr(instr);
state.type_size = type_size;
state.options = options;
+ ASSERTED nir_variable_mode supported_modes =
+ nir_var_shader_in | nir_var_shader_out |
+ nir_var_mem_shared | nir_var_uniform;
+ assert(!(modes & ~supported_modes));
+
nir_foreach_block(block, impl) {
progress |= nir_lower_io_block(block, &state);
}
assert(addr->num_components == 2);
return nir_vec2(b, nir_channel(b, addr, 0),
nir_iadd(b, nir_channel(b, addr, 1), offset));
+ case nir_address_format_vec2_index_32bit_offset:
+ assert(addr->num_components == 3);
+ return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
+ nir_iadd(b, nir_channel(b, addr, 2), offset));
case nir_address_format_logical:
unreachable("Unsupported address format");
}
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   /* Extract the buffer-index component(s) of a lowered address.
    *
    * For 32bit_index_offset the address is a vec2 of (index, offset) and
    * the index is the single first channel.  For vec2_index_32bit_offset
    * the address is a vec3 of (index.x, index.y, offset) and the index is
    * the first two channels, returned as a vec2.
    */
   if (addr_format == nir_address_format_32bit_index_offset) {
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 0);
   } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
      assert(addr->num_components == 3);
      /* Mask 0x3 selects channels 0 and 1 (the vec2 index). */
      return nir_channels(b, addr, 0x3);
   } else {
      unreachable("bad address format for index");
   }
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   /* Extract the byte-offset component of a lowered address.
    *
    * For 32bit_index_offset the address is a vec2 of (index, offset); for
    * vec2_index_32bit_offset it is a vec3 of (index.x, index.y, offset).
    * In both cases the offset is the last channel.
    */
   if (addr_format == nir_address_format_32bit_index_offset) {
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 1);
   } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
      assert(addr->num_components == 3);
      return nir_channel(b, addr, 2);
   } else {
      unreachable("bad address format for offset");
   }
}
/** Returns true if the given address format resolves to a global address */
nir_u2u64(b, nir_channel(b, addr, 3)));
case nir_address_format_32bit_index_offset:
+ case nir_address_format_vec2_index_32bit_offset:
case nir_address_format_32bit_offset:
case nir_address_format_logical:
unreachable("Cannot get a 64-bit address with this address format");
load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
}
- if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
+ if (mode != nir_var_shader_in && mode != nir_var_mem_shared)
nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));
unsigned bit_size = intrin->dest.ssa.bit_size;
unsigned stride = glsl_get_explicit_stride(deref->type);
assert(stride > 0);
- assert(addr_format == nir_address_format_32bit_index_offset);
+ assert(addr_format == nir_address_format_32bit_index_offset ||
+ addr_format == nir_address_format_vec2_index_32bit_offset);
nir_ssa_def *addr = &deref->dest.ssa;
nir_ssa_def *index = addr_to_index(b, addr, addr_format);
nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);
[nir_address_format_64bit_global] = {{0}},
[nir_address_format_64bit_bounded_global] = {{0}},
[nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
+ [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
[nir_address_format_32bit_offset] = {{.u32 = ~0}},
[nir_address_format_logical] = {{.u32 = ~0}},
};
case nir_address_format_64bit_global:
case nir_address_format_64bit_bounded_global:
case nir_address_format_32bit_index_offset:
+ case nir_address_format_vec2_index_32bit_offset:
case nir_address_format_32bit_offset:
return nir_ball_iequal(b, addr0, addr1);
/* Assume the same buffer index. */
return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));
+ case nir_address_format_vec2_index_32bit_offset:
+ assert(addr0->num_components == 3);
+ assert(addr1->num_components == 3);
+ /* Assume the same buffer index. */
+ return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));
+
case nir_address_format_logical:
unreachable("Unsupported address format");
}