#include <limits.h>
#include <assert.h>
#include <math.h>
+#include "util/u_math.h"
-#include "main/imports.h" /* _mesa_bitcount_64 */
#include "main/menums.h" /* BITFIELD64_MASK */
nir_shader *
}
exec_list_make_empty(&shader->functions);
- exec_list_make_empty(&shader->registers);
exec_list_make_empty(&shader->globals);
exec_list_make_empty(&shader->system_values);
- shader->reg_alloc = 0;
shader->num_inputs = 0;
shader->num_outputs = 0;
reg->num_components = 0;
reg->bit_size = 32;
reg->num_array_elems = 0;
- reg->is_packed = false;
reg->name = NULL;
exec_list_push_tail(list, &reg->node);
return reg;
}
-nir_register *
-nir_global_reg_create(nir_shader *shader)
-{
- nir_register *reg = reg_create(shader, &shader->registers);
- reg->index = shader->reg_alloc++;
- reg->is_global = true;
-
- return reg;
-}
-
nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
reg->index = impl->reg_alloc++;
- reg->is_global = false;
return reg;
}
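
/* Illustrative sketch, not part of this change: with nir_global_reg_create
 * removed, passes that still need non-SSA storage allocate a function-local
 * register instead. The one-component, 32-bit shape here is arbitrary.
 */
static nir_register *
alloc_scratch_reg(nir_function_impl *impl)
{
   nir_register *reg = nir_local_reg_create(impl);
   reg->num_components = 1;
   reg->bit_size = 32;
   return reg;
}
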
assert(!"invalid mode");
break;
- case nir_var_local:
+ case nir_var_function_temp:
assert(!"nir_shader_add_variable cannot be used for local variables");
break;
- case nir_var_global:
+ case nir_var_shader_temp:
exec_list_push_tail(&shader->globals, &var->node);
break;
break;
case nir_var_uniform:
- case nir_var_shader_storage:
+ case nir_var_mem_ubo:
+ case nir_var_mem_ssbo:
exec_list_push_tail(&shader->uniforms, &var->node);
break;
- case nir_var_shared:
- assert(shader->info.stage == MESA_SHADER_COMPUTE);
+ case nir_var_mem_shared:
+ assert(gl_shader_stage_is_compute(shader->info.stage));
exec_list_push_tail(&shader->shared, &var->node);
break;
+ case nir_var_mem_global:
+ assert(!"nir_shader_add_variable cannot be used for global memory");
+ break;
+
case nir_var_system_value:
exec_list_push_tail(&shader->system_values, &var->node);
break;
nir_variable *var = rzalloc(impl->function->shader, nir_variable);
var->name = ralloc_strdup(var, name);
var->type = type;
- var->data.mode = nir_var_local;
+ var->data.mode = nir_var_function_temp;
nir_function_impl_add_variable(impl, var);
func->num_params = 0;
func->params = NULL;
func->impl = NULL;
+ func->is_entrypoint = false;
return func;
}
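
/* Illustrative sketch, not part of this change: with the new is_entrypoint
 * flag, callers tag the entry function explicitly after creating it. The
 * "main" name is an assumption; drivers pick their own.
 */
static nir_function *
create_entrypoint(nir_shader *shader)
{
   nir_function *func = nir_function_create(shader, "main");
   func->is_entrypoint = true;
   return func;
}
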
cf_init(&block->cf_node, nir_cf_node_block);
block->successors[0] = block->successors[1] = NULL;
- block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ block->predecessors = _mesa_pointer_set_create(block);
block->imm_dom = NULL;
/* XXX maybe it would be worth it to defer allocation? This
 * way it doesn't get allocated for shader refs that never run
 * nir_calc_dominance? For example, the state tracker creates an
 * initial IR, clones that, runs appropriate lowering passes, passes
 * it to the driver which does common lowering/opt, and then stores a ref
 * which is later used to do state-specific lowering and further
 * opt. Do any of the references not need dominance metadata?
 */
- block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ block->dom_frontier = _mesa_pointer_set_create(block);
exec_list_make_empty(&block->instr_list);
{
nir_if *if_stmt = ralloc(shader, nir_if);
+ if_stmt->control = nir_selection_control_none;
+
cf_init(&if_stmt->cf_node, nir_cf_node_if);
src_init(&if_stmt->condition);
if (deref_type != nir_deref_type_var)
src_init(&instr->parent);
- if (deref_type == nir_deref_type_array)
+ if (deref_type == nir_deref_type_array ||
+ deref_type == nir_deref_type_ptr_as_array)
src_init(&instr->arr.index);
dest_init(&instr->dest);
return instr;
}
+static int8_t default_tg4_offsets[4][2] =
+{
+ { 0, 1 },
+ { 1, 1 },
+ { 1, 0 },
+ { 0, 0 },
+};
+
nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
instr->texture_index = 0;
instr->texture_array_size = 0;
instr->sampler_index = 0;
+ memcpy(instr->tg4_offsets, default_tg4_offsets, sizeof(instr->tg4_offsets));
return instr;
}
tex->num_srcs--;
}
+bool
+nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex)
+{
+ if (tex->op != nir_texop_tg4)
+ return false;
+ return memcmp(tex->tg4_offsets, default_tg4_offsets,
+ sizeof(tex->tg4_offsets)) != 0;
+}
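
/* Illustrative sketch, not part of this change: a lowering pass can use the
 * helper above to leave gathers with the standard footprint alone and only
 * rewrite the non-default ones; lower_explicit_tg4() is a hypothetical
 * callback name.
 */
static bool
lower_explicit_tg4(nir_tex_instr *tex)
{
   /* Offsets match default_tg4_offsets: nothing to lower. */
   if (!nir_tex_instr_has_explicit_tg4_offsets(tex))
      return false;

   /* ... emit one offset gather per entry in tex->tg4_offsets ... */
   return true;
}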
+
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
{
nir_const_value v;
switch (bit_size) {
+ case 1: v.b[0] = i & 1; break;
case 8: v.i8[0] = i; break;
case 16: v.i16[0] = i; break;
case 32: v.i32[0] = i; break;
impl->reg_alloc = index;
}
-void
-nir_index_global_regs(nir_shader *shader)
-{
- unsigned index = 0;
- foreach_list_typed(nir_register, reg, node, &shader->registers) {
- reg->index = index++;
- }
- shader->reg_alloc = index;
-}
-
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
return false;
}
- if (instr->deref_type == nir_deref_type_array) {
+ if (instr->deref_type == nir_deref_type_array ||
+ instr->deref_type == nir_deref_type_ptr_as_array) {
if (!visit_src(&instr->arr.index, cb, state))
return false;
}
return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
+int64_t
+nir_src_comp_as_int(nir_src src, unsigned comp)
+{
+ assert(nir_src_is_const(src));
+ nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
+
+ assert(comp < load->def.num_components);
+ switch (load->def.bit_size) {
+ /* int1_t uses 0/-1 convention */
+ case 1: return -(int)load->value.b[comp];
+ case 8: return load->value.i8[comp];
+ case 16: return load->value.i16[comp];
+ case 32: return load->value.i32[comp];
+ case 64: return load->value.i64[comp];
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
+uint64_t
+nir_src_comp_as_uint(nir_src src, unsigned comp)
+{
+ assert(nir_src_is_const(src));
+ nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
+
+ assert(comp < load->def.num_components);
+ switch (load->def.bit_size) {
+ case 1: return load->value.b[comp];
+ case 8: return load->value.u8[comp];
+ case 16: return load->value.u16[comp];
+ case 32: return load->value.u32[comp];
+ case 64: return load->value.u64[comp];
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
+bool
+nir_src_comp_as_bool(nir_src src, unsigned comp)
+{
+ int64_t i = nir_src_comp_as_int(src, comp);
+
+ /* Booleans of any size use 0/-1 convention */
+ assert(i == 0 || i == -1);
+
+ return i;
+}
+
+double
+nir_src_comp_as_float(nir_src src, unsigned comp)
+{
+ assert(nir_src_is_const(src));
+ nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
+
+ assert(comp < load->def.num_components);
+ switch (load->def.bit_size) {
+ case 16: return _mesa_half_to_float(load->value.u16[comp]);
+ case 32: return load->value.f32[comp];
+ case 64: return load->value.f64[comp];
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
+int64_t
+nir_src_as_int(nir_src src)
+{
+ assert(nir_src_num_components(src) == 1);
+ return nir_src_comp_as_int(src, 0);
+}
+
+uint64_t
+nir_src_as_uint(nir_src src)
+{
+ assert(nir_src_num_components(src) == 1);
+ return nir_src_comp_as_uint(src, 0);
+}
+
+bool
+nir_src_as_bool(nir_src src)
+{
+ assert(nir_src_num_components(src) == 1);
+ return nir_src_comp_as_bool(src, 0);
+}
+
+double
+nir_src_as_float(nir_src src)
+{
+ assert(nir_src_num_components(src) == 1);
+ return nir_src_comp_as_float(src, 0);
+}
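
/* Illustrative sketch, not part of this change: the accessors above make
 * immediate checks in optimization passes concise; is_imul_by_imm() is a
 * hypothetical predicate.
 */
static bool
is_imul_by_imm(const nir_alu_instr *alu, int64_t imm)
{
   if (alu->op != nir_op_imul)
      return false;

   /* The multiply is commutative, so either source may hold the constant. */
   for (unsigned i = 0; i < 2; i++) {
      if (nir_src_is_const(alu->src[i].src) &&
          nir_src_num_components(alu->src[i].src) == 1 &&
          nir_src_as_int(alu->src[i].src) == imm)
         return true;
   }
   return false;
}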
+
nir_const_value *
nir_src_as_const_value(nir_src src)
{
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
nir_instr *after_me)
{
- assert(!new_src.is_ssa || def != new_src.ssa);
+ if (new_src.is_ssa && def == new_src.ssa)
+ return;
nir_foreach_use_safe(use_src, def) {
assert(use_src->parent_instr != def->parent_instr);
nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
int src_idx = alu_src - &alu->src[0];
assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
-
- for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
- if (!nir_alu_instr_channel_used(alu, src_idx, c))
- continue;
-
- read_mask |= (1 << alu_src->swizzle[c]);
- }
+ read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
} else {
return (1 << def->num_components) - 1;
}
return nir_intrinsic_load_local_group_size;
case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
return nir_intrinsic_load_global_invocation_id;
+ case SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX:
+ return nir_intrinsic_load_global_invocation_index;
case SYSTEM_VALUE_WORK_DIM:
return nir_intrinsic_load_work_dim;
default:
/* OpenGL utility method that remaps the locations of attributes if they
 * are doubles. Not needed for Vulkan due to the differences in input
 * location counts for doubles between Vulkan and OpenGL.
+ *
+ * The bitfield returned in dual_slot has one bit for each double input slot in
+ * the original OpenGL single-slot input numbering. The mapping from old
+ * locations to new locations is as follows:
+ *
+ *    new_loc = loc + util_bitcount64(dual_slot & BITFIELD64_MASK(loc))
*/
void
-nir_remap_attributes(nir_shader *shader,
- const nir_shader_compiler_options *options)
-{
- if (options->vs_inputs_dual_locations) {
- nir_foreach_variable(var, &shader->inputs) {
- var->data.location +=
- _mesa_bitcount_64(shader->info.vs.double_inputs &
- BITFIELD64_MASK(var->data.location));
+nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
+{
+ assert(shader->info.stage == MESA_SHADER_VERTEX);
+
+ *dual_slot = 0;
+ nir_foreach_variable(var, &shader->inputs) {
+ if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
+ unsigned slots = glsl_count_attribute_slots(var->type, true);
+ *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
}
}
- /* Once the remap is done, reset double_inputs_read, so later it will have
- * which location/slots are doubles */
- shader->info.vs.double_inputs = 0;
+ nir_foreach_variable(var, &shader->inputs) {
+ var->data.location +=
+ util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
+ }
+}
+
+/* Returns an attribute mask that has been re-compacted using the given
+ * dual_slot mask.
+ */
+uint64_t
+nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
+{
+ while (dual_slot) {
+ unsigned loc = u_bit_scan64(&dual_slot);
+ /* mask of all bits up to and including loc */
+ uint64_t mask = BITFIELD64_MASK(loc + 1);
+ attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
+ }
+ return attribs;
+}
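
/* Worked example (illustrative): each bit set in dual_slot shifts every
 * later attribute up by one location. With dual_slot = 0x1 (a dual-slot
 * input at location 0), an input at location 1 moves to
 * 1 + util_bitcount64(0x1 & BITFIELD64_MASK(1)) = 2, and
 * nir_get_single_slot_attribs_mask(0x7, 0x1) compacts the dual-slot mask
 * {0, 1, 2} back to the single-slot mask {0, 1} = 0x3.
 */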
+
+void
+nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
+ bool bindless)
+{
+ switch (intrin->intrinsic) {
+#define CASE(op) \
+ case nir_intrinsic_image_deref_##op: \
+ intrin->intrinsic = bindless ? nir_intrinsic_bindless_image_##op \
+ : nir_intrinsic_image_##op; \
+ break;
+ CASE(load)
+ CASE(store)
+ CASE(atomic_add)
+ CASE(atomic_min)
+ CASE(atomic_max)
+ CASE(atomic_and)
+ CASE(atomic_or)
+ CASE(atomic_xor)
+ CASE(atomic_exchange)
+ CASE(atomic_comp_swap)
+ CASE(atomic_fadd)
+ CASE(size)
+ CASE(samples)
+ CASE(load_raw_intel)
+ CASE(store_raw_intel)
+#undef CASE
+ default:
+      unreachable("Unhandled image intrinsic");
+ }
+
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+
+ nir_intrinsic_set_image_dim(intrin, glsl_get_sampler_dim(deref->type));
+ nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
+ nir_intrinsic_set_access(intrin, var->data.image.access);
+ nir_intrinsic_set_format(intrin, var->data.image.format);
+
+ nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
+ nir_src_for_ssa(src));
}
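
/* Illustrative sketch, not part of this change: a typical caller lowers an
 * image deref to a binding index and lets the helper above swap the opcode
 * and copy the dim/array/access/format data off the variable. Assumes
 * nir_builder.h; using the flat var->data.binding as the index is a
 * simplification (real drivers also fold in array offsets).
 */
static void
lower_image_deref(nir_builder *b, nir_intrinsic_instr *intrin)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   nir_variable *var = nir_deref_instr_get_variable(deref);

   b->cursor = nir_before_instr(&intrin->instr);
   nir_ssa_def *index = nir_imm_int(b, var->data.binding);

   /* Rewrites src[0] to the index and selects the non-bindless opcode. */
   nir_rewrite_image_intrinsic(intrin, index, false);
}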