build->cursor = nir_after_cf_list(&build->impl->body);
}
+typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);
+
+/**
+ * Iterates over all the instructions in a NIR shader and calls the given pass
+ * on them.
+ *
+ * The pass should return true if it modified the shader. In that case, only
+ * the preserved metadata flags will be preserved in the function impl.
+ *
+ * The builder will be initialized to point at the function impl, but its
+ * cursor is unset.
+ */
+static inline bool
+nir_shader_instructions_pass(nir_shader *shader,
+                             nir_instr_pass_cb pass,
+                             nir_metadata preserved,
+                             void *cb_data)
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (!function->impl)
+         continue;
+
+      nir_builder b;
+      nir_builder_init(&b, function->impl);
+
+      /* Track progress per function impl: progress in an earlier function
+       * must not cause us to discard metadata on an impl this pass never
+       * touched.
+       */
+      bool func_progress = false;
+      nir_foreach_block_safe(block, function->impl) {
+         nir_foreach_instr_safe(instr, block) {
+            func_progress |= pass(&b, instr, cb_data);
+         }
+      }
+
+      if (func_progress) {
+         nir_metadata_preserve(function->impl, preserved);
+         progress = true;
+      } else {
+         /* Untouched impl: all of its existing metadata is still valid. */
+         nir_metadata_preserve(function->impl, nir_metadata_all);
+      }
+   }
+
+   return progress;
+}
+
static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return x;
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return nir_imm_intN_t(build, 0, x->bit_size);
return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
+static inline nir_ssa_def *
+nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   assert(x->bit_size <= 64);
+
+   /* Reduce the immediate to the operand's bit size before comparing. */
+   const uint64_t mask = BITFIELD64_MASK(x->bit_size);
+   y &= mask;
+
+   if (y == mask)
+      return x;   /* AND with all-ones is a no-op. */
+   if (y == 0)
+      return nir_imm_intN_t(build, 0, x->bit_size);
+
+   return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
+static inline nir_ssa_def *
+nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+   /* A zero shift is a no-op; avoid emitting an instruction for it. */
+   return y == 0 ? x : nir_ishr(build, x, nir_imm_int(build, y));
+}
+
+static inline nir_ssa_def *
+nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+   /* A zero shift is a no-op; avoid emitting an instruction for it. */
+   return y == 0 ? x : nir_ushr(build, x, nir_imm_int(build, y));
+}
+
+static inline nir_ssa_def *
+nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+   assert(x->bit_size <= 64);
+   y &= BITFIELD64_MASK(x->bit_size);
+
+   if (y == 1)
+      return x;   /* Division by one is a no-op. */
+
+   /* A power-of-two divisor reduces to a logical right shift. */
+   if (util_is_power_of_two_nonzero(y))
+      return nir_ushr_imm(build, x, ffsll(y) - 1);
+
+   return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
+}
+
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
/* If we got here, we have no dedicated unpack opcode. */
nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < dest_num_components; i++) {
- nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
+ nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
dest_comps[i] = nir_u2u(b, val, dest_bit_size);
}
return nir_vec(b, dest_comps, dest_num_components);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_var);
- deref->mode = var->data.mode;
+ deref->mode = (nir_variable_mode)var->data.mode;
deref->type = var->type;
deref->var = var;
nir_build_deref_var(build, src));
}
+static inline nir_ssa_def *
+nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+                unsigned num_components, unsigned bit_size)
+{
+   /* Emit a load_global intrinsic; src[0] is the address to load from. */
+   nir_intrinsic_instr *intrin =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global);
+   intrin->src[0] = nir_src_for_ssa(addr);
+   intrin->num_components = num_components;
+   nir_intrinsic_set_align(intrin, align, 0);   /* align_offset = 0 */
+   nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+                     num_components, bit_size, NULL);
+   nir_builder_instr_insert(build, &intrin->instr);
+   return &intrin->dest.ssa;
+}
+
+static inline void
+nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+                 nir_ssa_def *value, nir_component_mask_t write_mask)
+{
+   /* Emit a store_global intrinsic; src[0] is the value, src[1] the
+    * address.
+    */
+   nir_intrinsic_instr *intrin =
+      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
+   intrin->src[0] = nir_src_for_ssa(value);
+   intrin->src[1] = nir_src_for_ssa(addr);
+   intrin->num_components = value->num_components;
+   /* Clamp the write mask to components that actually exist. */
+   nir_intrinsic_set_write_mask(intrin,
+      write_mask & BITFIELD_MASK(value->num_components));
+   nir_intrinsic_set_align(intrin, align, 0);   /* align_offset = 0 */
+   nir_builder_instr_insert(build, &intrin->instr);
+}
+
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
return &load->dest.ssa;
}
+static inline nir_ssa_def *
+nir_load_reloc_const_intel(nir_builder *b, uint32_t id)
+{
+   /* Emit a 32-bit scalar load of an Intel relocation constant; the
+    * relocation id travels in the param_idx index slot.
+    */
+   nir_intrinsic_instr *intrin =
+      nir_intrinsic_instr_create(b->shader,
+                                 nir_intrinsic_load_reloc_const_intel);
+   nir_intrinsic_set_param_idx(intrin, id);
+   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 32, NULL);
+   nir_builder_instr_insert(b, &intrin->instr);
+   return &intrin->dest.ssa;
+}
+
#include "nir_builder_opcodes.h"
static inline nir_ssa_def *
case COMPARE_FUNC_EQUAL:
return nir_feq(b, src0, src1);
case COMPARE_FUNC_NOTEQUAL:
- return nir_fne(b, src0, src1);
+ return nir_fneu(b, src0, src1);
case COMPARE_FUNC_GREATER:
return nir_flt(b, src1, src0);
case COMPARE_FUNC_GEQUAL: