build->cursor = nir_after_cf_list(&build->impl->body);
}
+typedef bool (*nir_instr_pass_cb)(struct nir_builder *, nir_instr *, void *);
+
+/**
+ * Iterates over all the instructions in a NIR shader and calls the given pass
+ * on them.
+ *
+ * The pass should return true if it modified the shader. In that case, only
+ * the metadata flags in `preserved` are kept for the containing function
+ * impl; impls the pass never touched keep all of their metadata.
+ *
+ * The builder is initialized to point at the function impl, but its cursor
+ * is left unset; the pass must set it (e.g. with nir_before_instr) before
+ * inserting anything.
+ */
+static inline bool
+nir_shader_instructions_pass(nir_shader *shader,
+ nir_instr_pass_cb pass,
+ nir_metadata preserved,
+ void *cb_data)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+
+      bool func_progress = false;
+      nir_foreach_block_safe(block, function->impl) {
+         nir_foreach_instr_safe(instr, block) {
+            func_progress |= pass(&b, instr, cb_data);
+         }
+      }
+
+      /* Track progress per function: otherwise progress in one function
+       * would needlessly flush the metadata of every function after it.
+       */
+      if (func_progress) {
+         nir_metadata_preserve(function->impl, preserved);
+         progress = true;
+      } else {
+         nir_metadata_preserve(function->impl, nir_metadata_all);
+      }
+ }
+
+ return progress;
+}
+
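+/* Usage sketch (illustrative; the pass below is hypothetical): rewriting
+ * every fsqrt as frcp(frsq(x)) with nir_shader_instructions_pass:
+ *
+ *    static bool
+ *    lower_fsqrt_instr(nir_builder *b, nir_instr *instr, void *cb_data)
+ *    {
+ *       if (instr->type != nir_instr_type_alu)
+ *          return false;
+ *
+ *       nir_alu_instr *alu = nir_instr_as_alu(instr);
+ *       if (alu->op != nir_op_fsqrt)
+ *          return false;
+ *
+ *       // The builder's cursor starts out unset; point it at the
+ *       // instruction being replaced before emitting anything.
+ *       b->cursor = nir_before_instr(instr);
+ *       nir_ssa_def *x = nir_ssa_for_alu_src(b, alu, 0);
+ *       nir_ssa_def *res = nir_frcp(b, nir_frsq(b, x));
+ *       nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
+ *                                nir_src_for_ssa(res));
+ *       nir_instr_remove(instr);
+ *       return true;
+ *    }
+ *
+ *    bool
+ *    lower_fsqrt(nir_shader *shader)
+ *    {
+ *       return nir_shader_instructions_pass(shader, lower_fsqrt_instr,
+ *                                           nir_metadata_block_index |
+ *                                           nir_metadata_dominance, NULL);
+ *    }
+ */
+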
static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
}
static inline nir_if *
-nir_push_if(nir_builder *build, nir_ssa_def *condition)
+nir_push_if_src(nir_builder *build, nir_src condition)
{
nir_if *nif = nir_if_create(build->shader);
- nif->condition = nir_src_for_ssa(condition);
+ nif->condition = condition;
nir_builder_cf_insert(build, &nif->cf_node);
build->cursor = nir_before_cf_list(&nif->then_list);
return nif;
}
+static inline nir_if *
+nir_push_if(nir_builder *build, nir_ssa_def *condition)
+{
+ return nir_push_if_src(build, nir_src_for_ssa(condition));
+}
+
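+/* Usage sketch (illustrative): building `y = cond ? a : b` as real control
+ * flow.  nir_pop_if() and nir_if_phi() live elsewhere in this header:
+ *
+ *    nir_push_if(b, cond);
+ *    nir_ssa_def *then_val = nir_imm_int(b, 1);
+ *    nir_push_else(b, NULL);
+ *    nir_ssa_def *else_val = nir_imm_int(b, 2);
+ *    nir_pop_if(b, NULL);
+ *    nir_ssa_def *y = nir_if_phi(b, then_val, else_val);
+ */
+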
static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
case 2: return nir_fdot2(build, src0, src1);
case 3: return nir_fdot3(build, src0, src1);
case 4: return nir_fdot4(build, src0, src1);
+ case 8: return nir_fdot8(build, src0, src1);
+ case 16: return nir_fdot16(build, src0, src1);
default:
unreachable("bad component size");
}
case 2: return nir_ball_iequal2(b, src0, src1);
case 3: return nir_ball_iequal3(b, src0, src1);
case 4: return nir_ball_iequal4(b, src0, src1);
+ case 8: return nir_ball_iequal8(b, src0, src1);
+ case 16: return nir_ball_iequal16(b, src0, src1);
default:
unreachable("bad component size");
}
}
+static inline nir_ssa_def *
+nir_ball(nir_builder *b, nir_ssa_def *src)
+{
+ return nir_ball_iequal(b, src, nir_imm_true(b));
+}
+
static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
case 2: return nir_bany_inequal2(b, src0, src1);
case 3: return nir_bany_inequal3(b, src0, src1);
case 4: return nir_bany_inequal4(b, src0, src1);
+ case 8: return nir_bany_inequal8(b, src0, src1);
+ case 16: return nir_bany_inequal16(b, src0, src1);
default:
unreachable("bad component size");
}
return nir_channel(b, vec, start);
} else {
unsigned mid = start + (end - start) / 2;
- return nir_bcsel(b, nir_ilt(b, c, nir_imm_int(b, mid)),
+ return nir_bcsel(b, nir_ilt(b, c, nir_imm_intN_t(b, mid, c->bit_size)),
_nir_vector_extract_helper(b, vec, c, start, mid),
_nir_vector_extract_helper(b, vec, c, mid, end));
}
{
nir_src c_src = nir_src_for_ssa(c);
if (nir_src_is_const(c_src)) {
- unsigned c_const = nir_src_as_uint(c_src);
+ uint64_t c_const = nir_src_as_uint(c_src);
if (c_const < vec->num_components)
return nir_channel(b, vec, c_const);
else
}
}
+/** Replaces the component of `vec` specified by `c` with `scalar` */
+static inline nir_ssa_def *
+nir_vector_insert_imm(nir_builder *b, nir_ssa_def *vec,
+ nir_ssa_def *scalar, unsigned c)
+{
+ assert(scalar->num_components == 1);
+ assert(c < vec->num_components);
+
+ nir_op vec_op = nir_op_vec(vec->num_components);
+ nir_alu_instr *vec_instr = nir_alu_instr_create(b->shader, vec_op);
+
+ for (unsigned i = 0; i < vec->num_components; i++) {
+ if (i == c) {
+ vec_instr->src[i].src = nir_src_for_ssa(scalar);
+ vec_instr->src[i].swizzle[0] = 0;
+ } else {
+ vec_instr->src[i].src = nir_src_for_ssa(vec);
+ vec_instr->src[i].swizzle[0] = i;
+ }
+ }
+
+ return nir_builder_alu_instr_finish_and_insert(b, vec_instr);
+}
+
+/** Replaces the component of `vec` specified by `c` with `scalar`.
+ *
+ * An out-of-range `c` leaves `vec` unchanged, whether the index is known at
+ * build time or not.
+ */
+static inline nir_ssa_def *
+nir_vector_insert(nir_builder *b, nir_ssa_def *vec, nir_ssa_def *scalar,
+ nir_ssa_def *c)
+{
+ assert(scalar->num_components == 1);
+ assert(c->num_components == 1);
+
+ nir_src c_src = nir_src_for_ssa(c);
+ if (nir_src_is_const(c_src)) {
+ uint64_t c_const = nir_src_as_uint(c_src);
+ if (c_const < vec->num_components)
+ return nir_vector_insert_imm(b, vec, scalar, c_const);
+ else
+ return vec;
+ } else {
+ nir_const_value per_comp_idx_const[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
+ per_comp_idx_const[i] = nir_const_value_for_int(i, c->bit_size);
+ nir_ssa_def *per_comp_idx =
+ nir_build_imm(b, vec->num_components,
+ c->bit_size, per_comp_idx_const);
+
+ /* nir_builder will automatically splat out scalars to vectors so an
+ * insert is as simple as "if I'm the channel, replace me with the
+ * scalar."
+ */
+ return nir_bcsel(b, nir_ieq(b, c, per_comp_idx), scalar, vec);
+ }
+}
+
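+/* Usage sketch (illustrative): overwrite .y of a vec4, first with a channel
+ * index known at build time and then with one only known at run time:
+ *
+ *    nir_ssa_def *v2 = nir_vector_insert_imm(b, vec4, scalar, 1);
+ *    nir_ssa_def *v3 = nir_vector_insert(b, vec4, scalar, dyn_idx);
+ *
+ * When `dyn_idx` turns out to be constant, nir_vector_insert folds back to
+ * the _imm path, so callers never need to special-case constants.
+ */
+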
static inline nir_ssa_def *
nir_i2i(nir_builder *build, nir_ssa_def *x, unsigned dest_bit_size)
{
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return x;
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return nir_imm_intN_t(build, 0, x->bit_size);
} else if (y == 1) {
return x;
- } else if (util_is_power_of_two_or_zero64(y)) {
+ } else if (!build->shader->options->lower_bitops &&
+ util_is_power_of_two_or_zero64(y)) {
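+      /* A shift is only a win if the backend has native shifts; with
+       * lower_bitops set it may just get lowered back, so keep the
+       * multiply in that case.
+       */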
return nir_ishl(build, x, nir_imm_int(build, ffsll(y) - 1));
} else if (amul) {
return nir_amul(build, x, nir_imm_intN_t(build, y, x->bit_size));
return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
+static inline nir_ssa_def *
+nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+ assert(x->bit_size <= 64);
+ y &= BITFIELD64_MASK(x->bit_size);
+
+ if (y == 0) {
+ return nir_imm_intN_t(build, 0, x->bit_size);
+ } else if (y == BITFIELD64_MASK(x->bit_size)) {
+ return x;
+ } else {
+ return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ }
+}
+
+static inline nir_ssa_def *
+nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+ if (y == 0) {
+ return x;
+ } else {
+ return nir_ishr(build, x, nir_imm_int(build, y));
+ }
+}
+
+static inline nir_ssa_def *
+nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+ if (y == 0) {
+ return x;
+ } else {
+ return nir_ushr(build, x, nir_imm_int(build, y));
+ }
+}
+
+static inline nir_ssa_def *
+nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+ assert(x->bit_size <= 64);
+ y &= BITFIELD64_MASK(x->bit_size);
+
+ if (y == 1) {
+ return x;
+ } else if (util_is_power_of_two_nonzero(y)) {
+ return nir_ushr_imm(build, x, ffsll(y) - 1);
+ } else {
+ return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ }
+}
+
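+/* Usage sketch (illustrative): the *_imm helpers fold trivial cases at
+ * build time, so callers can use them unconditionally:
+ *
+ *    nir_iand_imm(b, x, 0xff);   // iand (assuming x is wider than 8 bits)
+ *    nir_ushr_imm(b, x, 0);      // returns x untouched
+ *    nir_udiv_imm(b, x, 16);     // strength-reduced to ushr by 4
+ */
+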
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
/* If we got here, we have no dedicated unpack opcode. */
nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < dest_num_components; i++) {
- nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
+ nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
dest_comps[i] = nir_u2u(b, val, dest_bit_size);
}
return nir_vec(b, dest_comps, dest_num_components);
for (unsigned i = 0; i < num_srcs; i++)
common_bit_size = MIN2(common_bit_size, srcs[i]->bit_size);
if (first_bit > 0)
- common_bit_size = MIN2(common_bit_size, (1 << (ffs(first_bit) - 1)));
+ common_bit_size = MIN2(common_bit_size, (1u << (ffs(first_bit) - 1)));
/* We don't want to have to deal with 1-bit values */
assert(common_bit_size >= 8);
const unsigned bit = first_bit + (i * common_bit_size);
while (bit >= src_end_bit) {
src_idx++;
- assert(src_idx < num_srcs);
+ assert(src_idx < (int) num_srcs);
src_start_bit = src_end_bit;
src_end_bit += srcs[src_idx]->bit_size *
srcs[src_idx]->num_components;
static inline nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
- static uint8_t trivial_swizzle[] = { 0, 1, 2, 3 };
+   static uint8_t trivial_swizzle[] = {
+      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+   };
STATIC_ASSERT(ARRAY_SIZE(trivial_swizzle) == NIR_MAX_VEC_COMPONENTS);
nir_alu_src *src = &instr->src[srcn];
}
static inline unsigned
-nir_get_ptr_bitsize(nir_builder *build)
+nir_get_ptr_bitsize(nir_shader *shader)
{
- if (build->shader->info.stage == MESA_SHADER_KERNEL)
- return build->shader->info.cs.ptr_size;
+ if (shader->info.stage == MESA_SHADER_KERNEL)
+ return shader->info.cs.ptr_size;
return 32;
}
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_var);
- deref->mode = var->data.mode;
+ deref->mode = (nir_variable_mode)var->data.mode;
deref->type = var->type;
deref->var = var;
nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
- nir_get_ptr_bitsize(build), NULL);
+ nir_get_ptr_bitsize(build->shader), NULL);
nir_builder_instr_insert(build, &deref->instr);
return nir_ssa_for_src(build, nir_src_for_reg(reg), reg->num_components);
}
+static inline void
+nir_store_reg(nir_builder *build, nir_register *reg,
+ nir_ssa_def *def, nir_component_mask_t write_mask)
+{
+ assert(reg->num_components == def->num_components);
+ assert(reg->bit_size == def->bit_size);
+
+ nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_mov);
+ mov->src[0].src = nir_src_for_ssa(def);
+ mov->dest.dest = nir_dest_for_reg(reg);
+ mov->dest.write_mask = write_mask & BITFIELD_MASK(reg->num_components);
+ nir_builder_instr_insert(build, &mov->instr);
+}
+
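+/* Usage sketch (illustrative): write only the .xy channels of a vec4
+ * register, leaving .zw untouched:
+ *
+ *    nir_store_reg(b, reg, val, 0x3);
+ *
+ * Bits beyond the register's width are masked off, so passing ~0 as the
+ * write mask stores every channel.
+ */
+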
static inline nir_ssa_def *
nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
enum gl_access_qualifier access)
nir_build_deref_var(build, src));
}
+static inline nir_ssa_def *
+nir_load_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+ unsigned num_components, unsigned bit_size)
+{
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_global);
+ load->num_components = num_components;
+ load->src[0] = nir_src_for_ssa(addr);
+ nir_intrinsic_set_align(load, align, 0);
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ num_components, bit_size, NULL);
+ nir_builder_instr_insert(build, &load->instr);
+ return &load->dest.ssa;
+}
+
+static inline void
+nir_store_global(nir_builder *build, nir_ssa_def *addr, unsigned align,
+ nir_ssa_def *value, nir_component_mask_t write_mask)
+{
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_global);
+ store->num_components = value->num_components;
+ store->src[0] = nir_src_for_ssa(value);
+ store->src[1] = nir_src_for_ssa(addr);
+ nir_intrinsic_set_write_mask(store,
+ write_mask & BITFIELD_MASK(value->num_components));
+ nir_intrinsic_set_align(store, align, 0);
+ nir_builder_instr_insert(build, &store->instr);
+}
+
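+/* Usage sketch (illustrative): a 16-byte-aligned vec4 of 32-bit values
+ * copied through raw global addresses:
+ *
+ *    nir_ssa_def *data = nir_load_global(b, src_addr, 16, 4, 32);
+ *    nir_store_global(b, dst_addr, 16, data, 0xf);
+ *
+ * `align` is the known byte alignment of the address; these helpers leave
+ * the align offset at zero.
+ */
+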
static inline nir_ssa_def *
nir_load_param(nir_builder *build, uint32_t param_idx)
{
return &load->dest.ssa;
}
+static inline nir_ssa_def *
+nir_load_reloc_const_intel(nir_builder *b, uint32_t id)
+{
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader,
+ nir_intrinsic_load_reloc_const_intel);
+ nir_intrinsic_set_param_idx(load, id);
+ nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
+ nir_builder_instr_insert(b, &load->instr);
+ return &load->dest.ssa;
+}
+
#include "nir_builder_opcodes.h"
static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
unsigned interp_mode)
{
+ unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
- nir_ssa_dest_init(&bary->instr, &bary->dest, 2, 32, NULL);
+ nir_ssa_dest_init(&bary->instr, &bary->dest, num_components, 32, NULL);
nir_intrinsic_set_interp_mode(bary, interp_mode);
nir_builder_instr_insert(build, &bary->instr);
return &bary->dest.ssa;
static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
+ assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
nir_builder_instr_insert(build, &jump->instr);
}
+static inline void
+nir_goto(nir_builder *build, struct nir_block *target)
+{
+ assert(!build->impl->structured);
+ nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
+ jump->target = target;
+ nir_builder_instr_insert(build, &jump->instr);
+}
+
+static inline void
+nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
+ struct nir_block *else_target)
+{
+ assert(!build->impl->structured);
+ nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
+ jump->condition = cond;
+ jump->target = target;
+ jump->else_target = else_target;
+ nir_builder_instr_insert(build, &jump->instr);
+}
+
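+/* Usage sketch (illustrative): once an impl has been marked unstructured,
+ * nir_jump() refuses gotos and these helpers take over.  A two-way branch
+ * on `cond` looks like:
+ *
+ *    nir_goto_if(b, then_block, nir_src_for_ssa(cond), else_block);
+ *
+ * and an unconditional edge is just nir_goto(b, target_block).
+ */
+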
static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
nir_ssa_def *src0, nir_ssa_def *src1)
case COMPARE_FUNC_EQUAL:
return nir_feq(b, src0, src1);
case COMPARE_FUNC_NOTEQUAL:
- return nir_fne(b, src0, src1);
+ return nir_fneu(b, src0, src1);
case COMPARE_FUNC_GREATER:
return nir_flt(b, src1, src0);
case COMPARE_FUNC_GEQUAL:
unreachable("bad compare func");
}
+static inline void
+nir_scoped_barrier(nir_builder *b,
+ nir_scope exec_scope,
+ nir_scope mem_scope,
+ nir_memory_semantics mem_semantics,
+ nir_variable_mode mem_modes)
+{
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
+ nir_intrinsic_set_execution_scope(intrin, exec_scope);
+ nir_intrinsic_set_memory_scope(intrin, mem_scope);
+ nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
+ nir_intrinsic_set_memory_modes(intrin, mem_modes);
+ nir_builder_instr_insert(b, &intrin->instr);
+}
+
+static inline void
+nir_scoped_memory_barrier(nir_builder *b,
+ nir_scope scope,
+ nir_memory_semantics semantics,
+ nir_variable_mode modes)
+{
+ nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
+}
+
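+/* Usage sketch (illustrative): the moral equivalent of GLSL barrier() in a
+ * compute shader, synchronizing workgroup execution and making shared
+ * memory writes visible across it:
+ *
+ *    nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
+ *                       NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
+ */
+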
+static inline nir_ssa_def *
+nir_convert_to_bit_size(nir_builder *b,
+ nir_ssa_def *src,
+ nir_alu_type type,
+ unsigned bit_size)
+{
+ nir_alu_type base_type = nir_alu_type_get_base_type(type);
+ nir_alu_type dst_type = (nir_alu_type)(bit_size | base_type);
+
+ nir_op opcode =
+ nir_type_conversion_op(type, dst_type, nir_rounding_mode_undef);
+
+ return nir_build_alu(b, opcode, src, NULL, NULL, NULL);
+}
+
+static inline nir_ssa_def *
+nir_i2iN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_int, bit_size);
+}
+
+static inline nir_ssa_def *
+nir_u2uN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_uint, bit_size);
+}
+
+static inline nir_ssa_def *
+nir_b2bN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_bool, bit_size);
+}
+
+static inline nir_ssa_def *
+nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size)
+{
+ return nir_convert_to_bit_size(b, src, nir_type_float, bit_size);
+}
+
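+/* Usage sketch (illustrative): pick the conversion opcode from a base type
+ * and a target bit size instead of naming it by hand:
+ *
+ *    nir_ssa_def *wide = nir_u2uN(b, x, 64);   // u2u64: zero-extend
+ *    nir_ssa_def *half = nir_f2fN(b, f, 16);   // f2f16: float -> half
+ */
+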
#endif /* NIR_BUILDER_H */