}
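+/* Starts an if-statement whose condition is taken from an existing nir_src.
+ * The builder cursor is left at the start of the then-block.
+ */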
static inline nir_if *
-nir_push_if(nir_builder *build, nir_ssa_def *condition)
+nir_push_if_src(nir_builder *build, nir_src condition)
{
nir_if *nif = nir_if_create(build->shader);
- nif->condition = nir_src_for_ssa(condition);
+ nif->condition = condition;
nir_builder_cf_insert(build, &nif->cf_node);
build->cursor = nir_before_cf_list(&nif->then_list);
return nif;
}
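+/* SSA-condition convenience wrapper; the usual pattern is
+ *
+ *    nir_push_if(b, cond);
+ *    ... build then-block ...
+ *    nir_push_else(b, NULL);
+ *    ... build else-block ...
+ *    nir_pop_if(b, NULL);
+ */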
+static inline nir_if *
+nir_push_if(nir_builder *build, nir_ssa_def *condition)
+{
+ return nir_push_if_src(build, nir_src_for_ssa(condition));
+}
+
static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
case 2: return nir_fdot2(build, src0, src1);
case 3: return nir_fdot3(build, src0, src1);
case 4: return nir_fdot4(build, src0, src1);
+ case 8: return nir_fdot8(build, src0, src1);
+ case 16: return nir_fdot16(build, src0, src1);
default:
unreachable("bad component size");
}
case 2: return nir_ball_iequal2(b, src0, src1);
case 3: return nir_ball_iequal3(b, src0, src1);
case 4: return nir_ball_iequal4(b, src0, src1);
+ case 8: return nir_ball_iequal8(b, src0, src1);
+ case 16: return nir_ball_iequal16(b, src0, src1);
default:
unreachable("bad component size");
}
}
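+/* Returns a single boolean that is true only if every component of the
+ * boolean vector src is true.
+ */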
+static inline nir_ssa_def *
+nir_ball(nir_builder *b, nir_ssa_def *src)
+{
+ return nir_ball_iequal(b, src, nir_imm_true(b));
+}
+
static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
case 2: return nir_bany_inequal2(b, src0, src1);
case 3: return nir_bany_inequal3(b, src0, src1);
case 4: return nir_bany_inequal4(b, src0, src1);
+ case 8: return nir_bany_inequal8(b, src0, src1);
+ case 16: return nir_bany_inequal16(b, src0, src1);
default:
unreachable("bad component size");
}
nir_iadd_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return x;
_nir_mul_imm(nir_builder *build, nir_ssa_def *x, uint64_t y, bool amul)
{
assert(x->bit_size <= 64);
- if (x->bit_size < 64)
- y &= (1ull << x->bit_size) - 1;
+ y &= BITFIELD64_MASK(x->bit_size);
if (y == 0) {
return nir_imm_intN_t(build, 0, x->bit_size);
return nir_fmul(build, x, nir_imm_floatN_t(build, y, x->bit_size));
}
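+/* AND with an immediate, folding masks that clear or preserve every bit. */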
+static inline nir_ssa_def *
+nir_iand_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+ assert(x->bit_size <= 64);
+ y &= BITFIELD64_MASK(x->bit_size);
+
+ if (y == 0) {
+ return nir_imm_intN_t(build, 0, x->bit_size);
+ } else if (y == BITFIELD64_MASK(x->bit_size)) {
+ return x;
+ } else {
+ return nir_iand(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ }
+}
+
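+/* Arithmetic right-shift by an immediate; shifting by zero is a no-op. */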
+static inline nir_ssa_def *
+nir_ishr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+ if (y == 0) {
+ return x;
+ } else {
+ return nir_ishr(build, x, nir_imm_int(build, y));
+ }
+}
+
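+/* Logical right-shift by an immediate; shifting by zero is a no-op. */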
+static inline nir_ssa_def *
+nir_ushr_imm(nir_builder *build, nir_ssa_def *x, uint32_t y)
+{
+ if (y == 0) {
+ return x;
+ } else {
+ return nir_ushr(build, x, nir_imm_int(build, y));
+ }
+}
+
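+/* Unsigned division by an immediate; dividing by one is folded away and
+ * dividing by a power of two becomes a logical right-shift.
+ */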
+static inline nir_ssa_def *
+nir_udiv_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
+{
+ assert(x->bit_size <= 64);
+ y &= BITFIELD64_MASK(x->bit_size);
+
+ if (y == 1) {
+ return x;
+ } else if (util_is_power_of_two_nonzero(y)) {
+ return nir_ushr_imm(build, x, ffsll(y) - 1);
+ } else {
+ return nir_udiv(build, x, nir_imm_intN_t(build, y, x->bit_size));
+ }
+}
+
static inline nir_ssa_def *
nir_pack_bits(nir_builder *b, nir_ssa_def *src, unsigned dest_bit_size)
{
/* If we got here, we have no dedicated unpack opcode. */
nir_ssa_def *dest_comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < dest_num_components; i++) {
- nir_ssa_def *val = nir_ushr(b, src, nir_imm_int(b, i * dest_bit_size));
+ nir_ssa_def *val = nir_ushr_imm(b, src, i * dest_bit_size);
dest_comps[i] = nir_u2u(b, val, dest_bit_size);
}
return nir_vec(b, dest_comps, dest_num_components);
}
static inline unsigned
-nir_get_ptr_bitsize(nir_builder *build)
+nir_get_ptr_bitsize(nir_shader *shader)
{
- if (build->shader->info.stage == MESA_SHADER_KERNEL)
- return build->shader->info.cs.ptr_size;
+ if (shader->info.stage == MESA_SHADER_KERNEL)
+ return shader->info.cs.ptr_size;
return 32;
}
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_var);
- deref->mode = var->data.mode;
+ deref->mode = (nir_variable_mode)var->data.mode;
deref->type = var->type;
deref->var = var;
nir_ssa_dest_init(&deref->instr, &deref->dest, 1,
- nir_get_ptr_bitsize(build), NULL);
+ nir_get_ptr_bitsize(build->shader), NULL);
nir_builder_instr_insert(build, &deref->instr);
static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
+ assert(jump_type != nir_jump_goto && jump_type != nir_jump_goto_if);
nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
nir_builder_instr_insert(build, &jump->instr);
}
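+/* Unconditional branch to target.  Only valid in unstructured impls. */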
+static inline void
+nir_goto(nir_builder *build, struct nir_block *target)
+{
+ assert(!build->impl->structured);
+ nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto);
+ jump->target = target;
+ nir_builder_instr_insert(build, &jump->instr);
+}
+
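+/* Conditional branch: jumps to target when cond is true and to else_target
+ * otherwise.  Only valid in unstructured impls.
+ */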
+static inline void
+nir_goto_if(nir_builder *build, struct nir_block *target, nir_src cond,
+ struct nir_block *else_target)
+{
+ assert(!build->impl->structured);
+ nir_jump_instr *jump = nir_jump_instr_create(build->shader, nir_jump_goto_if);
+ jump->condition = cond;
+ jump->target = target;
+ jump->else_target = else_target;
+ nir_builder_instr_insert(build, &jump->instr);
+}
+
static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
nir_ssa_def *src0, nir_ssa_def *src1)
case COMPARE_FUNC_EQUAL:
return nir_feq(b, src0, src1);
case COMPARE_FUNC_NOTEQUAL:
- return nir_fne(b, src0, src1);
+ return nir_fneu(b, src0, src1);
case COMPARE_FUNC_GREATER:
return nir_flt(b, src1, src0);
case COMPARE_FUNC_GEQUAL: