src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
break;
- case nir_op_b2f:
+ case nir_op_b2f16:
+ case nir_op_b2f32:
+ case nir_op_b2f64:
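+ /* emit_b2f sizes its result from the instruction's dest bit size. */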
result = emit_b2f(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
break;
- case nir_op_f2b:
+ case nir_op_f2b32:
result = emit_f2b(&ctx->ac, src[0]);
break;
- case nir_op_b2i:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
+ case nir_op_b2i64:
result = emit_b2i(&ctx->ac, src[0], instr->dest.dest.ssa.bit_size);
break;
- case nir_op_i2b:
+ case nir_op_i2b32:
src[0] = ac_to_integer(&ctx->ac, src[0]);
result = emit_i2b(&ctx->ac, src[0]);
break;
case nir_op_u2f32:
result = vir_UTOF(c, src[0]);
break;
- case nir_op_b2f:
+ case nir_op_b2f32:
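+ /* NIR booleans are ~0/0, so ANDing with the bit pattern of 1.0 yields 1.0f or 0.0f. */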
result = vir_AND(c, src[0], vir_uniform_f(c, 1.0));
break;
- case nir_op_b2i:
+ case nir_op_b2i32:
result = vir_AND(c, src[0], vir_uniform_ui(c, 1));
break;
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
vir_PF(c, src[0], V3D_QPU_PF_PUSHZ);
result = vir_MOV(c, vir_SEL(c, V3D_QPU_COND_IFNA,
vir_uniform_ui(c, ~0),
result = supports_ints ? nir_u2f32(&b, srcs[0]) : nir_fmov(&b, srcs[0]);
break;
case ir_unop_b2f:
- result = supports_ints ? nir_b2f(&b, srcs[0]) : nir_fmov(&b, srcs[0]);
+ result = supports_ints ? nir_b2f32(&b, srcs[0]) : nir_fmov(&b, srcs[0]);
break;
case ir_unop_f2i:
case ir_unop_f2u:
case nir_op_uge:
case nir_op_ieq:
case nir_op_ine:
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
case nir_op_inot:
case nir_op_fnot:
return true;
'f2i' : 'int',
'u2u' : 'uint',
'i2i' : 'int',
+ 'b2f' : 'float',
+ 'b2i' : 'int',
+ 'i2b' : 'bool',
+ 'f2b' : 'bool',
}
if sys.version_info < (3, 0):
#include "nir_builder_opcodes.h"
+static inline nir_ssa_def *
+nir_f2b(nir_builder *build, nir_ssa_def *f)
+{
+ return nir_f2b32(build, f);
+}
+
+static inline nir_ssa_def *
+nir_i2b(nir_builder *build, nir_ssa_def *i)
+{
+ return nir_i2b32(build, i);
+}
+
static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
unsigned interp_mode)
r = nir_isub(bld, a, r);
r = nir_uge(bld, r, b);
- r = nir_b2i(bld, r);
+ r = nir_b2i32(bld, r);
q = nir_iadd(bld, q, r);
if (is_signed) {
nir_ssa_def *is_non_zero = nir_i2b(b, nir_ior(b, x_lo, x_hi));
nir_ssa_def *res_hi = nir_ishr(b, x_hi, nir_imm_int(b, 31));
- nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i(b, is_non_zero));
+ nir_ssa_def *res_lo = nir_ior(b, res_hi, nir_b2i32(b, is_non_zero));
return nir_pack_64_2x32_split(b, res_lo, res_hi);
}
# helper variables for strings
tfloat = "float"
tint = "int"
+tbool = "bool"
tbool32 = "bool32"
tuint = "uint"
tuint16 = "uint16"
def type_sizes(type_):
if type_has_size(type_):
return [type_size(type_)]
+ elif type_ == 'bool':
+ return [32]
elif type_ == 'float':
return [16, 32, 64]
else:
unop("flog2", tfloat, "log2f(src0)")
# Generate all of the numeric conversion opcodes
-for src_t in [tint, tuint, tfloat]:
- if src_t in (tint, tuint):
- dst_types = [tfloat, src_t]
+for src_t in [tint, tuint, tfloat, tbool]:
+ if src_t == tbool:
+ dst_types = [tfloat, tint]
+ elif src_t == tint:
+ dst_types = [tfloat, tint, tbool]
+ elif src_t == tuint:
+ dst_types = [tfloat, tuint]
elif src_t == tfloat:
- dst_types = [tint, tuint, tfloat]
+ dst_types = [tint, tuint, tfloat, tbool]
for dst_t in dst_types:
for bit_size in type_sizes(dst_t):
bit_size, rnd_mode),
dst_t + str(bit_size), src_t, "src0")
else:
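+ # Conversions to bool compare src0 against zero; everything else is a plain cast.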
+ conv_expr = "src0 != 0" if dst_t == tbool else "src0"
unop_convert("{0}2{1}{2}".format(src_t[0], dst_t[0], bit_size),
- dst_t + str(bit_size), src_t, "src0")
-
-# We'll hand-code the to/from bool conversion opcodes. Because bool doesn't
-# have multiple bit-sizes, we can always infer the size from the other type.
-unop_convert("f2b", tbool32, tfloat, "src0 != 0.0")
-unop_convert("i2b", tbool32, tint, "src0 != 0")
-unop_convert("b2f", tfloat, tbool32, "src0 ? 1.0 : 0.0")
-unop_convert("b2i", tint, tbool32, "src0 ? 1 : 0")
+ dst_t + str(bit_size), src_t, conv_expr)
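+# The loop now also emits the sized boolean conversions (b2f16/b2f32/b2f64,
+# b2i8/b2i16/b2i32/b2i64, f2b32, i2b32) that were previously hand-coded.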
# Unary floating-point rounding operations.
if (src == dst && src_base == nir_type_float) {
return nir_op_fmov;
+ } else if (src == dst && src_base == nir_type_bool) {
+ return nir_op_imov;
} else if ((src_base == nir_type_int || src_base == nir_type_uint) &&
(dst_base == nir_type_int || dst_base == nir_type_uint) &&
src_bit_size == dst_bit_size) {
}
switch (src_base) {
-% for src_t in ['int', 'uint', 'float']:
+% for src_t in ['int', 'uint', 'float', 'bool']:
case nir_type_${src_t}:
switch (dst_base) {
-% for dst_t in ['int', 'uint', 'float']:
+% for dst_t in ['int', 'uint', 'float', 'bool']:
case nir_type_${dst_t}:
% if src_t in ['int', 'uint'] and dst_t in ['int', 'uint']:
% if dst_t == 'int':
<% continue %>
% else:
<% dst_t = src_t %>
% endif
+% elif src_t == 'bool' and dst_t in ['int', 'uint', 'bool']:
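+## bool -> int/uint/bool all resolve to the b2i opcodes: 'int' is skipped
+## so its case label falls through to 'uint', and 'bool' is redirected to
+## 'int' below.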
+% if dst_t == 'int':
+<% continue %>
+% else:
+<% dst_t = 'int' %>
+% endif
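+## There is no unsigned-to-bool opcode, so uint -> bool reuses i2b.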
+% elif src_t == 'uint' and dst_t == 'bool':
+<% src_t = 'int' %>
% endif
switch (dst_bit_size) {
% for dst_bits in type_sizes(dst_t):
unreachable("Invalid nir alu bit size");
}
% endfor
- case nir_type_bool:
-% if src_t == 'float':
- return nir_op_f2b;
-% else:
- return nir_op_i2b;
-% endif
default:
unreachable("Invalid nir alu base type");
}
% endfor
- case nir_type_bool:
- switch (dst_base) {
- case nir_type_int:
- case nir_type_uint:
- return nir_op_b2i;
- case nir_type_float:
- return nir_op_b2f;
- default:
- unreachable("Invalid nir alu base type");
- }
default:
unreachable("Invalid nir alu base type");
}
(('fsat', ('fadd', ('b2f', 'a@32'), ('b2f', 'b@32'))), ('b2f', ('ior', a, b))),
(('iand', 'a@bool', 1.0), ('b2f', a), '!options->lower_b2f'),
# True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
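+# e.g. b2i32(True) = 1 and ineg(1) = -1 = ~0 = True, while False stays 0,
+# so the pattern below is an identity on 32-bit booleans.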
- (('ineg', ('b2i@32', 'a@32')), a),
+ (('ineg', ('b2i32', 'a@32')), a),
(('flt', ('fneg', ('b2f', 'a@32')), 0), a), # Generated by TGSI KILL_IF.
(('flt', ('fsub', 0.0, ('b2f', 'a@32')), 0), a), # Generated by TGSI KILL_IF.
# Comparison with the same args. Note that these are not done for
(('fcsel', a, b, b), b),
# Conversions
- (('i2b', ('b2i', 'a@32')), a),
- (('i2b', 'a@bool'), a),
+ (('i2b32', ('b2i', 'a@32')), a),
+ (('i2b32', 'a@bool'), a),
(('f2i', ('ftrunc', a)), ('f2i', a)),
(('f2u', ('ftrunc', a)), ('f2u', a)),
(('i2b', ('ineg', a)), ('i2b', a)),
(('i2b', ('iabs', a)), ('i2b', a)),
(('fabs', ('b2f', a)), ('b2f', a)),
(('iabs', ('b2i', a)), ('b2i', a)),
- (('inot', ('f2b', a)), ('feq', a, 0.0)),
+ (('inot', ('f2b32', a)), ('feq', a, 0.0)),
# Ironically, mark these as imprecise because removing the conversions may
# preserve more precision than doing the conversions (e.g.,
('ior', (invert[left], a, b), (invert[right], c, d))))
# Optimize x2yN(b2x(x)) -> b2y
-optimizations.append((('f2b', ('b2f', 'a@32')), a))
-optimizations.append((('i2b', ('b2i', 'a@32')), a))
+optimizations.append((('f2b32', ('b2f', 'a@32')), a))
+optimizations.append((('i2b32', ('b2i', 'a@32')), a))
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
if x != 'f' and y != 'f' and x != y:
continue
(('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
# Lowered for backends without a dedicated b2f instruction
- (('b2f@32', 'a@32'), ('iand', a, 1.0), 'options->lower_b2f'),
+ (('b2f32', 'a@32'), ('iand', a, 1.0), 'options->lower_b2f'),
]
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
case nir_op_ior:
case nir_op_iand:
case nir_op_inot:
- case nir_op_b2i:
+ case nir_op_b2i32:
return true;
case nir_op_bcsel:
return src == &alu->src[0].src;
nop == nir_op_##op##32 || \
nop == nir_op_##op##64;
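+/* Boolean destinations only exist at 32 bits, so i2b/f2b match exactly
+ * one sized opcode.
+ */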
+#define MATCH_BCONV_CASE(op) \
+ case nir_search_op_##op: \
+ return nop == nir_op_##op##32;
+
switch (sop) {
MATCH_FCONV_CASE(i2f)
MATCH_FCONV_CASE(u2f)
MATCH_ICONV_CASE(f2i)
MATCH_ICONV_CASE(u2u)
MATCH_ICONV_CASE(i2i)
+ MATCH_FCONV_CASE(b2f)
+ MATCH_ICONV_CASE(b2i)
+ MATCH_BCONV_CASE(i2b)
+ MATCH_BCONV_CASE(f2b)
default:
unreachable("Invalid nir_search_op");
}
default: unreachable("Invalid bit size"); \
}
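+/* As with matching above, the only sized boolean conversion is 32-bit. */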
+#define RET_BCONV_CASE(op) \
+ case nir_search_op_##op: \
+ switch (bit_size) { \
+ case 32: return nir_op_##op##32; \
+ default: unreachable("Invalid bit size"); \
+ }
+
switch (sop) {
RET_FCONV_CASE(i2f)
RET_FCONV_CASE(u2f)
RET_ICONV_CASE(f2i)
RET_ICONV_CASE(u2u)
RET_ICONV_CASE(i2i)
+ RET_FCONV_CASE(b2f)
+ RET_ICONV_CASE(b2i)
+ RET_BCONV_CASE(i2b)
+ RET_BCONV_CASE(f2b)
default:
unreachable("Invalid nir_search_op");
}
nir_search_op_f2i,
nir_search_op_u2u,
nir_search_op_i2i,
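+ /* Unsized boolean conversions, resolved against their sized forms. */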
+ nir_search_op_b2f,
+ nir_search_op_b2i,
+ nir_search_op_i2b,
+ nir_search_op_f2b,
};
typedef struct {
/* range-reduction fixup */
tmp = nir_fadd(b, tmp,
nir_fmul(b,
- nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
+ nir_b2f32(b, nir_flt(b, one, abs_y_over_x)),
nir_fadd(b, nir_fmul(b, tmp,
nir_imm_float(b, -2.0f)),
nir_imm_float(b, M_PI_2f))));
/* Calculate the arctangent and fix up the result if we had flipped the
* coordinate system.
*/
- nir_ssa_def *arc = nir_fadd(b, nir_fmul(b, nir_b2f(b, flip),
+ nir_ssa_def *arc = nir_fadd(b, nir_fmul(b, nir_b2f32(b, flip),
nir_imm_float(b, M_PI_2f)),
build_atan(b, tan));
case nir_op_u2u8:
dst[0] = create_cov(ctx, src[0], bs[0], alu->op);
break;
- case nir_op_f2b:
+ case nir_op_f2b32:
dst[0] = ir3_CMPS_F(b, src[0], 0, create_immed(b, fui(0.0)), 0);
dst[0]->cat2.condition = IR3_COND_NE;
dst[0] = ir3_n2b(b, dst[0]);
break;
- case nir_op_b2f:
+ case nir_op_b2f16:
+ case nir_op_b2f32:
dst[0] = ir3_COV(b, ir3_b2n(b, src[0]), TYPE_U32, TYPE_F32);
break;
- case nir_op_b2i:
+ case nir_op_b2i8:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
dst[0] = ir3_b2n(b, src[0]);
break;
- case nir_op_i2b:
+ case nir_op_i2b32:
dst[0] = ir3_CMPS_S(b, src[0], 0, create_immed(b, 0), 0);
dst[0]->cat2.condition = IR3_COND_NE;
dst[0] = ir3_n2b(b, dst[0]);
case nir_op_u2f32:
result = qir_ITOF(c, src[0]);
break;
- case nir_op_b2f:
+ case nir_op_b2f32:
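+ /* Booleans are ~0/0; AND with 1.0's bit pattern gives 1.0f or 0.0f. */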
result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
break;
- case nir_op_b2i:
+ case nir_op_b2i32:
result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
break;
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
qir_SF(c, src[0]);
result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
qir_uniform_ui(c, ~0),
inst->saturate = instr->dest.saturate;
break;
- case nir_op_b2i:
- case nir_op_b2f:
+ case nir_op_b2i8:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
+ case nir_op_b2i64:
+ case nir_op_b2f16:
+ case nir_op_b2f32:
+ case nir_op_b2f64:
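+ /* Booleans are 0/~0 (0/-1): treat the source as signed integer and
+  * negate it so the conversion that follows sees 0/1.
+  */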
op[0].type = BRW_REGISTER_TYPE_D;
op[0].negate = !op[0].negate;
/* fallthrough */
inst->saturate = instr->dest.saturate;
break;
- case nir_op_i2b:
- case nir_op_f2b: {
+ case nir_op_i2b32:
+ case nir_op_f2b32: {
uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
if (bit_size == 64) {
/* two-argument instructions can't take 64-bit immediates */
fs_reg zero;
fs_reg tmp;
- if (instr->op == nir_op_f2b) {
+ if (instr->op == nir_op_f2b32) {
zero = vgrf(glsl_type::double_type);
tmp = vgrf(glsl_type::double_type);
bld.MOV(zero, setup_imm_df(bld, 0.0));
} else {
fs_reg zero;
if (bit_size == 32) {
- zero = instr->op == nir_op_f2b ? brw_imm_f(0.0f) : brw_imm_d(0);
+ zero = instr->op == nir_op_f2b32 ? brw_imm_f(0.0f) : brw_imm_d(0);
} else {
assert(bit_size == 16);
- zero = instr->op == nir_op_f2b ?
+ zero = instr->op == nir_op_f2b32 ?
retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0);
}
bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ);
emit(AND(dst, op[0], op[1]));
break;
- case nir_op_b2i:
- case nir_op_b2f:
+ case nir_op_b2i32:
+ case nir_op_b2f32:
+ case nir_op_b2f64:
if (nir_dest_bit_size(instr->dest.dest) > 32) {
assert(dst.type == BRW_REGISTER_TYPE_DF);
emit_conversion_to_double(dst, negate(op[0]), false);
}
break;
- case nir_op_f2b:
+ case nir_op_f2b32:
if (nir_src_bit_size(instr->src[0].src) == 64) {
/* We use a MOV with conditional_mod to check if the provided value is
* 0.0. We want this to flush denormalized numbers to zero, so we set a
}
break;
- case nir_op_i2b:
+ case nir_op_i2b32:
emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
break;
ptn_slt(nir_builder *b, nir_alu_dest dest, nir_ssa_def **src)
{
if (b->shader->options->native_integers) {
- ptn_move_dest(b, dest, nir_b2f(b, nir_flt(b, src[0], src[1])));
+ ptn_move_dest(b, dest, nir_b2f32(b, nir_flt(b, src[0], src[1])));
} else {
ptn_move_dest(b, dest, nir_slt(b, src[0], src[1]));
}
ptn_sge(nir_builder *b, nir_alu_dest dest, nir_ssa_def **src)
{
if (b->shader->options->native_integers) {
- ptn_move_dest(b, dest, nir_b2f(b, nir_fge(b, src[0], src[1])));
+ ptn_move_dest(b, dest, nir_b2f32(b, nir_fge(b, src[0], src[1])));
} else {
ptn_move_dest(b, dest, nir_sge(b, src[0], src[1]));
}