From: Karol Herbst
Date: Tue, 18 Aug 2020 17:51:57 +0000 (+0200)
Subject: nir: rename nir_op_fne to nir_op_fneu
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=e5899c1e8818f7cfdd23c06c504009e5659794b7;p=mesa.git

nir: rename nir_op_fne to nir_op_fneu

The opcode always had unordered (fneu) semantics, but naming it fne causes
confusion from time to time, so let's rename it. Later we also want to add
the other unordered comparisons and an ordered fne; this is a small
preparation for that.

Signed-off-by: Karol Herbst
Reviewed-by: Bas Nieuwenhuizen
Reviewed-by: Ian Romanick
Reviewed-by: Jason Ekstrand
Reviewed-by: Connor Abbott
Part-of:
---

diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp
index 5bd389a5be4..42b899e4c70 100644
--- a/src/amd/compiler/aco_instruction_selection.cpp
+++ b/src/amd/compiler/aco_instruction_selection.cpp
@@ -2918,7 +2918,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
       emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64);
       break;
    }
-   case nir_op_fne: {
+   case nir_op_fneu: {
       emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64);
       break;
    }
diff --git a/src/amd/llvm/ac_nir_to_llvm.c b/src/amd/llvm/ac_nir_to_llvm.c
index 7643326fde5..37a483e3ba6 100644
--- a/src/amd/llvm/ac_nir_to_llvm.c
+++ b/src/amd/llvm/ac_nir_to_llvm.c
@@ -771,7 +771,7 @@ static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
    case nir_op_feq32:
       result = emit_float_cmp(&ctx->ac, LLVMRealOEQ, src[0], src[1]);
       break;
-   case nir_op_fne32:
+   case nir_op_fneu32:
       result = emit_float_cmp(&ctx->ac, LLVMRealUNE, src[0], src[1]);
       break;
    case nir_op_flt32:
diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index 689414551e9..e580e75ef3c 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -800,7 +800,7 @@ ntq_emit_comparison(struct v3d_compile *c,
                 vir_set_pf(vir_XOR_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                 break;

-        case nir_op_fne32:
+        case nir_op_fneu32:
         case nir_op_sne:
                 vir_set_pf(vir_FCMP_dest(c, nop, src0, src1), V3D_QPU_PF_PUSHZ);
                 cond_invert = true;
@@ -1044,7 +1044,7 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
         case nir_op_i2b32:
         case nir_op_f2b32:
         case nir_op_feq32:
-        case nir_op_fne32:
+        case nir_op_fneu32:
         case nir_op_fge32:
         case nir_op_flt32:
         case nir_op_ieq32:
diff --git a/src/compiler/glsl/float64.glsl b/src/compiler/glsl/float64.glsl
index 185f6dc9f4c..5509b8946df 100644
--- a/src/compiler/glsl/float64.glsl
+++ b/src/compiler/glsl/float64.glsl
@@ -155,7 +155,7 @@ __feq64(uint64_t a, uint64_t b)
  * performed according to the IEEE Standard for Floating-Point Arithmetic.
  */
 bool
-__fne64(uint64_t a, uint64_t b)
+__fneu64(uint64_t a, uint64_t b)
 {
    if (__is_nan(a) || __is_nan(b))
       return true;
@@ -206,16 +206,16 @@ __flt64_nonnan(uint64_t __a, uint64_t __b)
  *
  * This is equivalent to
  *
- * fne(a, b) && (both_negative(a, b) ? a >= b : a < b)
+ * fneu(a, b) && (both_negative(a, b) ? a >= b : a < b)
  *
- * fne(a, b) && (both_negative(a, b) ? !(a < b) : a < b)
+ * fneu(a, b) && (both_negative(a, b) ? !(a < b) : a < b)
  *
- * fne(a, b) && ((both_negative(a, b) && !(a < b)) ||
+ * fneu(a, b) && ((both_negative(a, b) && !(a < b)) ||
  *               (!both_negative(a, b) && (a < b)))
  *
  * (A!|B)&(A|!B) is (A xor B) which is implemented here using !=.
* - * fne(a, b) && (both_negative(a, b) != (a < b)) + * fneu(a, b) && (both_negative(a, b) != (a < b)) */ bool lt = ilt64(a.y, a.x, b.y, b.x); bool both_negative = (a.y & b.y & 0x80000000u) != 0; diff --git a/src/compiler/glsl/glsl_to_nir.cpp b/src/compiler/glsl/glsl_to_nir.cpp index 6e8cfbecd0e..9665a6c0ccb 100644 --- a/src/compiler/glsl/glsl_to_nir.cpp +++ b/src/compiler/glsl/glsl_to_nir.cpp @@ -2225,7 +2225,7 @@ nir_visitor::visit(ir_expression *ir) break; case ir_binop_nequal: if (type_is_float(types[0])) - result = nir_fne(&b, srcs[0], srcs[1]); + result = nir_fneu(&b, srcs[0], srcs[1]); else result = nir_ine(&b, srcs[0], srcs[1]); break; @@ -2253,7 +2253,7 @@ nir_visitor::visit(ir_expression *ir) case ir_binop_any_nequal: if (type_is_float(types[0])) { switch (ir->operands[0]->type->vector_elements) { - case 1: result = nir_fne(&b, srcs[0], srcs[1]); break; + case 1: result = nir_fneu(&b, srcs[0], srcs[1]); break; case 2: result = nir_bany_fnequal2(&b, srcs[0], srcs[1]); break; case 3: result = nir_bany_fnequal3(&b, srcs[0], srcs[1]); break; case 4: result = nir_bany_fnequal4(&b, srcs[0], srcs[1]); break; diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h index a3189f5d3b3..25176a95e57 100644 --- a/src/compiler/nir/nir.h +++ b/src/compiler/nir/nir.h @@ -1382,7 +1382,7 @@ nir_alu_instr_is_comparison(const nir_alu_instr *instr) case nir_op_flt: case nir_op_fge: case nir_op_feq: - case nir_op_fne: + case nir_op_fneu: case nir_op_ilt: case nir_op_ult: case nir_op_ige: @@ -3024,7 +3024,7 @@ typedef struct nir_shader_compiler_options { /** lowers fsub and isub to fadd+fneg and iadd+ineg. */ bool lower_sub; - /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */ + /* lower {slt,sge,seq,sne} to {flt,fge,feq,fneu} + b2f: */ bool lower_scmp; /* lower fall_equalN/fany_nequalN (ex:fany_nequal4 to sne+fdot4+fsat) */ diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h index bcaeae382b5..31801adbc3c 100644 --- a/src/compiler/nir/nir_builder.h +++ b/src/compiler/nir/nir_builder.h @@ -1400,7 +1400,7 @@ nir_compare_func(nir_builder *b, enum compare_func func, case COMPARE_FUNC_EQUAL: return nir_feq(b, src0, src1); case COMPARE_FUNC_NOTEQUAL: - return nir_fne(b, src0, src1); + return nir_fneu(b, src0, src1); case COMPARE_FUNC_GREATER: return nir_flt(b, src1, src0); case COMPARE_FUNC_GEQUAL: diff --git a/src/compiler/nir/nir_builtin_builder.h b/src/compiler/nir/nir_builtin_builder.h index ef668d2f64a..a7f8fab2736 100644 --- a/src/compiler/nir/nir_builtin_builder.h +++ b/src/compiler/nir/nir_builtin_builder.h @@ -58,7 +58,7 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex); static inline nir_ssa_def * nir_nan_check2(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *res) { - return nir_bcsel(b, nir_fne(b, x, x), x, nir_bcsel(b, nir_fne(b, y, y), y, res)); + return nir_bcsel(b, nir_fneu(b, x, x), x, nir_bcsel(b, nir_fneu(b, y, y), y, res)); } static inline nir_ssa_def * diff --git a/src/compiler/nir/nir_loop_analyze.c b/src/compiler/nir/nir_loop_analyze.c index c326158eda9..fa930a71c70 100644 --- a/src/compiler/nir/nir_loop_analyze.c +++ b/src/compiler/nir/nir_loop_analyze.c @@ -634,7 +634,7 @@ get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step, case nir_op_fge: case nir_op_flt: case nir_op_feq: - case nir_op_fne: + case nir_op_fneu: span = eval_const_binop(nir_op_fsub, bit_size, limit, initial, execution_mode); iter = eval_const_binop(nir_op_fdiv, bit_size, span, @@ -845,10 +845,10 @@ inverse_comparison(nir_op alu_op) 
case nir_op_ult: return nir_op_uge; case nir_op_feq: - return nir_op_fne; + return nir_op_fneu; case nir_op_ieq: return nir_op_ine; - case nir_op_fne: + case nir_op_fneu: return nir_op_feq; case nir_op_ine: return nir_op_ieq; diff --git a/src/compiler/nir/nir_lower_alu_to_scalar.c b/src/compiler/nir/nir_lower_alu_to_scalar.c index 138318fbf60..f56eb76b02d 100644 --- a/src/compiler/nir/nir_lower_alu_to_scalar.c +++ b/src/compiler/nir/nir_lower_alu_to_scalar.c @@ -217,19 +217,19 @@ lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_data) LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd); LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand); LOWER_REDUCTION(nir_op_ball_iequal, nir_op_ieq, nir_op_iand); - LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fne, nir_op_ior); + LOWER_REDUCTION(nir_op_bany_fnequal, nir_op_fneu, nir_op_ior); LOWER_REDUCTION(nir_op_bany_inequal, nir_op_ine, nir_op_ior); LOWER_REDUCTION(nir_op_b8all_fequal, nir_op_feq8, nir_op_iand); LOWER_REDUCTION(nir_op_b8all_iequal, nir_op_ieq8, nir_op_iand); - LOWER_REDUCTION(nir_op_b8any_fnequal, nir_op_fne8, nir_op_ior); + LOWER_REDUCTION(nir_op_b8any_fnequal, nir_op_fneu8, nir_op_ior); LOWER_REDUCTION(nir_op_b8any_inequal, nir_op_ine8, nir_op_ior); LOWER_REDUCTION(nir_op_b16all_fequal, nir_op_feq16, nir_op_iand); LOWER_REDUCTION(nir_op_b16all_iequal, nir_op_ieq16, nir_op_iand); - LOWER_REDUCTION(nir_op_b16any_fnequal, nir_op_fne16, nir_op_ior); + LOWER_REDUCTION(nir_op_b16any_fnequal, nir_op_fneu16, nir_op_ior); LOWER_REDUCTION(nir_op_b16any_inequal, nir_op_ine16, nir_op_ior); LOWER_REDUCTION(nir_op_b32all_fequal, nir_op_feq32, nir_op_iand); LOWER_REDUCTION(nir_op_b32all_iequal, nir_op_ieq32, nir_op_iand); - LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fne32, nir_op_ior); + LOWER_REDUCTION(nir_op_b32any_fnequal, nir_op_fneu32, nir_op_ior); LOWER_REDUCTION(nir_op_b32any_inequal, nir_op_ine32, nir_op_ior); LOWER_REDUCTION(nir_op_fall_equal, nir_op_seq, nir_op_fmin); LOWER_REDUCTION(nir_op_fany_nequal, nir_op_sne, nir_op_fmax); diff --git a/src/compiler/nir/nir_lower_bool_to_bitsize.c b/src/compiler/nir/nir_lower_bool_to_bitsize.c index ef9aa81f170..e7414fbf3d9 100644 --- a/src/compiler/nir/nir_lower_bool_to_bitsize.c +++ b/src/compiler/nir/nir_lower_bool_to_bitsize.c @@ -196,9 +196,9 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu) bit_size == 16 ? nir_op_feq16 : nir_op_feq32; break; - case nir_op_fne: - opcode = bit_size == 8 ? nir_op_fne8 : - bit_size == 16 ? nir_op_fne16 : nir_op_fne32; + case nir_op_fneu: + opcode = bit_size == 8 ? nir_op_fneu8 : + bit_size == 16 ? 
nir_op_fneu16 : nir_op_fneu32; break; case nir_op_ilt: diff --git a/src/compiler/nir/nir_lower_bool_to_float.c b/src/compiler/nir/nir_lower_bool_to_float.c index a94e4371375..0dd69c958f9 100644 --- a/src/compiler/nir/nir_lower_bool_to_float.c +++ b/src/compiler/nir/nir_lower_bool_to_float.c @@ -73,7 +73,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu) case nir_op_flt: alu->op = nir_op_slt; break; case nir_op_fge: alu->op = nir_op_sge; break; case nir_op_feq: alu->op = nir_op_seq; break; - case nir_op_fne: alu->op = nir_op_sne; break; + case nir_op_fneu: alu->op = nir_op_sne; break; case nir_op_ilt: alu->op = nir_op_slt; break; case nir_op_ige: alu->op = nir_op_sge; break; case nir_op_ieq: alu->op = nir_op_seq; break; diff --git a/src/compiler/nir/nir_lower_bool_to_int32.c b/src/compiler/nir/nir_lower_bool_to_int32.c index 2bdab5fab83..1ea8d12cb21 100644 --- a/src/compiler/nir/nir_lower_bool_to_int32.c +++ b/src/compiler/nir/nir_lower_bool_to_int32.c @@ -77,7 +77,7 @@ lower_alu_instr(nir_alu_instr *alu) case nir_op_flt: alu->op = nir_op_flt32; break; case nir_op_fge: alu->op = nir_op_fge32; break; case nir_op_feq: alu->op = nir_op_feq32; break; - case nir_op_fne: alu->op = nir_op_fne32; break; + case nir_op_fneu: alu->op = nir_op_fneu32; break; case nir_op_ilt: alu->op = nir_op_ilt32; break; case nir_op_ige: alu->op = nir_op_ige32; break; case nir_op_ieq: alu->op = nir_op_ieq32; break; diff --git a/src/compiler/nir/nir_lower_double_ops.c b/src/compiler/nir/nir_lower_double_ops.c index a6a9b958426..4910e1b8958 100644 --- a/src/compiler/nir/nir_lower_double_ops.c +++ b/src/compiler/nir/nir_lower_double_ops.c @@ -104,7 +104,7 @@ fix_inv_result(nir_builder *b, nir_ssa_def *res, nir_ssa_def *src, nir_imm_double(b, 0.0f), res); /* If the original input was 0, generate the correctly-signed infinity */ - res = nir_bcsel(b, nir_fne(b, src, nir_imm_double(b, 0.0f)), + res = nir_bcsel(b, nir_fneu(b, src, nir_imm_double(b, 0.0f)), res, get_signed_inf(b, src)); return res; @@ -541,8 +541,8 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr, name = "__feq64"; return_type = glsl_bool_type(); break; - case nir_op_fne: - name = "__fne64"; + case nir_op_fneu: + name = "__fneu64"; return_type = glsl_bool_type(); break; case nir_op_flt: diff --git a/src/compiler/nir/nir_lower_frexp.c b/src/compiler/nir/nir_lower_frexp.c index 3b956615c34..ce6c811a965 100644 --- a/src/compiler/nir/nir_lower_frexp.c +++ b/src/compiler/nir/nir_lower_frexp.c @@ -35,7 +35,7 @@ lower_frexp_sig(nir_builder *b, nir_ssa_def *x) nir_ssa_def *abs_x = nir_fabs(b, x); nir_ssa_def *zero = nir_imm_floatN_t(b, 0, x->bit_size); nir_ssa_def *sign_mantissa_mask, *exponent_value; - nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero); + nir_ssa_def *is_not_zero = nir_fneu(b, abs_x, zero); switch (x->bit_size) { case 16: @@ -109,7 +109,7 @@ lower_frexp_exp(nir_builder *b, nir_ssa_def *x) { nir_ssa_def *abs_x = nir_fabs(b, x); nir_ssa_def *zero = nir_imm_floatN_t(b, 0, x->bit_size); - nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero); + nir_ssa_def *is_not_zero = nir_fneu(b, abs_x, zero); nir_ssa_def *exponent; switch (x->bit_size) { diff --git a/src/compiler/nir/nir_lower_int_to_float.c b/src/compiler/nir/nir_lower_int_to_float.c index e526108d904..0b35dc4f3f7 100644 --- a/src/compiler/nir/nir_lower_int_to_float.c +++ b/src/compiler/nir/nir_lower_int_to_float.c @@ -70,7 +70,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu) case nir_op_ilt: alu->op = nir_op_flt; break; case nir_op_ige: alu->op = nir_op_fge; break; 
case nir_op_ieq: alu->op = nir_op_feq; break; - case nir_op_ine: alu->op = nir_op_fne; break; + case nir_op_ine: alu->op = nir_op_fneu; break; case nir_op_ult: alu->op = nir_op_flt; break; case nir_op_uge: alu->op = nir_op_fge; break; diff --git a/src/compiler/nir/nir_opcodes.py b/src/compiler/nir/nir_opcodes.py index e5655241415..87b5e4efac1 100644 --- a/src/compiler/nir/nir_opcodes.py +++ b/src/compiler/nir/nir_opcodes.py @@ -747,7 +747,7 @@ binop("frem", tfloat, "", "src0 - src1 * truncf(src0 / src1)") binop_compare_all_sizes("flt", tfloat, "", "src0 < src1") binop_compare_all_sizes("fge", tfloat, "", "src0 >= src1") binop_compare_all_sizes("feq", tfloat, _2src_commutative, "src0 == src1") -binop_compare_all_sizes("fne", tfloat, _2src_commutative, "src0 != src1") +binop_compare_all_sizes("fneu", tfloat, _2src_commutative, "src0 != src1") binop_compare_all_sizes("ilt", tint, "", "src0 < src1") binop_compare_all_sizes("ige", tint, "", "src0 >= src1") binop_compare_all_sizes("ieq", tint, _2src_commutative, "src0 == src1") diff --git a/src/compiler/nir/nir_opt_algebraic.py b/src/compiler/nir/nir_opt_algebraic.py index 14e7f30d976..65cff5c04eb 100644 --- a/src/compiler/nir/nir_opt_algebraic.py +++ b/src/compiler/nir/nir_opt_algebraic.py @@ -300,8 +300,8 @@ optimizations.extend([ # Comparison simplifications (('~inot', ('flt', a, b)), ('fge', a, b)), (('~inot', ('fge', a, b)), ('flt', a, b)), - (('inot', ('feq', a, b)), ('fne', a, b)), - (('inot', ('fne', a, b)), ('feq', a, b)), + (('inot', ('feq', a, b)), ('fneu', a, b)), + (('inot', ('fneu', a, b)), ('feq', a, b)), (('inot', ('ilt', a, b)), ('ige', a, b)), (('inot', ('ult', a, b)), ('uge', a, b)), (('inot', ('ige', a, b)), ('ilt', a, b)), @@ -309,7 +309,7 @@ optimizations.extend([ (('inot', ('ieq', a, b)), ('ine', a, b)), (('inot', ('ine', a, b)), ('ieq', a, b)), - (('iand', ('feq', a, b), ('fne', a, b)), False), + (('iand', ('feq', a, b), ('fneu', a, b)), False), (('iand', ('flt', a, b), ('flt', b, a)), False), (('iand', ('ieq', a, b), ('ine', a, b)), False), (('iand', ('ilt', a, b), ('ilt', b, a)), False), @@ -321,12 +321,12 @@ optimizations.extend([ (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)), (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)), (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)), - (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)), + (('fneu', ('fneg', a), ('fneg', b)), ('fneu', b, a)), (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)), (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)), (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)), (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)), - (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)), + (('fneu', ('fneg', a), -1.0), ('fneu', 1.0, a)), (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)), (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)), @@ -334,7 +334,7 @@ optimizations.extend([ (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)), (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)), (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)), - (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)), + (('fneu', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fneu', a, b)), (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)), (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)), @@ -349,16 +349,16 @@ optimizations.extend([ (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)), - (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)), - 
(('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)), - (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)), - (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)), - (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)), - (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)), - (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)), - (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)), - (('fne', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)), - (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)), + (('fneu', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)), + (('fneu', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)), + (('fneu', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)), + (('fneu', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)), + (('fneu', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)), + (('fneu', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)), + (('fneu', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)), + (('fneu', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)), + (('fneu', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)), + (('fneu', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)), (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))), (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))), (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('inot', ('ior', a, b))), @@ -397,15 +397,15 @@ optimizations.extend([ (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)), (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)), (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)), - (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)), + (('~fneu', ('fadd', a, b), a), ('fneu', b, 0.0)), (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))), (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)), (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))), (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)), (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))), (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)), - (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))), - (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)), + (('~fneu', ('fadd(is_used_once)', a, '#b'), '#c'), ('fneu', a, ('fadd', c, ('fneg', b)))), + (('~fneu', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fneu', ('fneg', ('fadd', c, b)), a)), # Cannot remove the addition from ilt or ige due to overflow. 
(('ieq', ('iadd', a, b), a), ('ieq', b, 0)), @@ -425,17 +425,17 @@ optimizations.extend([ (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))), (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)), - (('~fne', ('b2f', 'a@1'), 0.0), a), + (('~fneu', ('b2f', 'a@1'), 0.0), a), (('ieq', ('b2i', 'a@1'), 0), ('inot', a)), (('ine', ('b2i', 'a@1'), 0), a), - (('fne', ('u2f', a), 0.0), ('ine', a, 0)), + (('fneu', ('u2f', a), 0.0), ('ine', a, 0)), (('feq', ('u2f', a), 0.0), ('ieq', a, 0)), (('fge', ('u2f', a), 0.0), True), (('fge', 0.0, ('u2f', a)), ('uge', 0, a)), # ieq instead? (('flt', ('u2f', a), 0.0), False), (('flt', 0.0, ('u2f', a)), ('ult', 0, a)), # ine instead? - (('fne', ('i2f', a), 0.0), ('ine', a, 0)), + (('fneu', ('i2f', a), 0.0), ('ine', a, 0)), (('feq', ('i2f', a), 0.0), ('ieq', a, 0)), (('fge', ('i2f', a), 0.0), ('ige', a, 0)), (('fge', 0.0, ('i2f', a)), ('ige', 0, a)), @@ -446,11 +446,11 @@ optimizations.extend([ # fabs(a) > 0.0 # fabs(a) != 0.0 because fabs(a) must be >= 0 # a != 0.0 - (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)), + (('~flt', 0.0, ('fabs', a)), ('fneu', a, 0.0)), # -fabs(a) < 0.0 # fabs(a) > 0.0 - (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)), + (('~flt', ('fneg', ('fabs', a)), 0.0), ('fneu', a, 0.0)), # 0.0 >= fabs(a) # 0.0 == fabs(a) because fabs(a) must be >= 0 @@ -469,7 +469,7 @@ optimizations.extend([ # !((a >= 0.0) && (a <= 1.0)) # !(a == fsat(a)) # a != fsat(a) - (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'), + (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fneu', a, ('fsat', a)), '!options->lower_fsat'), (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))), (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))), @@ -726,7 +726,7 @@ optimizations.extend([ (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'), (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'), (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'), - (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'), + (('sne', a, b), ('b2f', ('fneu', a, b)), 'options->lower_scmp'), (('seq', ('seq', a, b), 1.0), ('seq', a, b)), (('seq', ('sne', a, b), 1.0), ('sne', a, b)), (('seq', ('slt', a, b), 1.0), ('slt', a, b)), @@ -749,7 +749,7 @@ optimizations.extend([ (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'), (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'), (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'), - (('fne', ('fneg', a), a), ('fne', a, 0.0)), + (('fneu', ('fneg', a), a), ('fneu', a, 0.0)), (('feq', ('fneg', a), a), ('feq', a, 0.0)), # Emulating booleans (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))), @@ -954,7 +954,7 @@ optimizations.extend([ (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)), (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)), - (('fne', 'a(is_not_zero)', 0.0), True), + (('fneu', 'a(is_not_zero)', 0.0), True), (('feq', 'a(is_not_zero)', 0.0), False), # In this chart, + means value > 0 and - means value < 0. 
@@ -1463,7 +1463,7 @@ for bit_size in [8, 16, 32, 64]: ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'), ] -invert = OrderedDict([('feq', 'fne'), ('fne', 'feq')]) +invert = OrderedDict([('feq', 'fneu'), ('fneu', 'feq')]) for left, right in itertools.combinations_with_replacement(invert.keys(), 2): optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))), @@ -1556,7 +1556,7 @@ for t in ['int', 'uint', 'float']: aN = 'a@' + str(N) bN = 'b@' + str(N) xeq = 'feq' if t == 'float' else 'ieq' - xne = 'fne' if t == 'float' else 'ine' + xne = 'fneu' if t == 'float' else 'ine' xge = '{0}ge'.format(t[0]) xlt = '{0}lt'.format(t[0]) @@ -1731,7 +1731,7 @@ for op in ['flt', 'fge', 'feq']: # which constant folding will eat for lunch. The resulting ternary will # further get cleaned up by the boolean reductions above and we will be # left with just the original variable "a". -for op in ['flt', 'fge', 'feq', 'fne', +for op in ['flt', 'fge', 'feq', 'fneu', 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']: optimizations += [ ((op, ('bcsel', 'a', '#b', '#c'), '#d'), @@ -1891,7 +1891,7 @@ late_optimizations = [ (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))), (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)), (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))), - (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))), + (('~fneu', ('fadd', a, b), 0.0), ('fneu', a, ('fneg', b))), # nir_lower_to_source_mods will collapse this, but its existence during the # optimization loop can prevent other optimizations. @@ -1913,7 +1913,7 @@ late_optimizations = [ (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)), (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)), (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)), - (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)), + (('fneu', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fneu', a, b)), (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)), (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)), @@ -1923,12 +1923,12 @@ late_optimizations = [ (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)), (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)), (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)), - (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)), + (('fneu', ('fneg', a), ('fneg', b)), ('fneu', b, a)), (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)), (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)), (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)), (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)), - (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)), + (('fneu', ('fneg', a), -1.0), ('fneu', 1.0, a)), (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)), (('ior', a, a), a), diff --git a/src/compiler/nir/nir_opt_comparison_pre.c b/src/compiler/nir/nir_opt_comparison_pre.c index 1bb62f32145..e82e462626c 100644 --- a/src/compiler/nir/nir_opt_comparison_pre.c +++ b/src/compiler/nir/nir_opt_comparison_pre.c @@ -319,7 +319,7 @@ comparison_pre_block(nir_block *block, struct block_queue *bq, nir_builder *bld) case nir_op_flt: case nir_op_fge: - case nir_op_fne: + case nir_op_fneu: case nir_op_feq: /* If the instruction is a comparison that is used by an if-statement * and neither operand is immediate value 0, add it to the set. 
diff --git a/src/compiler/nir/nir_opt_if.c b/src/compiler/nir/nir_opt_if.c index a7feac1db93..a97126cb0d8 100644 --- a/src/compiler/nir/nir_opt_if.c +++ b/src/compiler/nir/nir_opt_if.c @@ -275,7 +275,7 @@ alu_instr_is_comparison(const nir_alu_instr *alu) case nir_op_flt32: case nir_op_fge32: case nir_op_feq32: - case nir_op_fne32: + case nir_op_fneu32: case nir_op_ilt32: case nir_op_ult32: case nir_op_ige32: diff --git a/src/compiler/nir/nir_opt_rematerialize_compares.c b/src/compiler/nir/nir_opt_rematerialize_compares.c index 9647e361598..b87dad18570 100644 --- a/src/compiler/nir/nir_opt_rematerialize_compares.c +++ b/src/compiler/nir/nir_opt_rematerialize_compares.c @@ -37,8 +37,8 @@ is_two_src_comparison(const nir_alu_instr *instr) case nir_op_fge32: case nir_op_feq: case nir_op_feq32: - case nir_op_fne: - case nir_op_fne32: + case nir_op_fneu: + case nir_op_fneu32: case nir_op_ilt: case nir_op_ilt32: case nir_op_ult: diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c index 0793d67a02f..5ef66ad8922 100644 --- a/src/compiler/nir/nir_range_analysis.c +++ b/src/compiler/nir/nir_range_analysis.c @@ -942,7 +942,7 @@ analyze_expression(const nir_alu_instr *instr, unsigned src, case nir_op_flt: case nir_op_fge: case nir_op_feq: - case nir_op_fne: + case nir_op_fneu: case nir_op_ilt: case nir_op_ige: case nir_op_ieq: diff --git a/src/compiler/spirv/vtn_alu.c b/src/compiler/spirv/vtn_alu.c index e92a48f24a6..ada56dcb67c 100644 --- a/src/compiler/spirv/vtn_alu.c +++ b/src/compiler/spirv/vtn_alu.c @@ -282,8 +282,8 @@ vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b, case SpvOpFUnordEqual: return nir_op_feq; case SpvOpINotEqual: return nir_op_ine; case SpvOpLessOrGreater: /* Deprecated, use OrdNotEqual */ - case SpvOpFOrdNotEqual: return nir_op_fne; - case SpvOpFUnordNotEqual: return nir_op_fne; + case SpvOpFOrdNotEqual: return nir_op_fneu; + case SpvOpFUnordNotEqual: return nir_op_fneu; case SpvOpULessThan: return nir_op_ult; case SpvOpSLessThan: return nir_op_ilt; case SpvOpFOrdLessThan: return nir_op_flt; @@ -513,7 +513,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, break; case SpvOpIsNan: - dest->def = nir_fne(&b->nb, src[0], src[0]); + dest->def = nir_fneu(&b->nb, src[0], src[0]); break; case SpvOpOrdered: @@ -522,8 +522,8 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, break; case SpvOpUnordered: - dest->def = nir_ior(&b->nb, nir_fne(&b->nb, src[0], src[0]), - nir_fne(&b->nb, src[1], src[1])); + dest->def = nir_ior(&b->nb, nir_fneu(&b->nb, src[0], src[0]), + nir_fneu(&b->nb, src[1], src[1])); break; case SpvOpIsInf: { @@ -554,8 +554,8 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode, nir_ior(&b->nb, nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL), nir_ior(&b->nb, - nir_fne(&b->nb, src[0], src[0]), - nir_fne(&b->nb, src[1], src[1]))); + nir_fneu(&b->nb, src[0], src[0]), + nir_fneu(&b->nb, src[1], src[1]))); break; } diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c index a0f1a36545a..04a2dd9cea4 100644 --- a/src/freedreno/ir3/ir3_compiler_nir.c +++ b/src/freedreno/ir3/ir3_compiler_nir.c @@ -503,7 +503,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu) dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_EQ; break; - case nir_op_fne: + case nir_op_fneu: dst[0] = ir3_CMPS_F(b, src[0], 0, src[1], 0); dst[0]->cat2.condition = IR3_COND_NE; break; diff --git a/src/gallium/auxiliary/gallivm/lp_bld_nir.c b/src/gallium/auxiliary/gallivm/lp_bld_nir.c index 
fe5068b1db9..f407f6204c0 100644 --- a/src/gallium/auxiliary/gallivm/lp_bld_nir.c +++ b/src/gallium/auxiliary/gallivm/lp_bld_nir.c @@ -576,7 +576,7 @@ static LLVMValueRef do_alu_action(struct lp_build_nir_context *bld_base, case nir_op_fmax: result = lp_build_max(get_flt_bld(bld_base, src_bit_size[0]), src[0], src[1]); break; - case nir_op_fne32: + case nir_op_fneu32: result = fcmp32(bld_base, PIPE_FUNC_NOTEQUAL, src_bit_size[0], src); break; case nir_op_fneg: diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c index fb6bbb5026c..4d5d22f6b74 100644 --- a/src/gallium/auxiliary/nir/tgsi_to_nir.c +++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c @@ -1155,7 +1155,7 @@ ttn_if(struct ttn_compile *c, nir_ssa_def *src, bool is_uint) if_stmt->condition = nir_src_for_ssa(nir_ine(b, src_x, nir_imm_int(b, 0))); } else { /* equivalent to TGSI IF, src is interpreted as float */ - if_stmt->condition = nir_src_for_ssa(nir_fne(b, src_x, nir_imm_float(b, 0.0))); + if_stmt->condition = nir_src_for_ssa(nir_fneu(b, src_x, nir_imm_float(b, 0.0))); } nir_builder_cf_insert(b, &if_stmt->cf_node); @@ -1966,7 +1966,7 @@ static const nir_op op_trans[TGSI_OPCODE_LAST] = { [TGSI_OPCODE_FSEQ] = nir_op_feq, [TGSI_OPCODE_FSGE] = nir_op_fge, [TGSI_OPCODE_FSLT] = nir_op_flt, - [TGSI_OPCODE_FSNE] = nir_op_fne, + [TGSI_OPCODE_FSNE] = nir_op_fneu, [TGSI_OPCODE_KILL_IF] = 0, diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_emit.c b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_emit.c index 23eb9c26f9b..c10b9a95b70 100644 --- a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_emit.c +++ b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir_emit.c @@ -101,7 +101,7 @@ static const struct etna_op_info etna_ops[] = { /* compare with int result */ OPC(feq32, CMP, 0_1_X, EQ), - OPC(fne32, CMP, 0_1_X, NE), + OPC(fneu32, CMP, 0_1_X, NE), OPC(fge32, CMP, 0_1_X, GE), OPC(flt32, CMP, 0_1_X, LT), IOPC(ieq32, CMP, 0_1_X, EQ), diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp index 1df1c7753fd..b293e5d60b6 100644 --- a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp +++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp @@ -469,7 +469,7 @@ Converter::getOperation(nir_op op) case nir_op_flt32: case nir_op_ilt32: case nir_op_ult32: - case nir_op_fne32: + case nir_op_fneu32: case nir_op_ine32: return OP_SET; case nir_op_ishl: @@ -704,7 +704,7 @@ Converter::getCondCode(nir_op op) case nir_op_ilt32: case nir_op_ult32: return CC_LT; - case nir_op_fne32: + case nir_op_fneu32: return CC_NEU; case nir_op_ine32: return CC_NE; @@ -2574,7 +2574,7 @@ Converter::visit(nir_alu_instr *insn) case nir_op_flt32: case nir_op_ilt32: case nir_op_ult32: - case nir_op_fne32: + case nir_op_fneu32: case nir_op_ine32: { DEFAULT_CHECKS; LValues &newDefs = convert(&insn->dest); diff --git a/src/gallium/drivers/r600/sfn/sfn_emitaluinstruction.cpp b/src/gallium/drivers/r600/sfn/sfn_emitaluinstruction.cpp index e63fa973c17..79cae0c4d6f 100644 --- a/src/gallium/drivers/r600/sfn/sfn_emitaluinstruction.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_emitaluinstruction.cpp @@ -118,7 +118,7 @@ bool EmitAluInstruction::do_emit(nir_instr* ir) case nir_op_flt: return emit_alu_op2(instr, op2_setgt_dx10, op2_opt_reverse); case nir_op_fge: return emit_alu_op2(instr, op2_setge_dx10); - case nir_op_fne: return emit_alu_op2(instr, op2_setne_dx10); + case nir_op_fneu: return emit_alu_op2(instr, op2_setne_dx10); case nir_op_feq: return 
emit_alu_op2(instr, op2_sete_dx10); case nir_op_fmin: return emit_alu_op2(instr, op2_min_dx10); diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c index 9c89ef6843f..d2a92518863 100644 --- a/src/gallium/drivers/vc4/vc4_program.c +++ b/src/gallium/drivers/vc4/vc4_program.c @@ -958,7 +958,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, case nir_op_seq: cond = QPU_COND_ZS; break; - case nir_op_fne32: + case nir_op_fneu32: case nir_op_ine32: case nir_op_sne: cond = QPU_COND_ZC; @@ -1213,7 +1213,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) case nir_op_sge: case nir_op_slt: case nir_op_feq32: - case nir_op_fne32: + case nir_op_fneu32: case nir_op_fge32: case nir_op_flt32: case nir_op_ieq32: diff --git a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c index a4106e7439c..a1347f163fd 100644 --- a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c +++ b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c @@ -1318,7 +1318,7 @@ emit_alu(struct ntv_context *ctx, nir_alu_instr *alu) BINOP(nir_op_flt, SpvOpFOrdLessThan) BINOP(nir_op_fge, SpvOpFOrdGreaterThanEqual) BINOP(nir_op_feq, SpvOpFOrdEqual) - BINOP(nir_op_fne, SpvOpFUnordNotEqual) + BINOP(nir_op_fneu, SpvOpFUnordNotEqual) BINOP(nir_op_ishl, SpvOpShiftLeftLogical) BINOP(nir_op_ishr, SpvOpShiftRightArithmetic) BINOP(nir_op_ushr, SpvOpShiftRightLogical) diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp index 243ad37c4e3..b05421746a5 100644 --- a/src/intel/compiler/brw_fs_nir.cpp +++ b/src/intel/compiler/brw_fs_nir.cpp @@ -1385,7 +1385,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr, case nir_op_flt32: case nir_op_fge32: case nir_op_feq32: - case nir_op_fne32: { + case nir_op_fneu32: { fs_reg dest = result; const uint32_t bit_size = nir_src_bit_size(instr->src[0].src); @@ -3435,7 +3435,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld, alu->op != nir_op_bcsel && (devinfo->gen > 5 || (alu->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) != BRW_NIR_BOOLEAN_NEEDS_RESOLVE || - alu->op == nir_op_fne32 || alu->op == nir_op_feq32 || + alu->op == nir_op_fneu32 || alu->op == nir_op_feq32 || alu->op == nir_op_flt32 || alu->op == nir_op_fge32 || alu->op == nir_op_ine32 || alu->op == nir_op_ieq32 || alu->op == nir_op_ilt32 || alu->op == nir_op_ige32 || diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c index 510ab0b769c..567bb42c805 100644 --- a/src/intel/compiler/brw_nir.c +++ b/src/intel/compiler/brw_nir.c @@ -1187,8 +1187,8 @@ brw_cmod_for_nir_comparison(nir_op op) case nir_op_b32all_iequal4: return BRW_CONDITIONAL_Z; - case nir_op_fne: - case nir_op_fne32: + case nir_op_fneu: + case nir_op_fneu32: case nir_op_ine: case nir_op_ine32: case nir_op_b32any_fnequal2: diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp index 607cfc24d9c..4ba23d15774 100644 --- a/src/intel/compiler/brw_vec4_nir.cpp +++ b/src/intel/compiler/brw_vec4_nir.cpp @@ -1465,7 +1465,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr) case nir_op_flt32: case nir_op_fge32: case nir_op_feq32: - case nir_op_fne32: { + case nir_op_fneu32: { enum brw_conditional_mod conditional_mod = brw_cmod_for_nir_comparison(instr->op); diff --git a/src/panfrost/bifrost/bifrost_compile.c b/src/panfrost/bifrost/bifrost_compile.c index 60188bfb30f..39f9b658040 100644 --- a/src/panfrost/bifrost/bifrost_compile.c +++ b/src/panfrost/bifrost/bifrost_compile.c @@ 
-525,7 +525,7 @@ bi_class_for_nir_alu(nir_op op) BI_CASE_CMP(nir_op_flt) BI_CASE_CMP(nir_op_fge) BI_CASE_CMP(nir_op_feq) - BI_CASE_CMP(nir_op_fne) + BI_CASE_CMP(nir_op_fneu) BI_CASE_CMP(nir_op_ilt) BI_CASE_CMP(nir_op_ige) BI_CASE_CMP(nir_op_ieq) @@ -630,7 +630,7 @@ bi_cond_for_nir(nir_op op, bool soft) BI_CASE_CMP(nir_op_ieq) return BI_COND_EQ; - BI_CASE_CMP(nir_op_fne) + BI_CASE_CMP(nir_op_fneu) BI_CASE_CMP(nir_op_ine) return BI_COND_NE; default: @@ -848,7 +848,7 @@ emit_alu(bi_context *ctx, nir_alu_instr *instr) BI_CASE_CMP(nir_op_ige) BI_CASE_CMP(nir_op_feq) BI_CASE_CMP(nir_op_ieq) - BI_CASE_CMP(nir_op_fne) + BI_CASE_CMP(nir_op_fneu) BI_CASE_CMP(nir_op_ine) BI_CASE_CMP(nir_op_uge) alu.cond = bi_cond_for_nir(instr->op, false); diff --git a/src/panfrost/bifrost/bifrost_nir_algebraic.py b/src/panfrost/bifrost/bifrost_nir_algebraic.py index 6f3f6eee1a9..974bdf2c899 100644 --- a/src/panfrost/bifrost/bifrost_nir_algebraic.py +++ b/src/panfrost/bifrost/bifrost_nir_algebraic.py @@ -86,7 +86,7 @@ SPECIAL = ['fexp2', 'flog2', 'fsin', 'fcos'] for op in SPECIAL: converts += [((op + '@16', a), ('f2f16', (op, ('f2f32', a))))] -converts += [(('f2b32', a), ('fne32', a, 0.0)), +converts += [(('f2b32', a), ('fneu32', a, 0.0)), (('i2b32', a), ('ine32', a, 0)), (('b2i32', a), ('iand', 'a@32', 1))] diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c index 249f28fcea3..2ffdee3f162 100644 --- a/src/panfrost/midgard/midgard_compile.c +++ b/src/panfrost/midgard/midgard_compile.c @@ -888,7 +888,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) ALU_CASE(mov, imov); ALU_CASE_CMP(feq32, feq, false); - ALU_CASE_CMP(fne32, fne, false); + ALU_CASE_CMP(fneu32, fne, false); ALU_CASE_CMP(flt32, flt, false); ALU_CASE_CMP(ieq32, ieq, true); ALU_CASE_CMP(ine32, ine, true);
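
For readers new to the distinction the new name spells out: fneu is the unordered not-equal, i.e. it returns true when the operands differ and also when either operand is NaN. That is what the backends above already map it to (LLVMRealUNE in ac_nir_to_llvm, SpvOpFUnordNotEqual in zink, the __is_nan checks in __fneu64), and it matches the behaviour of C's != operator on floats. Below is a minimal host-side sketch of the two flavours, assuming an IEEE 754 C environment; the ordered variant is not part of this patch, and fne_ordered is only an illustrative name.

#include <math.h>
#include <stdio.h>

/* Unordered not-equal, the semantics of nir_op_fneu: true if the values
 * differ OR if either operand is NaN.  C's != already behaves this way. */
static int fneu(float a, float b)
{
   return a != b;
}

/* An ordered not-equal additionally requires both operands to be numbers;
 * this is the kind of opcode the commit message leaves room for later. */
static int fne_ordered(float a, float b)
{
   return !isnan(a) && !isnan(b) && a != b;
}

int main(void)
{
   float qnan = nanf("");
   printf("fneu(NaN, 1.0)        = %d\n", fneu(qnan, 1.0f));        /* 1 */
   printf("fne_ordered(NaN, 1.0) = %d\n", fne_ordered(qnan, 1.0f)); /* 0 */
   printf("fneu(1.0, 1.0)        = %d\n", fneu(1.0f, 1.0f));        /* 0 */
   return 0;
}

With IEEE 754 floats this prints 1, 0 and 0, which is the NaN behaviour the fneu name is meant to make explicit.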