From 4d9260171530f7fe9cc7a7ceffa5286355e546fc Mon Sep 17 00:00:00 2001
From: Samuel Pitoiset
Date: Mon, 20 Jan 2020 09:00:07 +0100
Subject: [PATCH] aco: implement 64-bit nir_op_ffloor on GFX6
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

GFX6 doesn't have V_FLOOR_F64, so it needs to be lowered. Loosely based
on the AMDGPU LLVM backend.

Introduce a new function because it will be useful for some other
64-bit operations.

Signed-off-by: Samuel Pitoiset
Reviewed-by: Daniel Schürmann
Part-of:
---
 .../compiler/aco_instruction_selection.cpp | 33 ++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp
index 5ccf4fd48a4..be65900aee2 100644
--- a/src/amd/compiler/aco_instruction_selection.cpp
+++ b/src/amd/compiler/aco_instruction_selection.cpp
@@ -780,6 +780,37 @@ Temp emit_trunc_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
    return bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst_lo, dst_hi);
 }
 
+Temp emit_floor_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val)
+{
+   if (ctx->options->chip_class >= GFX7)
+      return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
+
+   /* GFX6 doesn't support V_FLOOR_F64, lower it. */
+   Temp src0 = as_vgpr(ctx, val);
+
+   Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
+   Temp min_val = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(-1u), Operand(0x3fefffffu));
+
+   Temp isnan = bld.vopc_e64(aco_opcode::v_cmp_class_f64, bld.hint_vcc(bld.def(bld.lm)), src0, mask);
+   Temp fract = bld.vop1(aco_opcode::v_fract_f64, bld.def(v2), src0);
+   Temp min = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), fract, min_val);
+
+   Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1);
+   bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), src0);
+   Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1);
+   bld.pseudo(aco_opcode::p_split_vector, Definition(else_lo), Definition(else_hi), min);
+
+   Temp dst0 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_lo, then_lo, isnan);
+   Temp dst1 = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), else_hi, then_hi, isnan);
+
+   Temp v = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), dst0, dst1);
+
+   Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, v);
+   static_cast<VOP3A_instruction*>(add)->neg[1] = true;
+
+   return add->definitions[0].getTemp();
+}
+
 void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
 {
    if (!instr->dest.dest.is_ssa) {
@@ -1689,7 +1720,7 @@
       if (dst.size() == 1) {
         emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
      } else if (dst.size() == 2) {
-        emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f64, dst);
+        emit_floor_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
      } else {
         fprintf(stderr, "Unimplemented NIR instr bit size: ");
         nir_print_instr(&instr->instr, stderr);
-- 
2.30.2
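
For reference, below is a minimal scalar sketch of the arithmetic the new GFX6 path emits. It is not part of the patch: floor_f64_ref is a hypothetical name, and v_fract_f64 is modeled with the identity fract(x) = x - floor(x).

#include <cmath>
#include <cstdint>
#include <cstring>

/* Scalar model of the lowering: pick v = min(fract(x), largest double below
 * 1.0), or v = x itself when x is NaN (v_cmp_class_f64 with mask 3 tests for
 * signaling/quiet NaN), then compute x + (-v), i.e. the v_add_f64 with
 * neg[1] set. */
static double floor_f64_ref(double x)
{
   double v;
   if (std::isnan(x)) {
      v = x; /* then-branch of v_cndmask_b32: NaN - NaN stays NaN */
   } else {
      double fract = x - std::floor(x); /* stands in for v_fract_f64, in [0, 1) */

      /* 0x3FEFFFFFFFFFFFFF is the constant built from Operand(-1u) and
       * Operand(0x3fefffffu): the largest double strictly below 1.0. */
      uint64_t bits = 0x3FEFFFFFFFFFFFFFull;
      double min_val;
      std::memcpy(&min_val, &bits, sizeof min_val);

      v = std::fmin(fract, min_val); /* v_min_f64 */
   }
   return x - v; /* v_add_f64(dst, src0, -v) */
}

The explicit NaN select mirrors the v_cmp_class_f64/v_cndmask_b32 pair in the patch: a min against a finite constant could otherwise turn a NaN input into a finite result, so the lowering forwards src0 unchanged in that case.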