X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fcompiler%2Faco_lower_to_hw_instr.cpp;h=9af52aad9050b018b3bd246f1a34c4390913d3cb;hb=626081fe4bf653a18cff9e25e6da2636bc58774a;hp=3065f2835498a670882676d4d0df858768ec4541;hpb=7ae227effd245502b0d9433cf7b637aaf3b694ab;p=mesa.git diff --git a/src/amd/compiler/aco_lower_to_hw_instr.cpp b/src/amd/compiler/aco_lower_to_hw_instr.cpp index 3065f283549..9af52aad905 100644 --- a/src/amd/compiler/aco_lower_to_hw_instr.cpp +++ b/src/amd/compiler/aco_lower_to_hw_instr.cpp @@ -41,8 +41,107 @@ struct lower_context { std::vector> instructions; }; +/* used by handle_operands() indirectly through Builder::copy */ +uint8_t int8_mul_table[512] = { + 0, 20, 1, 1, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9, 1, 10, 1, 11, + 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1, 18, 1, 19, 1, 20, 1, 21, + 1, 22, 1, 23, 1, 24, 1, 25, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 31, + 1, 32, 1, 33, 1, 34, 1, 35, 1, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 41, + 1, 42, 1, 43, 1, 44, 1, 45, 1, 46, 1, 47, 1, 48, 1, 49, 1, 50, 1, 51, + 1, 52, 1, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60, 1, 61, + 1, 62, 1, 63, 1, 64, 5, 13, 2, 33, 17, 19, 2, 34, 3, 23, 2, 35, 11, 53, + 2, 36, 7, 47, 2, 37, 3, 25, 2, 38, 7, 11, 2, 39, 53, 243, 2, 40, 3, 27, + 2, 41, 17, 35, 2, 42, 5, 17, 2, 43, 3, 29, 2, 44, 15, 23, 2, 45, 7, 13, + 2, 46, 3, 31, 2, 47, 5, 19, 2, 48, 19, 59, 2, 49, 3, 33, 2, 50, 7, 51, + 2, 51, 15, 41, 2, 52, 3, 35, 2, 53, 11, 33, 2, 54, 23, 27, 2, 55, 3, 37, + 2, 56, 9, 41, 2, 57, 5, 23, 2, 58, 3, 39, 2, 59, 7, 17, 2, 60, 9, 241, + 2, 61, 3, 41, 2, 62, 5, 25, 2, 63, 35, 245, 2, 64, 3, 43, 5, 26, 9, 43, + 3, 44, 7, 19, 10, 39, 3, 45, 4, 34, 11, 59, 3, 46, 9, 243, 4, 35, 3, 47, + 22, 53, 7, 57, 3, 48, 5, 29, 10, 245, 3, 49, 4, 37, 9, 45, 3, 50, 7, 241, + 4, 38, 3, 51, 7, 22, 5, 31, 3, 52, 7, 59, 7, 242, 3, 53, 4, 40, 7, 23, + 3, 54, 15, 45, 4, 41, 3, 55, 6, 241, 9, 47, 3, 56, 13, 13, 5, 34, 3, 57, + 4, 43, 11, 39, 3, 58, 5, 35, 4, 44, 3, 59, 6, 243, 7, 245, 3, 60, 5, 241, + 7, 26, 3, 61, 4, 46, 5, 37, 3, 62, 11, 17, 4, 47, 3, 63, 5, 38, 5, 243, + 3, 64, 7, 247, 9, 50, 5, 39, 4, 241, 33, 37, 6, 33, 13, 35, 4, 242, 5, 245, + 6, 247, 7, 29, 4, 51, 5, 41, 5, 246, 7, 249, 3, 240, 11, 19, 5, 42, 3, 241, + 4, 245, 25, 29, 3, 242, 5, 43, 4, 246, 3, 243, 17, 58, 17, 43, 3, 244, + 5, 249, 6, 37, 3, 245, 2, 240, 5, 45, 2, 241, 21, 23, 2, 242, 3, 247, + 2, 243, 5, 251, 2, 244, 29, 61, 2, 245, 3, 249, 2, 246, 17, 29, 2, 247, + 9, 55, 1, 240, 1, 241, 1, 242, 1, 243, 1, 244, 1, 245, 1, 246, 1, 247, + 1, 248, 1, 249, 1, 250, 1, 251, 1, 252, 1, 253, 1, 254, 1, 255 +}; + + aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) { + /* Because some 16-bit instructions are already VOP3 on GFX10, we use the + * 32-bit opcodes (VOP2) which allows to remove the tempory VGPR and to use + * DPP with the arithmetic instructions. This requires to sign-extend. 
+ */ switch (op) { + case iadd8: + case iadd16: + if (chip >= GFX10) { + return aco_opcode::v_add_u32; + } else if (chip >= GFX8) { + return aco_opcode::v_add_u16; + } else { + return aco_opcode::v_add_co_u32; + } + break; + case imul8: + case imul16: + if (chip >= GFX10) { + return aco_opcode::v_mul_lo_u16_e64; + } else if (chip >= GFX8) { + return aco_opcode::v_mul_lo_u16; + } else { + return aco_opcode::v_mul_u32_u24; + } + break; + case fadd16: return aco_opcode::v_add_f16; + case fmul16: return aco_opcode::v_mul_f16; + case imax8: + case imax16: + if (chip >= GFX10) { + return aco_opcode::v_max_i32; + } else if (chip >= GFX8) { + return aco_opcode::v_max_i16; + } else { + return aco_opcode::v_max_i32; + } + break; + case imin8: + case imin16: + if (chip >= GFX10) { + return aco_opcode::v_min_i32; + } else if (chip >= GFX8) { + return aco_opcode::v_min_i16; + } else { + return aco_opcode::v_min_i32; + } + break; + case umin8: + case umin16: + if (chip >= GFX10) { + return aco_opcode::v_min_u32; + } else if (chip >= GFX8) { + return aco_opcode::v_min_u16; + } else { + return aco_opcode::v_min_u32; + } + break; + case umax8: + case umax16: + if (chip >= GFX10) { + return aco_opcode::v_max_u32; + } else if (chip >= GFX8) { + return aco_opcode::v_max_u16; + } else { + return aco_opcode::v_max_u32; + } + break; + case fmin16: return aco_opcode::v_min_f16; + case fmax16: return aco_opcode::v_max_f16; case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32; case imul32: return aco_opcode::v_mul_lo_u32; case fadd32: return aco_opcode::v_add_f32; @@ -53,8 +152,14 @@ aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) { case umax32: return aco_opcode::v_max_u32; case fmin32: return aco_opcode::v_min_f32; case fmax32: return aco_opcode::v_max_f32; + case iand8: + case iand16: case iand32: return aco_opcode::v_and_b32; + case ixor8: + case ixor16: case ixor32: return aco_opcode::v_xor_b32; + case ior8: + case ior16: case ior32: return aco_opcode::v_or_b32; case iadd64: return aco_opcode::num_opcodes; case imul64: return aco_opcode::num_opcodes; @@ -73,6 +178,15 @@ aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) { } } +bool is_vop3_reduce_opcode(aco_opcode opcode) +{ + /* 64-bit reductions are VOP3. */ + if (opcode == aco_opcode::num_opcodes) + return true; + + return instr_info.format[(int)opcode] == Format::VOP3; +} + void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1) { Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true); @@ -195,15 +309,15 @@ void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysRe { Builder bld(ctx->program, &ctx->instructions); Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)}; - RegClass src0_rc = src0_reg.reg >= 256 ? v1 : s1; + RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1; Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)}; Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)}; - Operand src0_64 = Operand(src0_reg, src0_reg.reg >= 256 ? v2 : s2); + Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? 
v2 : s2); Operand src1_64 = Operand(src1_reg, v2); if (src0_rc == s1 && (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) { - assert(vtmp.reg != 0); + assert(vtmp.reg() != 0); bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]); bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]); src0_reg = vtmp; @@ -211,7 +325,7 @@ void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysRe src0[1] = Operand(PhysReg{vtmp+1}, v1); src0_64 = Operand(vtmp, v2); } else if (src0_rc == s1 && op == iadd64) { - assert(vtmp.reg != 0); + assert(vtmp.reg() != 0); bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]); src0[1] = Operand(PhysReg{vtmp+1}, v1); } @@ -296,7 +410,7 @@ void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg Operand src1(src1_reg, rc); aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op); - bool vop3 = op == imul32 || size == 2; + bool vop3 = is_vop3_reduce_opcode(opcode); if (!vop3) { if (opcode == aco_opcode::v_add_co_u32) @@ -330,11 +444,11 @@ void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1 Builder bld(ctx->program, &ctx->instructions); RegClass rc = RegClass(RegType::vgpr, size); Definition dst(dst_reg, rc); - Operand src0(src0_reg, RegClass(src0_reg.reg >= 256 ? RegType::vgpr : RegType::sgpr, size)); + Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size)); Operand src1(src1_reg, rc); aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op); - bool vop3 = op == imul32 || size == 2; + bool vop3 = is_vop3_reduce_opcode(opcode); if (opcode == aco_opcode::num_opcodes) { emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op); @@ -363,41 +477,71 @@ void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size, uint32_t get_reduction_identity(ReduceOp op, unsigned idx) { switch (op) { + case iadd8: + case iadd16: case iadd32: case iadd64: + case fadd16: case fadd32: case fadd64: + case ior8: + case ior16: case ior32: case ior64: + case ixor8: + case ixor16: case ixor32: case ixor64: + case umax8: + case umax16: case umax32: case umax64: return 0; + case imul8: + case imul16: case imul32: case imul64: return idx ? 0 : 1; + case fmul16: + return 0x3c00u; /* 1.0 */ case fmul32: return 0x3f800000u; /* 1.0 */ case fmul64: return idx ? 0x3ff00000u : 0u; /* 1.0 */ + case imin8: + return INT8_MAX; + case imin16: + return INT16_MAX; case imin32: return INT32_MAX; case imin64: return idx ? 0x7fffffffu : 0xffffffffu; + case imax8: + return INT8_MIN; + case imax16: + return INT16_MIN; case imax32: return INT32_MIN; case imax64: return idx ? 0x80000000u : 0; + case umin8: + case umin16: + case iand8: + case iand16: + return 0xffffffffu; case umin32: case umin64: case iand32: case iand64: return 0xffffffffu; + case fmin16: + return 0x7c00u; /* infinity */ case fmin32: return 0x7f800000u; /* infinity */ case fmin64: return idx ? 
0x7ff00000u : 0u; /* infinity */ + case fmax16: + return 0xfc00u; /* negative infinity */ case fmax32: return 0xff800000u; /* negative infinity */ case fmax64: @@ -454,6 +598,54 @@ void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsig Operand(stmp, bld.lm)); } + if (src.regClass() == v1b) { + if (ctx->program->chip_class >= GFX8) { + aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; + sdwa->operands[0] = Operand(PhysReg{tmp}, v1); + sdwa->definitions[0] = Definition(PhysReg{tmp}, v1); + if (reduce_op == imin8 || reduce_op == imax8) + sdwa->sel[0] = sdwa_sbyte; + else + sdwa->sel[0] = sdwa_ubyte; + sdwa->dst_sel = sdwa_udword; + bld.insert(std::move(sdwa)); + } else { + aco_opcode opcode; + + if (reduce_op == imin8 || reduce_op == imax8) + opcode = aco_opcode::v_bfe_i32; + else + opcode = aco_opcode::v_bfe_u32; + + bld.vop3(opcode, Definition(PhysReg{tmp}, v1), + Operand(PhysReg{tmp}, v1), Operand(0u), Operand(8u)); + } + } else if (src.regClass() == v2b) { + if (ctx->program->chip_class >= GFX10 && + (reduce_op == iadd16 || reduce_op == imax16 || + reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) { + aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; + sdwa->operands[0] = Operand(PhysReg{tmp}, v1); + sdwa->definitions[0] = Definition(PhysReg{tmp}, v1); + if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16) + sdwa->sel[0] = sdwa_sword; + else + sdwa->sel[0] = sdwa_uword; + sdwa->dst_sel = sdwa_udword; + bld.insert(std::move(sdwa)); + } else if (ctx->program->chip_class == GFX6 || ctx->program->chip_class == GFX7) { + aco_opcode opcode; + + if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16) + opcode = aco_opcode::v_bfe_i32; + else + opcode = aco_opcode::v_bfe_u32; + + bld.vop3(opcode, Definition(PhysReg{tmp}, v1), + Operand(PhysReg{tmp}, v1), Operand(0u), Operand(16u)); + } + } + bool reduction_needs_last_op = false; switch (op) { case aco_opcode::p_reduce: @@ -533,7 +725,7 @@ void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsig Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0xffffffffu), Operand(0xffffffffu)).instr; - static_cast(perm)->opsel[0] = true; /* FI (Fetch Inactive) */ + static_cast(perm)->opsel = 1; /* FI (Fetch Inactive) */ } bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX)); @@ -545,11 +737,44 @@ void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsig } } std::swap(tmp, vtmp); - } else { + } else if (ctx->program->chip_class >= GFX8) { emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true); + } else { + // TODO: use LDS on CS with a single write and shifted read + /* wavefront shift_right by 1 on SI/CI */ + emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2)); + emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */ + bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u)); + bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1)); + for (unsigned i = 0; i < src.size(); i++) + bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1)); + + bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX)); + emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */ + bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), 
Operand(0x01000100u));
+      bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
+      for (unsigned i = 0; i < src.size(); i++)
+         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
+
+      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
+      emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
+      bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
+      bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
+      for (unsigned i = 0; i < src.size(); i++)
+         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
+
+      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
+      for (unsigned i = 0; i < src.size(); i++) {
+         bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
+         bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
+         bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
+         identity[i] = Operand(0u); /* prevent further uses of identity */
+      }
+      std::swap(tmp, vtmp);
    }
+
    for (unsigned i = 0; i < src.size(); i++) {
-      if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take case of this overwise */
+      if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
          if (ctx->program->chip_class < GFX10)
             assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
          bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
@@ -611,7 +836,7 @@ void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsig
                                       Definition(PhysReg{vtmp+i}, v1),
                                       Operand(PhysReg{tmp+i}, v1),
                                       Operand(0xffffffffu), Operand(0xffffffffu)).instr;
-         static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
+         static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
       }
       emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
@@ -660,16 +885,364 @@ void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsig
    }
 }
 
+void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
+{
+   /* Emulates proper bpermute on GFX10 in wave64 mode.
+    *
+    * This is necessary because on GFX10 the bpermute instruction only works
+    * on half waves (you can think of it as having a cluster size of 32), so we
+    * manually swap the data between the two halves using two shared VGPRs.
+ */ + + assert(program->chip_class >= GFX10); + assert(program->info->wave_size == 64); + + unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256; + Definition dst = instr->definitions[0]; + Definition tmp_exec = instr->definitions[1]; + Definition clobber_scc = instr->definitions[2]; + Operand index_x4 = instr->operands[0]; + Operand input_data = instr->operands[1]; + Operand same_half = instr->operands[2]; + + assert(dst.regClass() == v1); + assert(tmp_exec.regClass() == bld.lm); + assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc); + assert(same_half.regClass() == bld.lm); + assert(index_x4.regClass() == v1); + assert(input_data.regClass().type() == RegType::vgpr); + assert(input_data.bytes() <= 4); + assert(dst.physReg() != index_x4.physReg()); + assert(dst.physReg() != input_data.physReg()); + assert(tmp_exec.physReg() != same_half.physReg()); + + PhysReg shared_vgpr_lo(shared_vgpr_reg_0); + PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1); + + /* Permute the input within the same half-wave */ + bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data); + + /* HI: Copy data from high lanes 32-63 to shared vgpr */ + bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false); + /* Save EXEC */ + bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2)); + /* Set EXEC to enable LO lanes only */ + bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u)); + /* LO: Copy data from low lanes 0-31 to shared vgpr */ + bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data); + /* LO: bpermute shared vgpr (high lanes' data) */ + bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1)); + /* Set EXEC to enable HI lanes only */ + bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u)); + /* HI: bpermute shared vgpr (low lanes' data) */ + bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1)); + + /* Only enable lanes which use the other half's data */ + bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half); + /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */ + bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false); + /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */ + bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false); + + /* Restore saved EXEC */ + bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2)); + + /* RA assumes that the result is always in the low part of the register, so we have to shift, if it's not there already */ + if (input_data.physReg().byte()) { + unsigned right_shift = input_data.physReg().byte() * 8; + bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1)); + } +} + +void emit_gfx6_bpermute(Program *program, aco_ptr &instr, Builder &bld) +{ + /* Emulates bpermute using readlane instructions */ + + Operand index = instr->operands[0]; + Operand input = instr->operands[1]; + Definition dst = instr->definitions[0]; + Definition temp_exec = instr->definitions[1]; + Definition clobber_vcc = instr->definitions[2]; + + assert(dst.regClass() == v1); + assert(temp_exec.regClass() == bld.lm); + assert(clobber_vcc.regClass() == bld.lm); 
+ assert(clobber_vcc.physReg() == vcc); + assert(index.regClass() == v1); + assert(index.physReg() != dst.physReg()); + assert(input.regClass().type() == RegType::vgpr); + assert(input.bytes() <= 4); + assert(input.physReg() != dst.physReg()); + + /* Save original EXEC */ + bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2)); + + /* An "unrolled loop" that is executed per each lane. + * This takes only a few instructions per lane, as opposed to a "real" loop + * with branching, where the branch instruction alone would take 16+ cycles. + */ + for (unsigned n = 0; n < program->wave_size; ++n) { + /* Activate the lane which has N for its source index */ + bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand(n), index); + /* Read the data from lane N */ + bld.readlane(Definition(vcc, s1), input, Operand(n)); + /* On the active lane, move the data we read from lane N to the destination VGPR */ + bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1)); + /* Restore original EXEC */ + bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2)); + } +} + struct copy_operation { Operand op; Definition def; - unsigned uses; - unsigned size; + unsigned bytes; + union { + uint8_t uses[8]; + uint64_t is_used = 0; + }; }; +void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size) +{ + PhysReg def_reg = src.def.physReg(); + PhysReg op_reg = src.op.physReg(); + def_reg.reg_b += offset; + op_reg.reg_b += offset; + + max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8); + + /* make sure the size is a power of two and reg % bytes == 0 */ + unsigned bytes = 1; + for (; bytes <= max_size; bytes *= 2) { + unsigned next = bytes * 2u; + bool can_increase = def_reg.reg_b % next == 0 && + offset + next <= src.bytes && next <= max_size; + if (!src.op.isConstant() && can_increase) + can_increase = op_reg.reg_b % next == 0; + for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++) + can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0); + if (!can_increase) + break; + } + + RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) : + RegClass(src.def.regClass().type(), bytes).as_subdword(); + *def = Definition(src.def.tempId(), def_reg, def_cls); + if (src.op.isConstant()) { + assert(bytes >= 1 && bytes <= 8); + if (bytes == 8) + *op = Operand(src.op.constantValue64() >> (offset * 8u)); + else if (bytes == 4) + *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u))); + else if (bytes == 2) + *op = Operand(uint16_t(src.op.constantValue64() >> (offset * 8u))); + else if (bytes == 1) + *op = Operand(uint8_t(src.op.constantValue64() >> (offset * 8u))); + } else { + RegClass op_cls = bytes % 4 == 0 ? 
RegClass(src.op.regClass().type(), bytes / 4u) : + RegClass(src.op.regClass().type(), bytes).as_subdword(); + *op = Operand(op_reg, op_cls); + op->setTemp(Temp(src.op.tempId(), op_cls)); + } +} + +uint32_t get_intersection_mask(int a_start, int a_size, + int b_start, int b_size) +{ + int intersection_start = MAX2(b_start - a_start, 0); + int intersection_end = MAX2(b_start + b_size - a_start, 0); + if (intersection_start >= a_size || intersection_end == 0) + return 0; + + uint32_t mask = u_bit_consecutive(0, a_size); + return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask; +} + +bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc, PhysReg scratch_sgpr) +{ + bool did_copy = false; + for (unsigned offset = 0; offset < copy.bytes;) { + if (copy.uses[offset]) { + offset++; + continue; + } + + Definition def; + Operand op; + split_copy(offset, &def, &op, copy, false, 8); + + if (def.physReg() == scc) { + bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u)); + *preserve_scc = true; + } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) { + bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2)); + } else if (def.regClass().is_subdword() && ctx->program->chip_class < GFX8) { + if (op.physReg().byte()) { + assert(def.physReg().byte() == 0); + bld.vop2(aco_opcode::v_lshrrev_b32, def, Operand(op.physReg().byte() * 8), op); + } else if (def.physReg().byte() == 2) { + assert(op.physReg().byte() == 0); + /* preserve the target's lower half */ + def = Definition(def.physReg().advance(-2), v1); + bld.vop2(aco_opcode::v_and_b32, Definition(op.physReg(), v1), Operand(0xFFFFu), op); + if (def.physReg().reg() != op.physReg().reg()) + bld.vop2(aco_opcode::v_and_b32, def, Operand(0xFFFFu), Operand(def.physReg(), v2b)); + bld.vop2(aco_opcode::v_cvt_pk_u16_u32, def, Operand(def.physReg(), v2b), op); + } else if (def.physReg().byte()) { + unsigned bits = def.physReg().byte() * 8; + assert(op.physReg().byte() == 0); + def = Definition(def.physReg().advance(-def.physReg().byte()), v1); + bld.vop2(aco_opcode::v_and_b32, def, Operand((1 << bits) - 1u), Operand(def.physReg(), op.regClass())); + if (def.physReg().reg() == op.physReg().reg()) { + if (bits < 24) { + bld.vop2(aco_opcode::v_mul_u32_u24, def, Operand((1 << bits) + 1u), op); + } else { + bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1), Operand((1 << bits) + 1u)); + bld.vop3(aco_opcode::v_mul_lo_u32, def, Operand(scratch_sgpr, s1), op); + } + } else { + bld.vop2(aco_opcode::v_lshlrev_b32, Definition(op.physReg(), def.regClass()), Operand(bits), op); + bld.vop2(aco_opcode::v_or_b32, def, Operand(def.physReg(), op.regClass()), op); + bld.vop2(aco_opcode::v_lshrrev_b32, Definition(op.physReg(), def.regClass()), Operand(bits), op); + } + } else { + bld.vop1(aco_opcode::v_mov_b32, def, op); + } + } else { + bld.copy(def, op); + } + + did_copy = true; + offset += def.bytes(); + } + return did_copy; +} + +void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi) +{ + unsigned offset = 0; + + if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) && + (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) { + /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */ + PhysReg op = copy.op.physReg(); + PhysReg def = copy.def.physReg(); + op.reg_b &= ~0x3; + def.reg_b &= ~0x3; + + copy_operation tmp; + tmp.op = Operand(op, v1); + 
tmp.def = Definition(def, v1); + tmp.bytes = 4; + memset(tmp.uses, 1, 4); + do_swap(ctx, bld, tmp, preserve_scc, pi); + + op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0; + def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0; + tmp.op = Operand(op, v1b); + tmp.def = Definition(def, v1b); + tmp.bytes = 1; + tmp.uses[0] = 1; + do_swap(ctx, bld, tmp, preserve_scc, pi); + + offset = copy.bytes; + } + + for (; offset < copy.bytes;) { + Definition def; + Operand op; + split_copy(offset, &def, &op, copy, true, 8); + + assert(op.regClass() == def.regClass()); + Operand def_as_op = Operand(def.physReg(), def.regClass()); + Definition op_as_def = Definition(op.physReg(), op.regClass()); + if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) { + bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op); + } else if (def.regClass() == v1) { + assert(def.physReg().byte() == 0 && op.physReg().byte() == 0); + bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op); + bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op); + bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op); + } else if (op.physReg() == scc || def.physReg() == scc) { + /* we need to swap scc and another sgpr */ + assert(!preserve_scc); + + PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg(); + + bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1)); + bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u)); + bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1)); + } else if (def.regClass() == s1) { + if (preserve_scc) { + bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op); + bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op); + bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1)); + } else { + bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op); + bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op); + bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op); + } + } else if (def.regClass() == s2) { + if (preserve_scc) + bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1)); + bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op); + bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op); + bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op); + if (preserve_scc) + bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u)); + } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) { + aco_ptr vop3p{create_instruction(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)}; + vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1); + vop3p->operands[1] = Operand(0u); + vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1); + vop3p->opsel_lo = 0x1; + vop3p->opsel_hi = 0x2; + bld.insert(std::move(vop3p)); + } else { + assert(def.regClass().is_subdword()); + bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op); + bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op); + bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op); + } + + offset += def.bytes(); + } + + if (ctx->program->chip_class <= GFX7) + return; + + /* fixup in case we swapped bytes we shouldn't have */ + copy_operation tmp_copy = copy; + tmp_copy.op.setFixed(copy.def.physReg()); + 
tmp_copy.def.setFixed(copy.op.physReg());
+   do_copy(ctx, bld, tmp_copy, &preserve_scc, pi->scratch_sgpr);
+}
+
+void do_pack_2x16(lower_context *ctx, Builder& bld, Definition def, Operand lo, Operand hi)
+{
+   if (ctx->program->chip_class >= GFX9) {
+      Instruction* instr = bld.vop3(aco_opcode::v_pack_b32_f16, def, lo, hi);
+      /* opsel: 0 = select low half, 1 = select high half. [0] = src0, [1] = src1 */
+      static_cast<VOP3A_instruction*>(instr)->opsel = hi.physReg().byte() | (lo.physReg().byte() >> 1);
+   } else if (ctx->program->chip_class >= GFX8) {
+      // TODO: optimize with v_mov_b32 / v_lshlrev_b32
+      PhysReg reg = def.physReg();
+      bld.copy(Definition(reg, v2b), lo);
+      reg.reg_b += 2;
+      bld.copy(Definition(reg, v2b), hi);
+   } else {
+      assert(lo.physReg().byte() == 0 && hi.physReg().byte() == 0);
+      bld.vop2(aco_opcode::v_and_b32, Definition(lo.physReg(), v1), Operand(0xFFFFu), lo);
+      bld.vop2(aco_opcode::v_and_b32, Definition(hi.physReg(), v1), Operand(0xFFFFu), hi);
+      bld.vop2(aco_opcode::v_cvt_pk_u16_u32, def, lo, hi);
+   }
+}
+
 void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
 {
    Builder bld(ctx->program, &ctx->instructions);
+   unsigned num_instructions_before = ctx->instructions.size();
    aco_ptr<Instruction> mov;
    std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
    std::map<PhysReg, copy_operation>::iterator target;
@@ -677,10 +1250,6 @@ void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context*
    /* count the number of uses for each dst reg */
    while (it != copy_map.end()) {
-      if (it->second.op.isConstant()) {
-         ++it;
-         continue;
-      }
       if (it->second.def.physReg() == scc)
          writes_scc = true;
@@ -692,10 +1261,53 @@ void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context*
          it = copy_map.erase(it);
          continue;
       }
-      /* check if the operand reg may be overwritten by another copy operation */
-      target = copy_map.find(it->second.op.physReg());
-      if (target != copy_map.end()) {
-         target->second.uses++;
+
+      /* split large copies */
+      if (it->second.bytes > 8) {
+         assert(!it->second.op.isConstant());
+         assert(!it->second.def.regClass().is_subdword());
+         RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
+         Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
+         rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
+         Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
+         copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
+         copy_map[hi_def.physReg()] = copy;
+         assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
+         it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
+         it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ?
s2 : v2); + it->second.bytes = 8; + } + + /* try to coalesce copies */ + if (it->second.bytes < 8 && !it->second.op.isConstant() && + it->first.reg_b % util_next_power_of_two(it->second.bytes + 1) == 0 && + it->second.op.physReg().reg_b % util_next_power_of_two(it->second.bytes + 1) == 0) { + // TODO try more relaxed alignment for subdword copies + PhysReg other_def_reg = it->first; + other_def_reg.reg_b += it->second.bytes; + PhysReg other_op_reg = it->second.op.physReg(); + other_op_reg.reg_b += it->second.bytes; + std::map::iterator other = copy_map.find(other_def_reg); + if (other != copy_map.end() && + other->second.op.physReg() == other_op_reg && + it->second.bytes + other->second.bytes <= 8) { + it->second.bytes += other->second.bytes; + it->second.def = Definition(it->first, RegClass::get(it->second.def.regClass().type(), it->second.bytes)); + it->second.op = Operand(it->second.op.physReg(), RegClass::get(it->second.op.regClass().type(), it->second.bytes)); + copy_map.erase(other); + } + } + + /* check if the definition reg is used by another copy operation */ + for (std::pair& copy : copy_map) { + if (copy.second.op.isConstant()) + continue; + for (uint16_t i = 0; i < it->second.bytes; i++) { + /* distance might underflow */ + unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b; + if (distance < copy.second.bytes) + it->second.uses[i] += 1; + } } ++it; @@ -703,68 +1315,212 @@ void handle_operands(std::map& copy_map, lower_context* /* first, handle paths in the location transfer graph */ bool preserve_scc = pi->tmp_in_scc && !writes_scc; + bool skip_partial_copies = true; it = copy_map.begin(); - while (it != copy_map.end()) { - - /* the target reg is not used as operand for any other copy */ - if (it->second.uses == 0) { - - /* try to coalesce 32-bit sgpr copies to 64-bit copies */ - if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 && - !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) { + while (true) { + if (copy_map.empty()) { + ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before; + return; + } + if (it == copy_map.end()) { + if (!skip_partial_copies) + break; + skip_partial_copies = false; + it = copy_map.begin(); + } - PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1}; - PhysReg other_op_reg = PhysReg{it->first % 2 ? 
it->second.op.physReg() - 1 : it->second.op.physReg() + 1}; - std::map::iterator other = copy_map.find(other_def_reg); + /* check if we can pack one register at once */ + if (it->first.byte() == 0 && it->second.bytes == 2) { + PhysReg reg_hi = it->first.advance(2); + std::map::iterator other = copy_map.find(reg_hi); + if (other != copy_map.end() && other->second.bytes == 2) { + /* check if the target register is otherwise unused */ + // TODO: also do this for self-intersecting registers + bool unused_lo = !it->second.is_used; + bool unused_hi = !other->second.is_used; + if (unused_lo && unused_hi) { + Operand lo = it->second.op; + Operand hi = other->second.op; + do_pack_2x16(ctx, bld, Definition(it->first, v1), lo, hi); + copy_map.erase(it); + copy_map.erase(other); + + for (std::pair& other : copy_map) { + for (uint16_t i = 0; i < other.second.bytes; i++) { + /* distance might underflow */ + unsigned distance_lo = other.first.reg_b + i - lo.physReg().reg_b; + unsigned distance_hi = other.first.reg_b + i - hi.physReg().reg_b; + if (distance_lo < 2 || distance_hi < 2) + other.second.uses[i] -= 1; + } + } + it = copy_map.begin(); + continue; + } + } + } - if (other != copy_map.end() && !other->second.uses && other->second.size == 1 && - other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) { - std::map::iterator to_erase = it->first % 2 ? it : other; - it = it->first % 2 ? other : it; - copy_map.erase(to_erase); - it->second.size = 2; + /* on GFX6/7, we need some small workarounds as there is no + * SDWA instruction to do partial register writes */ + if (ctx->program->chip_class < GFX8 && it->second.bytes < 4) { + if (it->first.byte() == 0 && it->second.op.physReg().byte() == 0 && + !it->second.is_used && pi->opcode == aco_opcode::p_split_vector) { + /* Other operations might overwrite the high bits, so change all users + * of the high bits to the new target where they are still available. + * This mechanism depends on also emitting dead definitions. */ + PhysReg reg_hi = it->second.op.physReg().advance(it->second.bytes); + while (reg_hi != PhysReg(it->second.op.physReg().reg() + 1)) { + std::map::iterator other = copy_map.begin(); + for (other = copy_map.begin(); other != copy_map.end(); other++) { + /* on GFX6/7, if the high bits are used as operand, they cannot be a target */ + if (other->second.op.physReg() == reg_hi) { + other->second.op.setFixed(it->first.advance(reg_hi.byte())); + break; /* break because an operand can only be used once */ + } + } + reg_hi = reg_hi.advance(it->second.bytes); + } + } else if (it->first.byte()) { + assert(pi->opcode == aco_opcode::p_create_vector); + /* on GFX6/7, if we target an upper half where the lower half hasn't yet been handled, + * move to the target operand's high bits. 
This is safe to do as it cannot be an operand */
+            PhysReg lo = PhysReg(it->first.reg());
+            std::map<PhysReg, copy_operation>::iterator other = copy_map.find(lo);
+            if (other != copy_map.end()) {
+               assert(other->second.bytes == it->first.byte());
+               PhysReg new_reg_hi = other->second.op.physReg().advance(it->first.byte());
+               it->second.def = Definition(new_reg_hi, it->second.def.regClass());
+               it->second.is_used = 0;
+               other->second.bytes += it->second.bytes;
+               other->second.def.setTemp(Temp(other->second.def.tempId(), RegClass::get(RegType::vgpr, other->second.bytes)));
+               other->second.op.setTemp(Temp(other->second.op.tempId(), RegClass::get(RegType::vgpr, other->second.bytes)));
+               /* if the new target's high bits are also a target, change uses */
+               std::map<PhysReg, copy_operation>::iterator target = copy_map.find(new_reg_hi);
+               if (target != copy_map.end()) {
+                  for (unsigned i = 0; i < it->second.bytes; i++)
+                     target->second.uses[i]++;
+               }
            }
         }
+      }
-      if (it->second.def.physReg() == scc) {
-         bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
-         preserve_scc = true;
-      } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
-         bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
-      } else {
-         bld.copy(it->second.def, it->second.op);
+      /* find portions where the target reg is not used as operand for any other copy */
+      if (it->second.is_used) {
+         if (it->second.op.isConstant() || skip_partial_copies) {
+            /* we have to skip constants until is_used=0.
+             * we also skip partial copies at the beginning to help coalescing */
+            ++it;
+            continue;
          }
-      /* reduce the number of uses of the operand reg by one */
-      if (!it->second.op.isConstant()) {
-         for (unsigned i = 0; i < it->second.size; i++) {
-            target = copy_map.find(PhysReg{it->second.op.physReg() + i});
-            if (target != copy_map.end())
-               target->second.uses--;
+
+         unsigned has_zero_use_bytes = 0;
+         for (unsigned i = 0; i < it->second.bytes; i++)
+            has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
+
+         if (has_zero_use_bytes) {
+            /* Skipping partial copying and doing a v_swap_b32 and then fixup
+             * copies is usually beneficial for sub-dword copies, but if doing
+             * a partial copy allows further copies, it should be done instead. */
+            bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
+            for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
+               /* on GFX6/7, we can only do copies with full registers */
+               if (partial_copy || ctx->program->chip_class <= GFX7)
+                  break;
+               for (uint16_t i = 0; i < copy.second.bytes; i++) {
+                  /* distance might underflow */
+                  unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
+                  if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
+                      !it->second.uses[distance])
+                     partial_copy = true;
+               }
+            }
+
+            if (!partial_copy) {
+               ++it;
+               continue;
+            }
+         } else {
+            /* full target reg is used: register swapping needed */
+            ++it;
+            continue;
+         }
+      }
+
+      bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc, pi->scratch_sgpr);
+      skip_partial_copies = did_copy;
+      std::pair<PhysReg, copy_operation> copy = *it;
+      if (it->second.is_used == 0) {
+         /* the target reg is not used as operand for any other copy, so we
+          * copied to all of it */
          copy_map.erase(it);
          it = copy_map.begin();
-         continue;
       } else {
-         /* the target reg is used as operand, check the next entry */
-         ++it;
+         /* we only performed some portions of this copy, so split it to only
+          * leave the portions that still need to be done */
+         copy_operation original = it->second; /* the map insertion below can overwrite this */
+         copy_map.erase(it);
+         for (unsigned offset = 0; offset < original.bytes;) {
+            if (original.uses[offset] == 0) {
+               offset++;
+               continue;
+            }
+            Definition def;
+            Operand op;
+            split_copy(offset, &def, &op, original, false, 8);
+
+            copy_operation copy = {op, def, def.bytes()};
+            for (unsigned i = 0; i < copy.bytes; i++)
+               copy.uses[i] = original.uses[i + offset];
+            copy_map[def.physReg()] = copy;
+
+            offset += def.bytes();
+         }
+
+         it = copy_map.begin();
       }
-   }
-   if (copy_map.empty())
-      return;
+      /* Reduce the number of uses of the operand reg by one. Do this after
+       * splitting the copy or removing it in case the copy writes to its own
+       * operand (for example, v[7:8] = v[8:9]) */
+      if (did_copy && !copy.second.op.isConstant()) {
+         for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
+            for (uint16_t i = 0; i < other.second.bytes; i++) {
+               /* distance might underflow */
+               unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
+               if (distance < copy.second.bytes && !copy.second.uses[distance])
+                  other.second.uses[i] -= 1;
+            }
+         }
+      }
+   }
    /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
-   bool constants = false;
-   for (it = copy_map.begin(); it != copy_map.end(); ++it) {
+   unsigned largest = 0;
+   for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
+      largest = MAX2(largest, op.second.bytes);
+
+   while (!copy_map.empty()) {
+
+      /* Perform larger swaps first, because larger swaps can make other
+       * swaps unnecessary.
*/ + auto it = copy_map.begin(); + for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) { + if (it2->second.bytes > it->second.bytes) { + it = it2; + if (it->second.bytes == largest) + break; + } + } + + /* should already be done */ + assert(!it->second.op.isConstant()); + assert(it->second.op.isFixed()); - if (it->first == it->second.op.physReg()) - continue; - /* do constants later */ - if (it->second.op.isConstant()) { - constants = true; + assert(it->second.def.regClass() == it->second.op.regClass()); + + if (it->first == it->second.op.physReg()) { + copy_map.erase(it); continue; } @@ -773,59 +1529,127 @@ void handle_operands(std::map& copy_map, lower_context* /* to resolve the cycle, we have to swap the src reg with the dst reg */ copy_operation swap = it->second; - assert(swap.op.regClass() == swap.def.regClass()); - Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass()); - Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass()); - if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) { - bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op); - } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) { - /* we need to swap scc and another sgpr */ - assert(!preserve_scc); - - PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg(); - bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1)); - bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u)); - bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1)); - } else if (swap.def.getTemp().type() == RegType::sgpr) { - if (preserve_scc) { - bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op); - bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op); - bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1)); - } else { - bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op); - bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op); - bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op); - } - } else { - bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op); - bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op); - bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op); + /* if this is self-intersecting, we have to split it because + * self-intersecting swaps don't make sense */ + PhysReg lower = swap.def.physReg(); + PhysReg higher = swap.op.physReg(); + if (lower.reg_b > higher.reg_b) + std::swap(lower, higher); + if (higher.reg_b - lower.reg_b < (int)swap.bytes) { + unsigned offset = higher.reg_b - lower.reg_b; + RegType type = swap.def.regClass().type(); + + copy_operation middle; + lower.reg_b += offset; + higher.reg_b += offset; + middle.bytes = swap.bytes - offset * 2; + memcpy(middle.uses, swap.uses + offset, middle.bytes); + middle.op = Operand(lower, RegClass::get(type, middle.bytes)); + middle.def = Definition(higher, RegClass::get(type, middle.bytes)); + copy_map[higher] = middle; + + copy_operation end; + lower.reg_b += middle.bytes; + higher.reg_b += middle.bytes; + end.bytes = swap.bytes - (offset + middle.bytes); + memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes); + end.op = Operand(lower, RegClass::get(type, end.bytes)); + end.def = Definition(higher, RegClass::get(type, end.bytes)); + copy_map[higher] = end; + + memset(swap.uses + offset, 0, swap.bytes - 
offset); + swap.bytes = offset; } - /* change the operand reg of the target's use */ - assert(swap.uses == 1); - target = it; - for (++target; target != copy_map.end(); ++target) { - if (target->second.op.physReg() == it->first) { + /* GFX6-7 can only swap full registers */ + if (ctx->program->chip_class <= GFX7) + swap.bytes = align(swap.bytes, 4); + + do_swap(ctx, bld, swap, preserve_scc, pi); + + /* remove from map */ + copy_map.erase(it); + + /* change the operand reg of the target's uses and split uses if needed */ + target = copy_map.begin(); + uint32_t bytes_left = u_bit_consecutive(0, swap.bytes); + for (; target != copy_map.end(); ++target) { + if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) { target->second.op.setFixed(swap.op.physReg()); break; } - } - } - /* copy constants into a registers which were operands */ - if (constants) { - for (it = copy_map.begin(); it != copy_map.end(); ++it) { - if (!it->second.op.isConstant()) + uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes, + target->second.op.physReg().reg_b, target->second.bytes); + + if (!imask) continue; - if (it->second.def.physReg() == scc) { - bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u)); - } else { - bld.copy(it->second.def, it->second.op); + + int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b; + + /* split and update the middle (the portion that reads the swap's + * definition) to read the swap's operand instead */ + int target_op_end = target->second.op.physReg().reg_b + target->second.bytes; + int swap_def_end = swap.def.physReg().reg_b + swap.bytes; + int before_bytes = MAX2(-offset, 0); + int after_bytes = MAX2(target_op_end - swap_def_end, 0); + int middle_bytes = target->second.bytes - before_bytes - after_bytes; + + if (after_bytes) { + unsigned after_offset = before_bytes + middle_bytes; + assert(after_offset > 0); + copy_operation copy; + copy.bytes = after_bytes; + memcpy(copy.uses, target->second.uses + after_offset, copy.bytes); + RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes); + copy.op = Operand(target->second.op.physReg().advance(after_offset), rc); + copy.def = Definition(target->second.def.physReg().advance(after_offset), rc); + copy_map[copy.def.physReg()] = copy; } + + if (middle_bytes) { + copy_operation copy; + copy.bytes = middle_bytes; + memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes); + RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes); + copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc); + copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc); + copy_map[copy.def.physReg()] = copy; + } + + if (before_bytes) { + copy_operation copy; + target->second.bytes = before_bytes; + RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes); + target->second.op = Operand(target->second.op.physReg(), rc); + target->second.def = Definition(target->second.def.physReg(), rc); + memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes); + } + + /* break early since we know each byte of the swap's definition is used + * at most once */ + bytes_left &= ~imask; + if (!bytes_left) + break; } } + ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before; +} + +void emit_set_mode(Builder& bld, float_mode new_mode, bool set_round, bool 
set_denorm) +{ + if (bld.program->chip_class >= GFX10) { + if (set_round) + bld.sopp(aco_opcode::s_round_mode, -1, new_mode.round); + if (set_denorm) + bld.sopp(aco_opcode::s_denorm_mode, -1, new_mode.denorm); + } else if (set_round || set_denorm) { + /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */ + Instruction *instr = bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(new_mode.val), (7 << 11) | 1).instr; + /* has to be a literal */ + instr->operands[0].setFixed(PhysReg{255}); + } } void lower_to_hw_instr(Program* program) @@ -839,21 +1663,23 @@ void lower_to_hw_instr(Program* program) ctx.program = program; Builder bld(program, &ctx.instructions); - bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode; - for (unsigned pred : block->linear_preds) { - if (program->blocks[pred].fp_mode.val != block->fp_mode.val) { - set_mode = true; - break; + float_mode config_mode; + config_mode.val = program->config->float_mode; + + bool set_round = i == 0 && block->fp_mode.round != config_mode.round; + bool set_denorm = i == 0 && block->fp_mode.denorm != config_mode.denorm; + if (block->kind & block_kind_top_level) { + for (unsigned pred : block->linear_preds) { + if (program->blocks[pred].fp_mode.round != block->fp_mode.round) + set_round = true; + if (program->blocks[pred].fp_mode.denorm != block->fp_mode.denorm) + set_denorm = true; } } - if (set_mode) { - /* only allow changing modes at top-level blocks so this doesn't break - * the "jump over empty blocks" optimization */ - assert(block->kind & block_kind_top_level); - uint32_t mode = block->fp_mode.val; - /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */ - bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1); - } + /* only allow changing modes at top-level blocks so this doesn't break + * the "jump over empty blocks" optimization */ + assert((!set_round && !set_denorm) || (block->kind & block_kind_top_level)); + emit_set_mode(bld, block->fp_mode, set_round, set_denorm); for (size_t j = 0; j < block->instructions.size(); j++) { aco_ptr& instr = block->instructions[j]; @@ -865,42 +1691,43 @@ void lower_to_hw_instr(Program* program) { case aco_opcode::p_extract_vector: { - unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size(); - RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1); - RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1); - if (reg == instr->definitions[0].physReg()) + PhysReg reg = instr->operands[0].physReg(); + Definition& def = instr->definitions[0]; + reg.reg_b += instr->operands[1].constantValue() * def.bytes(); + + if (reg == def.physReg()) break; + RegClass op_rc = def.regClass().is_subdword() ? 
def.regClass() : + RegClass(instr->operands[0].getTemp().type(), def.size()); std::map copy_operations; - for (unsigned i = 0; i < instr->definitions[0].size(); i++) { - Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def); - copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1}; - } + copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()}; handle_operands(copy_operations, &ctx, program->chip_class, pi); break; } case aco_opcode::p_create_vector: { std::map copy_operations; - RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1); - unsigned reg_idx = 0; + PhysReg reg = instr->definitions[0].physReg(); + for (const Operand& op : instr->operands) { if (op.isConstant()) { - const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx}; - const Definition def = Definition(reg, rc_def); - copy_operations[reg] = {op, def, 0, 1}; - reg_idx++; + const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size())); + copy_operations[reg] = {op, def, op.bytes()}; + reg.reg_b += op.bytes(); continue; } - - RegClass rc_op = RegClass(op.getTemp().type(), 1); - for (unsigned j = 0; j < op.size(); j++) - { - const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op); - const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def); - copy_operations[def.physReg()] = {copy_op, def, 0, 1}; - reg_idx++; + if (op.isUndefined()) { + // TODO: coalesce subdword copies if dst byte is 0 + reg.reg_b += op.bytes(); + continue; } + + RegClass rc_def = op.regClass().is_subdword() ? op.regClass() : + RegClass(instr->definitions[0].getTemp().type(), op.size()); + const Definition def = Definition(reg, rc_def); + copy_operations[def.physReg()] = {op, def, op.bytes()}; + reg.reg_b += op.bytes(); } handle_operands(copy_operations, &ctx, program->chip_class, pi); break; @@ -908,15 +1735,14 @@ void lower_to_hw_instr(Program* program) case aco_opcode::p_split_vector: { std::map copy_operations; - RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1); - for (unsigned i = 0; i < instr->definitions.size(); i++) { - unsigned k = instr->definitions[i].size(); - RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1); - for (unsigned j = 0; j < k; j++) { - Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op); - Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def); - copy_operations[def.physReg()] = {op, def, 0, 1}; - } + PhysReg reg = instr->operands[0].physReg(); + + for (const Definition& def : instr->definitions) { + RegClass rc_op = def.regClass().is_subdword() ? 
def.regClass() : + RegClass(instr->operands[0].getTemp().type(), def.size()); + const Operand op = Operand(reg, rc_op); + copy_operations[def.physReg()] = {op, def, def.bytes()}; + reg.reg_b += def.bytes(); } handle_operands(copy_operations, &ctx, program->chip_class, pi); break; @@ -925,32 +1751,39 @@ void lower_to_hw_instr(Program* program) case aco_opcode::p_wqm: { std::map copy_operations; - for (unsigned i = 0; i < instr->operands.size(); i++) - { - Operand operand = instr->operands[i]; - if (operand.isConstant() || operand.size() == 1) { - assert(instr->definitions[i].size() == 1); - copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1}; - } else { - RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1); - RegClass op_rc = RegClass(operand.getTemp().type(), 1); - for (unsigned j = 0; j < operand.size(); j++) - { - Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc); - Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc); - copy_operations[def.physReg()] = {op, def, 0, 1}; - } - } + for (unsigned i = 0; i < instr->operands.size(); i++) { + assert(instr->definitions[i].bytes() == instr->operands[i].bytes()); + copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()}; } handle_operands(copy_operations, &ctx, program->chip_class, pi); break; } case aco_opcode::p_exit_early_if: { - /* don't bother with an early exit at the end of the program */ - if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end && - block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) { - break; + /* don't bother with an early exit near the end of the program */ + if ((block->instructions.size() - 1 - j) <= 4 && + block->instructions.back()->opcode == aco_opcode::s_endpgm) { + unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 
9 /* NULL */ : V_008DFC_SQ_EXP_POS; + bool ignore_early_exit = true; + + for (unsigned k = j + 1; k < block->instructions.size(); ++k) { + const aco_ptr &instr = block->instructions[k]; + if (instr->opcode == aco_opcode::s_endpgm || + instr->opcode == aco_opcode::p_logical_end) + continue; + else if (instr->opcode == aco_opcode::exp && + static_cast(instr.get())->dest == null_exp_dest) + continue; + else if (instr->opcode == aco_opcode::p_parallelcopy && + instr->definitions[0].isFixed() && + instr->definitions[0].physReg() == exec) + continue; + + ignore_early_exit = false; + } + + if (ignore_early_exit) + break; } if (!discard_block) { @@ -999,19 +1832,7 @@ void lower_to_hw_instr(Program* program) { if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) { std::map copy_operations; - Operand operand = instr->operands[0]; - if (operand.isConstant() || operand.size() == 1) { - assert(instr->definitions[0].size() == 1); - copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1}; - } else { - for (unsigned i = 0; i < operand.size(); i++) - { - Operand op = Operand(PhysReg{operand.physReg() + i}, s1); - Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1); - copy_operations[def.physReg()] = {op, def, 0, 1}; - } - } - + copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()}; handle_operands(copy_operations, &ctx, program->chip_class, pi); } else { assert(instr->operands[0].regClass().type() == RegType::vgpr); @@ -1025,6 +1846,15 @@ void lower_to_hw_instr(Program* program) } break; } + case aco_opcode::p_bpermute: + { + if (ctx.program->chip_class <= GFX7) + emit_gfx6_bpermute(program, instr, bld); + else if (ctx.program->chip_class >= GFX10 && ctx.program->wave_size == 64) + emit_gfx10_wave64_bpermute(program, instr, bld); + else + unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute."); + } default: break; } @@ -1072,63 +1902,23 @@ void lower_to_hw_instr(Program* program) } else if (instr->format == Format::PSEUDO_REDUCTION) { Pseudo_reduction_instruction* reduce = static_cast(instr.get()); - if (reduce->reduce_op == gfx10_wave64_bpermute) { - /* Only makes sense on GFX10 wave64 */ - assert(program->chip_class >= GFX10); - assert(program->info->wave_size == 64); - assert(instr->definitions[0].regClass() == v1); /* Destination */ - assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */ - assert(instr->definitions[1].physReg() != vcc); - assert(instr->definitions[2].physReg() == scc); /* SCC clobber */ - assert(instr->operands[0].physReg() == vcc); /* Compare */ - assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */ - assert(instr->operands[2].regClass() == v1); /* Indices x4 */ - assert(instr->operands[3].regClass() == v1); /* Input data */ - - PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256); - PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1); - Operand compare = instr->operands[0]; - Operand tmp1(instr->operands[1].physReg(), v1); - Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1); - Operand index_x4 = instr->operands[2]; - Operand input_data = instr->operands[3]; - Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1); - Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1); - Definition def_temp1(tmp1.physReg(), v1); - Definition def_temp2(tmp2.physReg(), v1); - - /* Save EXEC and set it for all lanes */ - 
bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2], - Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2)); - - /* HI: Copy data from high lanes 32-63 to shared vgpr */ - bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false); - - /* LO: Copy data from low lanes 0-31 to shared vgpr */ - bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false); - /* LO: Copy shared vgpr (high lanes' data) to output vgpr */ - bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false); - - /* HI: Copy shared vgpr (low lanes' data) to output vgpr */ - bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false); - - /* Permute the original input */ - bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data); - /* Permute the swapped input */ - bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1); - - /* Restore saved EXEC */ - bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2)); - /* Choose whether to use the original or swapped */ - bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare); - } else { - emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size, - reduce->operands[1].physReg(), // tmp - reduce->definitions[1].physReg(), // stmp - reduce->operands[2].physReg(), // vtmp - reduce->definitions[2].physReg(), // sitmp - reduce->operands[0], reduce->definitions[0]); - } + emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size, + reduce->operands[1].physReg(), // tmp + reduce->definitions[1].physReg(), // stmp + reduce->operands[2].physReg(), // vtmp + reduce->definitions[2].physReg(), // sitmp + reduce->operands[0], reduce->definitions[0]); + } else if (instr->opcode == aco_opcode::p_cvt_f16_f32_rtne) { + float_mode new_mode = block->fp_mode; + new_mode.round16_64 = fp_round_ne; + bool set_round = new_mode.round != block->fp_mode.round; + + emit_set_mode(bld, new_mode, set_round, false); + + instr->opcode = aco_opcode::v_cvt_f16_f32; + ctx.instructions.emplace_back(std::move(instr)); + + emit_set_mode(bld, block->fp_mode, set_round, false); } else { ctx.instructions.emplace_back(std::move(instr)); }