+bool is_vop3_reduce_opcode(aco_opcode opcode)
+{
+ /* 64-bit reductions have no single hardware opcode; they are passed in as
+ * aco_opcode::num_opcodes and treated as VOP3 here. */
+ if (opcode == aco_opcode::num_opcodes)
+ return true;
+
+ return instr_info.format[(int)opcode] == Format::VOP3;
+}
+
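+/* Emits a 32-bit addition post-RA. On chips without a carry-less VALU add,
+ * vadd32() produces v_add_co_u32 with an extra carry-out definition, which
+ * this helper fixes to vcc since we are past register allocation. */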
+void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
+{
+ Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
+ if (instr->definitions.size() >= 2) {
+ assert(instr->definitions[1].regClass() == bld.lm);
+ instr->definitions[1].setFixed(vcc);
+ }
+}
+
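+/* Emits one step of a 64-bit reduction where src0 is read through DPP.
+ * The operation is split into two 32-bit halves. If 'identity' is given,
+ * vtmp is pre-loaded with it so that lanes the DPP move does not write
+ * (masked rows/banks, or invalid sources without bound_ctrl) contribute
+ * the identity value instead of stale data. */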
+void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
+ PhysReg vtmp_reg, ReduceOp op,
+ unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
+ Operand *identity=NULL)
+{
+ Builder bld(ctx->program, &ctx->instructions);
+ Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
+ Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
+ Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
+ Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
+ Operand src1_64 = Operand(src1_reg, v2);
+ Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
+ Operand vtmp_op64 = Operand(vtmp_reg, v2);
+ if (op == iadd64) {
+ if (ctx->program->chip_class >= GFX10) {
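+ /* GFX10+: v_add_co_u32 only has a VOP3 encoding and VOP3 can't use DPP,
+ * so apply the DPP through a v_mov_b32 into vtmp and add with VOP3. */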
+ if (identity)
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
+ } else {
+ bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ }
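+ /* The high half consumes the carry from vcc; the carry-in add still has a
+ * VOP2 encoding, so it can take DPP directly on both paths. */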
+ bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ } else if (op == iand64) {
+ bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ } else if (op == ior64) {
+ bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ } else if (op == ixor64) {
+ bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
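+ /* v_cndmask_b32 below selects src1 when vcc is set, so the compares are
+ * inverted: e.g. umin64 uses v_cmp_gt_u64 so the smaller value is chosen. */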
+ aco_opcode cmp = aco_opcode::num_opcodes;
+ switch (op) {
+ case umin64:
+ cmp = aco_opcode::v_cmp_gt_u64;
+ break;
+ case umax64:
+ cmp = aco_opcode::v_cmp_lt_u64;
+ break;
+ case imin64:
+ cmp = aco_opcode::v_cmp_gt_i64;
+ break;
+ case imax64:
+ cmp = aco_opcode::v_cmp_lt_i64;
+ break;
+ default:
+ break;
+ }
+
+ if (identity) {
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
+ }
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+
+ bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
+ bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
+ bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
+ } else if (op == imul64) {
+ /* t4 = dpp(x_hi)
+ * t1 = umul_lo(t4, y_lo)
+ * t3 = dpp(x_lo)
+ * t0 = umul_lo(t3, y_hi)
+ * t2 = iadd(t0, t1)
+ * t5 = umul_hi(t3, y_lo)
+ * res_hi = iadd(t2, t5)
+ * res_lo = umul_lo(t3, y_lo)
+ * Requires that res_hi != src0[0] and res_hi != src1[0]
+ * and that vtmp[0] != res_hi.
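+ * vtmp[0] is reused for t4, t3, t0 and t5 in turn (which is why the DPP move
+ * of x is redone before each use), and vtmp[1] holds t1 and then t2.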
+ */
+ if (identity)
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
+ if (identity)
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
+ emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
+ if (identity)
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
+ emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
+ if (identity)
+ bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
+ bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
+ dpp_ctrl, row_mask, bank_mask, bound_ctrl);
+ bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
+ }
+}
+
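+/* Emits one step of a 64-bit reduction without DPP. src0 may be a 64-bit SGPR
+ * or VGPR pair; src1 and dst are always VGPR pairs. */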
+void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
+{
+ Builder bld(ctx->program, &ctx->instructions);
+ Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
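+ /* physical registers >= 256 are VGPRs */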
+ RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
+ Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
+ Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
+ Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
+ Operand src1_64 = Operand(src1_reg, v2);
+
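+ /* imul64 and the 64-bit min/max need src0 in VGPRs, and iadd64 needs its
+ * high half in a VGPR, so copy from SGPRs into vtmp first. */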
+ if (src0_rc == s1 &&
+ (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
+ assert(vtmp.reg() != 0);
+ bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
+ bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
+ src0_reg = vtmp;
+ src0[0] = Operand(vtmp, v1);
+ src0[1] = Operand(PhysReg{vtmp+1}, v1);
+ src0_64 = Operand(vtmp, v2);
+ } else if (src0_rc == s1 && op == iadd64) {
+ assert(vtmp.reg() != 0);
+ bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
+ src0[1] = Operand(PhysReg{vtmp+1}, v1);
+ }
+
+ if (op == iadd64) {
+ if (ctx->program->chip_class >= GFX10) {
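+ /* GFX10+: v_add_co_u32 only has a VOP3 (e64) encoding. */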
+ bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
+ } else {
+ bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
+ }
+ bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
+ } else if (op == iand64) {
+ bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
+ bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
+ } else if (op == ior64) {
+ bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
+ bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
+ } else if (op == ixor64) {
+ bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
+ bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
+ } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
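+ /* Same compare/select scheme as the DPP variant above. */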
+ aco_opcode cmp = aco_opcode::num_opcodes;
+ switch (op) {
+ case umin64:
+ cmp = aco_opcode::v_cmp_gt_u64;
+ break;
+ case umax64:
+ cmp = aco_opcode::v_cmp_lt_u64;
+ break;
+ case imin64:
+ cmp = aco_opcode::v_cmp_gt_i64;
+ break;
+ case imax64:
+ cmp = aco_opcode::v_cmp_lt_i64;
+ break;
+ default:
+ break;
+ }
+
+ bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
+ bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
+ bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
+ } else if (op == imul64) {
+ if (src1_reg == dst_reg) {
+ /* it's fine if src0==dst but not if src1==dst */
+ std::swap(src0_reg, src1_reg);
+ std::swap(src0[0], src1[0]);
+ std::swap(src0[1], src1[1]);
+ std::swap(src0_64, src1_64);
+ }
+ assert(!(src0_reg == src1_reg));
+ /* t1 = umul_lo(x_hi, y_lo)
+ * t0 = umul_lo(x_lo, y_hi)
+ * t2 = iadd(t0, t1)
+ * t5 = umul_hi(x_lo, y_lo)
+ * res_hi = iadd(t2, t5)
+ * res_lo = umul_lo(x_lo, y_lo)
+ * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
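+ * This is the standard 64x64->64 decomposition:
+ * res_lo = lo32(x_lo * y_lo)
+ * res_hi = hi32(x_lo * y_lo) + lo32(x_hi * y_lo) + lo32(x_lo * y_hi)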
+ */
+ Definition tmp0_def(PhysReg{src0_reg+1}, v1);
+ Definition tmp1_def(PhysReg{src1_reg+1}, v1);
+ Operand tmp0_op = src0[1];
+ Operand tmp1_op = src1[1];
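+ /* tmp0 lives in x_hi's register and tmp1 in y_hi's register (hence the
+ * "ok to modify x_hi/y_hi" note above); tmp0 holds t1 then t2, tmp1 holds
+ * t0 then t5. */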
+ bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
+ bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
+ emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
+ bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
+ emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
+ bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
+ }
+}
+