};
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
+ /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
+ * 32-bit opcodes (VOP2) instead, which allows us to remove the temporary
+ * VGPR and to use DPP with the arithmetic instructions. This requires
+ * sign-extending the inputs.
+ */
switch (op) {
+ case iadd8:
+ case iadd16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_add_u32;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_add_u16;
+ } else {
+ return aco_opcode::v_add_co_u32;
+ }
+ break;
+ case imul8:
+ case imul16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_mul_lo_u16_e64;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_mul_lo_u16;
+ } else {
+ return aco_opcode::v_mul_u32_u24;
+ }
+ break;
+ case fadd16: return aco_opcode::v_add_f16;
+ case fmul16: return aco_opcode::v_mul_f16;
+ case imax8:
+ case imax16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_max_i32;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_max_i16;
+ } else {
+ return aco_opcode::v_max_i32;
+ }
+ break;
+ case imin8:
+ case imin16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_min_i32;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_min_i16;
+ } else {
+ return aco_opcode::v_min_i32;
+ }
+ break;
+ case umin8:
+ case umin16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_min_u32;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_min_u16;
+ } else {
+ return aco_opcode::v_min_u32;
+ }
+ break;
+ case umax8:
+ case umax16:
+ if (chip >= GFX10) {
+ return aco_opcode::v_max_u32;
+ } else if (chip >= GFX8) {
+ return aco_opcode::v_max_u16;
+ } else {
+ return aco_opcode::v_max_u32;
+ }
+ break;
+ case fmin16: return aco_opcode::v_min_f16;
+ case fmax16: return aco_opcode::v_max_f16;
case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
case imul32: return aco_opcode::v_mul_lo_u32;
case fadd32: return aco_opcode::v_add_f32;
case umax32: return aco_opcode::v_max_u32;
case fmin32: return aco_opcode::v_min_f32;
case fmax32: return aco_opcode::v_max_f32;
+ case iand8:
+ case iand16:
case iand32: return aco_opcode::v_and_b32;
+ case ixor8:
+ case ixor16:
case ixor32: return aco_opcode::v_xor_b32;
+ case ior8:
+ case ior16:
case ior32: return aco_opcode::v_or_b32;
case iadd64: return aco_opcode::num_opcodes;
case imul64: return aco_opcode::num_opcodes;
}
}
+bool is_vop3_reduce_opcode(aco_opcode opcode)
+{
+ /* 64-bit reductions are VOP3. */
+ if (opcode == aco_opcode::num_opcodes)
+ return true;
+
+ return instr_info.format[(int)opcode] == Format::VOP3;
+}
+
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
Operand src1(src1_reg, rc);
aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
- bool vop3 = op == imul32 || size == 2;
+ bool vop3 = is_vop3_reduce_opcode(opcode);
if (!vop3) {
if (opcode == aco_opcode::v_add_co_u32)
Operand src1(src1_reg, rc);
aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
- bool vop3 = op == imul32 || size == 2;
+ bool vop3 = is_vop3_reduce_opcode(opcode);
if (opcode == aco_opcode::num_opcodes) {
emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
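+ /* For 64-bit ops the identity is returned as two 32-bit halves, selected
+ * by idx (0 = low half, 1 = high half). */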
switch (op) {
+ case iadd8:
+ case iadd16:
case iadd32:
case iadd64:
+ case fadd16:
case fadd32:
case fadd64:
+ case ior8:
+ case ior16:
case ior32:
case ior64:
+ case ixor8:
+ case ixor16:
case ixor32:
case ixor64:
+ case umax8:
+ case umax16:
case umax32:
case umax64:
return 0;
+ case imul8:
+ case imul16:
case imul32:
case imul64:
return idx ? 0 : 1;
+ case fmul16:
+ return 0x3c00u; /* 1.0 */
case fmul32:
return 0x3f800000u; /* 1.0 */
case fmul64:
return idx ? 0x3ff00000u : 0u; /* 1.0 */
+ case imin8:
+ return INT8_MAX;
+ case imin16:
+ return INT16_MAX;
case imin32:
return INT32_MAX;
case imin64:
return idx ? 0x7fffffffu : 0xffffffffu;
+ case imax8:
+ return INT8_MIN;
+ case imax16:
+ return INT16_MIN;
case imax32:
return INT32_MIN;
case imax64:
return idx ? 0x80000000u : 0;
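+ /* The 8/16-bit variants only consume the low bits of the identity, so the
+ * 32-bit all-ones value is reused for the narrower umin/iand cases below. */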
+ case umin8:
+ case umin16:
+ case iand8:
+ case iand16:
case umin32:
case umin64:
case iand32:
case iand64:
return 0xffffffffu;
+ case fmin16:
+ return 0x7c00u; /* infinity */
case fmin32:
return 0x7f800000u; /* infinity */
case fmin64:
return idx ? 0x7ff00000u : 0u; /* infinity */
+ case fmax16:
+ return 0xfc00u; /* negative infinity */
case fmax32:
return 0xff800000u; /* negative infinity */
case fmax64:
Operand(stmp, bld.lm));
}
+ if (src.regClass() == v1b) {
+ aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
+ sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
+ sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
+ if (reduce_op == imin8 || reduce_op == imax8)
+ sdwa->sel[0] = sdwa_sbyte;
+ else
+ sdwa->sel[0] = sdwa_ubyte;
+ sdwa->dst_sel = sdwa_udword;
+ bld.insert(std::move(sdwa));
+ } else if (src.regClass() == v2b) {
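+ /* Widening is only needed on GFX10, where the 32-bit opcodes are used
+ * (see get_reduce_opcode); GFX8/GFX9 use the native 16-bit opcodes. */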
+ if (ctx->program->chip_class >= GFX10 &&
+ (reduce_op == iadd16 || reduce_op == imax16 ||
+ reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {
+ aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
+ sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
+ sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
+ if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
+ sdwa->sel[0] = sdwa_sword;
+ else
+ sdwa->sel[0] = sdwa_uword;
+ sdwa->dst_sel = sdwa_udword;
+ bld.insert(std::move(sdwa));
+ }
+ }
+
bool reduction_needs_last_op = false;
switch (op) {
case aco_opcode::p_reduce:
}
}
+void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
+{
+ /* Emulates proper bpermute on GFX10 in wave64 mode.
+ *
+ * This is necessary because on GFX10 the bpermute instruction only works
+ * on half waves (you can think of it as having a cluster size of 32), so we
+ * manually swap the data between the two halves using two shared VGPRs.
+ */
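+ /* Outline: bpermute within each half first, then stash each half's input
+ * in a shared VGPR, bpermute the other half's data as well, and finally
+ * overwrite the result for lanes whose index targets the other half
+ * (the inverse of same_half).
+ */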
+
+ assert(program->chip_class >= GFX10);
+ assert(program->info->wave_size == 64);
+
+ unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
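+ /* VGPRs are numbered from 256 in PhysReg space; the two shared VGPRs are
+ * placed right after the program's (aligned) VGPR allocation. */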
+ Definition dst = instr->definitions[0];
+ Definition tmp_exec = instr->definitions[1];
+ Definition clobber_scc = instr->definitions[2];
+ Operand index_x4 = instr->operands[0];
+ Operand input_data = instr->operands[1];
+ Operand same_half = instr->operands[2];
+
+ assert(dst.regClass() == v1);
+ assert(tmp_exec.regClass() == bld.lm);
+ assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
+ assert(same_half.regClass() == bld.lm);
+ assert(index_x4.regClass() == v1);
+ assert(input_data.regClass().type() == RegType::vgpr);
+ assert(input_data.bytes() <= 4);
+ assert(dst.physReg() != index_x4.physReg());
+ assert(dst.physReg() != input_data.physReg());
+ assert(tmp_exec.physReg() != same_half.physReg());
+
+ PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
+ PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);
+
+ /* Permute the input within the same half-wave */
+ bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);
+
+ /* HI: Copy data from high lanes 32-63 to shared vgpr */
+ bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
+ /* Save EXEC */
+ bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
+ /* Set EXEC to enable LO lanes only */
+ bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u));
+ /* LO: Copy data from low lanes 0-31 to shared vgpr */
+ bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
+ /* LO: bpermute shared vgpr (high lanes' data) */
+ bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1));
+ /* Set EXEC to enable HI lanes only */
+ bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
+ /* HI: bpermute shared vgpr (low lanes' data) */
+ bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1));
+
+ /* Only enable lanes which use the other half's data */
+ bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half);
+ /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
+ bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
+ /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
+ bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
+
+ /* Restore saved EXEC */
+ bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));
+
+ /* RA assumes that the result is always in the low part of the register, so we have to shift if it's not there already. */
+ if (input_data.physReg().byte()) {
+ unsigned right_shift = input_data.physReg().byte() * 8;
+ bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1));
+ }
+}
+
+void emit_gfx6_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
+{
+ /* Emulates bpermute using readlane instructions */
+
+ Operand index = instr->operands[0];
+ Operand input = instr->operands[1];
+ Definition dst = instr->definitions[0];
+ Definition temp_exec = instr->definitions[1];
+ Definition clobber_vcc = instr->definitions[2];
+
+ assert(dst.regClass() == v1);
+ assert(temp_exec.regClass() == bld.lm);
+ assert(clobber_vcc.regClass() == bld.lm);
+ assert(clobber_vcc.physReg() == vcc);
+ assert(index.regClass() == v1);
+ assert(index.physReg() != dst.physReg());
+ assert(input.regClass().type() == RegType::vgpr);
+ assert(input.bytes() <= 4);
+ assert(input.physReg() != dst.physReg());
+
+ /* Save original EXEC */
+ bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));
+
+ /* An "unrolled loop" that is executed per each lane.
+ * This takes only a few instructions per lane, as opposed to a "real" loop
+ * with branching, where the branch instruction alone would take 16+ cycles.
+ */
+ for (unsigned n = 0; n < program->wave_size; ++n) {
+ /* Activate the lane which has N for its source index */
+ bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand(n), index);
+ /* Read the data from lane N */
+ bld.readlane(Definition(vcc, s1), input, Operand(n));
+ /* On the active lane, move the data we read from lane N to the destination VGPR */
+ bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
+ /* Restore original EXEC */
+ bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
+ }
+}
+
struct copy_operation {
Operand op;
Definition def;
bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);
- /* reduce the number of uses of the operand reg by one */
- if (did_copy && !it->second.op.isConstant()) {
- for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
- for (uint16_t i = 0; i < copy.second.bytes; i++) {
- /* distance might underflow */
- unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
- if (distance < it->second.bytes && !it->second.uses[distance])
- copy.second.uses[i] -= 1;
- }
- }
- }
+ std::pair<PhysReg, copy_operation> copy = *it;
if (it->second.is_used == 0) {
/* the target reg is not used as operand for any other copy, so we
it = copy_map.begin();
}
+
+ /* Reduce the number of uses of the operand reg by one. Do this after
+ * splitting the copy or removing it, in case the copy writes to its own
+ * operand (for example, v[7:8] = v[8:9]). */
+ if (did_copy && !copy.second.op.isConstant()) {
+ for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
+ for (uint16_t i = 0; i < other.second.bytes; i++) {
+ /* distance might underflow */
+ unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
+ if (distance < copy.second.bytes && !copy.second.uses[distance])
+ other.second.uses[i] -= 1;
+ }
+ }
+ }
}
if (copy_map.empty())
/* all target regs are needed as operand somewhere, which means all entries are part of a cycle */
unsigned largest = 0;
- for (const std::pair<PhysReg, copy_operation>& op : copy_map)
+ for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
largest = MAX2(largest, op.second.bytes);
while (!copy_map.empty()) {
- /* Perform larger swaps first, so that we don't have to split the uses of
- * registers we swap (we don't have to because of alignment restrictions) and
- * larger swaps swaps can make other swaps unnecessary. */
+ /* Perform larger swaps first, because larger swaps can make other
+ * swaps unnecessary. */
auto it = copy_map.begin();
for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
if (it2->second.bytes > it->second.bytes) {
/* to resolve the cycle, we have to swap the src reg with the dst reg */
copy_operation swap = it->second;
+
+ /* if this is self-intersecting, we have to split it because
+ * self-intersecting swaps don't make sense */
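+ /* For example, a 12-byte swap v[0:2] <-> v[1:3] (4-byte overlap offset) is
+ * split below into a 4-byte swap v0 <-> v1 plus new copy_map entries
+ * v2 := v1 and v3 := v2. */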
+ PhysReg lower = swap.def.physReg();
+ PhysReg higher = swap.op.physReg();
+ if (lower.reg_b > higher.reg_b)
+ std::swap(lower, higher);
+ if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
+ unsigned offset = higher.reg_b - lower.reg_b;
+ RegType type = swap.def.regClass().type();
+
+ copy_operation middle;
+ lower.reg_b += offset;
+ higher.reg_b += offset;
+ middle.bytes = swap.bytes - offset * 2;
+ memcpy(middle.uses, swap.uses + offset, middle.bytes);
+ middle.op = Operand(lower, RegClass::get(type, middle.bytes));
+ middle.def = Definition(higher, RegClass::get(type, middle.bytes));
+ copy_map[higher] = middle;
+
+ copy_operation end;
+ lower.reg_b += middle.bytes;
+ higher.reg_b += middle.bytes;
+ end.bytes = swap.bytes - (offset + middle.bytes);
+ memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
+ end.op = Operand(lower, RegClass::get(type, end.bytes));
+ end.def = Definition(higher, RegClass::get(type, end.bytes));
+ copy_map[higher] = end;
+
+ memset(swap.uses + offset, 0, swap.bytes - offset);
+ swap.bytes = offset;
+ }
+
do_swap(ctx, bld, swap, preserve_scc, pi);
/* remove from map */
copy_map.erase(it);
- /* change the operand reg of the target's use and split uses if needed */
+ /* change the operand reg of the target's uses and split uses if needed */
target = copy_map.begin();
uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
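+ /* bytes_left tracks which bytes of the swap's definition still have an
+ * unvisited use; once it is empty we can stop scanning. */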
for (; target != copy_map.end(); ++target) {
assert(target->second.bytes < swap.bytes);
- PhysReg new_reg = swap.op.physReg();
- new_reg.reg_b += target->second.op.physReg().reg_b - swap.def.physReg().reg_b;
- target->second.op.setFixed(new_reg);
+ int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;
+
+ /* split and update the middle (the portion that reads the swap's
+ * definition) to read the swap's operand instead */
+ int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
+ int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
+ int before_bytes = MAX2(-offset, 0);
+ int after_bytes = MAX2(target_op_end - swap_def_end, 0);
+ int middle_bytes = target->second.bytes - before_bytes - after_bytes;
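+ /* before_bytes/after_bytes cover the parts of the target's operand that
+ * lie below/past the swap's definition and keep their register; only the
+ * middle bytes are redirected to read the swap's operand. */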
+
+ if (after_bytes) {
+ unsigned after_offset = before_bytes + middle_bytes;
+ assert(after_offset > 0);
+ copy_operation copy;
+ copy.bytes = after_bytes;
+ memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
+ RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
+ copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
+ copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
+ copy_map[copy.def.physReg()] = copy;
+ }
+ if (middle_bytes) {
+ copy_operation copy;
+ copy.bytes = middle_bytes;
+ memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
+ RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
+ copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
+ copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
+ copy_map[copy.def.physReg()] = copy;
+ }
+
+ if (before_bytes) {
+ target->second.bytes = before_bytes;
+ RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
+ target->second.op = Operand(target->second.op.physReg(), rc);
+ target->second.def = Definition(target->second.def.physReg(), rc);
+ memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
+ }
+
+ /* break early since we know each byte of the swap's definition is used
+ * at most once */
bytes_left &= ~imask;
if (!bytes_left)
break;
}
break;
}
+ case aco_opcode::p_bpermute:
+ {
+ if (ctx.program->chip_class <= GFX7)
+ emit_gfx6_bpermute(program, instr, bld);
+ else if (ctx.program->chip_class == GFX10 && ctx.program->wave_size == 64)
+ emit_gfx10_wave64_bpermute(program, instr, bld);
+ else
+ unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
+ break;
+ }
default:
break;
}
} else if (instr->format == Format::PSEUDO_REDUCTION) {
Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
- if (reduce->reduce_op == gfx10_wave64_bpermute) {
- /* Only makes sense on GFX10 wave64 */
- assert(program->chip_class >= GFX10);
- assert(program->info->wave_size == 64);
- assert(instr->definitions[0].regClass() == v1); /* Destination */
- assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
- assert(instr->definitions[1].physReg() != vcc);
- assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
- assert(instr->operands[0].physReg() == vcc); /* Compare */
- assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
- assert(instr->operands[2].regClass() == v1); /* Indices x4 */
- assert(instr->operands[3].regClass() == v1); /* Input data */
-
- PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
- PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
- Operand compare = instr->operands[0];
- Operand tmp1(instr->operands[1].physReg(), v1);
- Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
- Operand index_x4 = instr->operands[2];
- Operand input_data = instr->operands[3];
- Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
- Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
- Definition def_temp1(tmp1.physReg(), v1);
- Definition def_temp2(tmp2.physReg(), v1);
-
- /* Save EXEC and set it for all lanes */
- bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
- Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));
-
- /* HI: Copy data from high lanes 32-63 to shared vgpr */
- bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
-
- /* LO: Copy data from low lanes 0-31 to shared vgpr */
- bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
- /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
- bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
-
- /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
- bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
-
- /* Permute the original input */
- bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
- /* Permute the swapped input */
- bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);
-
- /* Restore saved EXEC */
- bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
- /* Choose whether to use the original or swapped */
- bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
- } else {
- emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
- reduce->operands[1].physReg(), // tmp
- reduce->definitions[1].physReg(), // stmp
- reduce->operands[2].physReg(), // vtmp
- reduce->definitions[2].physReg(), // sitmp
- reduce->operands[0], reduce->definitions[0]);
- }
+ emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
+ reduce->operands[1].physReg(), // tmp
+ reduce->definitions[1].physReg(), // stmp
+ reduce->operands[2].physReg(), // vtmp
+ reduce->definitions[2].physReg(), // sitmp
+ reduce->operands[0], reduce->definitions[0]);
} else {
ctx.instructions.emplace_back(std::move(instr));
}