return ctx->allocated[def->index];
}
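+/* Computes the lane index within the wave: a masked bit-count of the mask bits
+ * below the current lane. On wave32 only the low half is needed; on wave64 the
+ * high half is accumulated on top of the low result.
+ */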
+Temp emit_mbcnt(isel_context *ctx, Definition dst,
+ Operand mask_lo = Operand((uint32_t) -1), Operand mask_hi = Operand((uint32_t) -1))
+{
+ Builder bld(ctx->program, ctx->block);
+ Definition lo_def = ctx->program->wave_size == 32 ? dst : bld.def(v1);
+ Temp thread_id_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, lo_def, mask_lo, Operand(0u));
+
+ if (ctx->program->wave_size == 32) {
+ return thread_id_lo;
+ } else {
+ Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ return thread_id_hi;
+ }
+}
+
Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_needs_wqm = false)
{
Builder bld(ctx->program, ctx->block);
ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
}
- Temp lane_id = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u));
- lane_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), lane_id);
+ Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
Temp lane_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), lane_id);
Temp index_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), index);
Temp cmp = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2, vcc), lane_is_hi, index_is_hi);
{
Builder bld(ctx->program, ctx->block);
if (!dst.id())
- dst = bld.tmp(s2);
+ dst = bld.tmp(bld.lm);
assert(val.regClass() == s1);
- assert(dst.regClass() == s2);
+ assert(dst.regClass() == bld.lm);
- return bld.sop2(aco_opcode::s_cselect_b64, bld.hint_vcc(Definition(dst)), Operand((uint32_t) -1), Operand(0u), bld.scc(val));
+ return bld.sop2(Builder::s_cselect, bld.hint_vcc(Definition(dst)), Operand((uint32_t) -1), Operand(0u), bld.scc(val));
}
Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1))
if (!dst.id())
dst = bld.tmp(s1);
- assert(val.regClass() == s2);
+ assert(val.regClass() == bld.lm);
assert(dst.regClass() == s1);
/* if we're currently in WQM mode, ensure that the source is also computed in WQM */
Temp tmp = bld.tmp(s1);
- bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.scc(Definition(tmp)), val, Operand(exec, s2));
+ bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(tmp)), val, Operand(exec, bld.lm));
return emit_wqm(ctx, tmp, dst);
}
{
Temp src0 = get_alu_src(ctx, instr->src[0]);
Temp src1 = get_alu_src(ctx, instr->src[1]);
+ assert(src0.size() == src1.size());
+
aco_ptr<Instruction> vopc;
if (src1.type() == RegType::sgpr) {
if (src0.type() == RegType::vgpr) {
{
Temp src0 = get_alu_src(ctx, instr->src[0]);
Temp src1 = get_alu_src(ctx, instr->src[1]);
+ Builder bld(ctx->program, ctx->block);
- assert(dst.regClass() == s2);
+ assert(dst.regClass() == bld.lm);
assert(src0.type() == RegType::sgpr);
assert(src1.type() == RegType::sgpr);
+ assert(src0.regClass() == src1.regClass());
- Builder bld(ctx->program, ctx->block);
/* Emit the SALU comparison instruction */
Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1);
/* Turn the result into a per-lane bool */
emit_sopc_instruction(ctx, instr, op, dst);
}
-void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, aco_opcode op32, aco_opcode op64, Temp dst)
+void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, Builder::WaveSpecificOpcode op, Temp dst)
{
Builder bld(ctx->program, ctx->block);
Temp src0 = get_alu_src(ctx, instr->src[0]);
Temp src1 = get_alu_src(ctx, instr->src[1]);
- assert(dst.regClass() == s2);
- assert(src0.regClass() == s2);
- assert(src1.regClass() == s2);
+ assert(dst.regClass() == bld.lm);
+ assert(src0.regClass() == bld.lm);
+ assert(src1.regClass() == bld.lm);
- bld.sop2(op64, Definition(dst), bld.def(s1, scc), src0, src1);
+ bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1);
}
void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst)
Temp then = get_alu_src(ctx, instr->src[1]);
Temp els = get_alu_src(ctx, instr->src[2]);
- assert(cond.regClass() == s2);
+ assert(cond.regClass() == bld.lm);
if (dst.type() == RegType::vgpr) {
aco_ptr<Instruction> bcsel;
}
if (instr->dest.dest.ssa.bit_size == 1) {
- assert(dst.regClass() == s2);
- assert(then.regClass() == s2);
- assert(els.regClass() == s2);
+ assert(dst.regClass() == bld.lm);
+ assert(then.regClass() == bld.lm);
+ assert(els.regClass() == bld.lm);
}
if (!ctx->divergent_vals[instr->src[0].src.ssa->index]) { /* uniform condition and values in sgpr */
if (dst.regClass() == s1 || dst.regClass() == s2) {
assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass());
+ assert(dst.size() == then.size());
aco_opcode op = dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
} else {
assert(instr->dest.dest.ssa.bit_size == 1);
if (cond.id() != then.id())
- then = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), cond, then);
+ then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then);
if (cond.id() == els.id())
- bld.sop1(aco_opcode::s_mov_b64, Definition(dst), then);
+ bld.sop1(Builder::s_mov, Definition(dst), then);
else
- bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), then,
- bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), els, cond));
+ bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then,
+ bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond));
}
void emit_scaled_op(isel_context *ctx, Builder& bld, Definition dst, Temp val,
aco_opcode op, uint32_t undo)
{
/* multiply by 16777216 to handle denormals */
- Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(s2)),
+ Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(bld.lm)),
as_vgpr(ctx, val), bld.copy(bld.def(v1), Operand((1u << 7) | (1u << 4))));
Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x4b800000u), val);
scaled = bld.vop1(op, bld.def(v1), scaled);
case nir_op_inot: {
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->dest.dest.ssa.bit_size == 1) {
- assert(src.regClass() == s2);
- assert(dst.regClass() == s2);
- bld.sop2(aco_opcode::s_andn2_b64, Definition(dst), bld.def(s1, scc), Operand(exec, s2), src);
+ assert(src.regClass() == bld.lm);
+ assert(dst.regClass() == bld.lm);
+ bld.sop2(Builder::s_andn2, Definition(dst), bld.def(s1, scc), Operand(exec, bld.lm), src);
} else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
} else if (dst.type() == RegType::sgpr) {
bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, neqz);
} else if (dst.regClass() == v1) {
Temp tmp = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
- Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(1u), tmp, gtz);
} else if (dst.regClass() == v2) {
Temp upper = emit_extract_vector(ctx, src, 1, v1);
Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper);
- Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(1u), neg, gtz);
upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
}
case nir_op_ior: {
if (instr->dest.dest.ssa.bit_size == 1) {
- emit_boolean_logic(ctx, instr, aco_opcode::s_or_b32, aco_opcode::s_or_b64, dst);
+ emit_boolean_logic(ctx, instr, Builder::s_or, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
} else if (dst.regClass() == s1) {
}
case nir_op_iand: {
if (instr->dest.dest.ssa.bit_size == 1) {
- emit_boolean_logic(ctx, instr, aco_opcode::s_and_b32, aco_opcode::s_and_b64, dst);
+ emit_boolean_logic(ctx, instr, Builder::s_and, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
} else if (dst.regClass() == s1) {
}
case nir_op_ixor: {
if (instr->dest.dest.ssa.bit_size == 1) {
- emit_boolean_logic(ctx, instr, aco_opcode::s_xor_b32, aco_opcode::s_xor_b64, dst);
+ emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
} else if (dst.regClass() == s1) {
case nir_op_fsign: {
Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
if (dst.size() == 1) {
- Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond);
- cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond);
} else if (dst.size() == 2) {
- Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);
- cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
+ cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u));
upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond);
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) {
Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
- Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(s2)), Operand(64u), exponent);
+ Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent);
exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent);
Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa);
}
case nir_op_b2f32: {
Temp src = get_alu_src(ctx, instr->src[0]);
- assert(src.regClass() == s2);
+ assert(src.regClass() == bld.lm);
if (dst.regClass() == s1) {
src = bool_to_scalar_condition(ctx, src);
}
case nir_op_b2f64: {
Temp src = get_alu_src(ctx, instr->src[0]);
- assert(src.regClass() == s2);
+ assert(src.regClass() == bld.lm);
if (dst.regClass() == s2) {
src = bool_to_scalar_condition(ctx, src);
}
case nir_op_i2i64: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32) {
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, Operand(0u));
+ if (src.regClass() == s1) {
+ Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
+ } else if (src.regClass() == v1) {
+ Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
nir_print_instr(&instr->instr, stderr);
}
case nir_op_b2i32: {
Temp src = get_alu_src(ctx, instr->src[0]);
- assert(src.regClass() == s2);
+ assert(src.regClass() == bld.lm);
if (dst.regClass() == s1) {
// TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ
}
case nir_op_i2b1: {
Temp src = get_alu_src(ctx, instr->src[0]);
- assert(dst.regClass() == s2);
+ assert(dst.regClass() == bld.lm);
if (src.type() == RegType::vgpr) {
assert(src.regClass() == v1 || src.regClass() == v2);
case nir_op_fquantize2f16: {
Temp src = get_alu_src(ctx, instr->src[0]);
Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src);
+ Temp f32, cmp_res;
- Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */
-
- Temp cmp_res = bld.tmp(s2);
- bld.vopc_e64(aco_opcode::v_cmp_class_f16, Definition(cmp_res), f16, mask).def(0).setHint(vcc);
-
- Temp f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
+ if (ctx->program->chip_class >= GFX8) {
+ Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */
+ cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.hint_vcc(bld.def(bld.lm)), f16, mask);
+ f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
+ } else {
+ /* 0x38800000 is the smallest normalized half-float value (2^-14) encoded as a
+ * 32-bit float, so compare the result and flush it to 0 if it's smaller.
+ */
+ f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16);
+ Temp smallest = bld.copy(bld.def(s1), Operand(0x38800000u));
+ Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(s2)), f32, smallest);
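+ /* compare |f32| so that negative denormals are flushed to zero as well */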
+ static_cast<VOP3A_instruction*>(vop3)->abs[0] = true;
+ cmp_res = vop3->definitions[0].getTemp();
+ }
- if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32) {
+ if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32 || ctx->program->chip_class < GFX8) {
Temp copysign_0 = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0u), as_vgpr(ctx, src));
bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res);
} else {
}
case nir_op_ieq: {
if (instr->src[0].src.ssa->bit_size == 1)
- emit_boolean_logic(ctx, instr, aco_opcode::s_xnor_b32, aco_opcode::s_xnor_b64, dst);
+ emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
else
emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32, aco_opcode::s_cmp_eq_u64);
break;
}
case nir_op_ine: {
if (instr->src[0].src.ssa->bit_size == 1)
- emit_boolean_logic(ctx, instr, aco_opcode::s_xor_b32, aco_opcode::s_xor_b64, dst);
+ emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
else
emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32, aco_opcode::s_cmp_lg_u64);
break;
Builder bld(ctx->program, ctx->block);
if (instr->def.bit_size == 1) {
- assert(dst.regClass() == s2);
- bld.sop1(aco_opcode::s_mov_b64, Definition(dst), Operand((uint64_t)(instr->value[0].b ? -1 : 0)));
+ assert(dst.regClass() == bld.lm);
+ int val = instr->value[0].b ? -1 : 0;
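+ /* bld.lm is s1 in wave32 and s2 in wave64, so the immediate has to match the mask width */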
+ Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val);
+ bld.sop1(Builder::s_mov, Definition(dst), op);
} else if (dst.size() == 1) {
bld.copy(Definition(dst), Operand(instr->value[0].u32));
} else {
void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr)
{
+ Builder bld(ctx->program, ctx->block);
unsigned write_mask = nir_intrinsic_write_mask(instr);
Operand values[4];
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
values[2] = values[0];
values[0] = Operand(v1);
} else {
- aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
- exp->valid_mask = false;
- exp->done = false;
- exp->compressed = true;
- exp->dest = V_008DFC_SQ_EXP_MRTZ;
- exp->enabled_mask = 0xc;
- for (int i = 0; i < 4; i++)
- exp->operands[i] = Operand(v1);
- exp->operands[1] = Operand(values[0]);
- ctx->block->instructions.emplace_back(std::move(exp));
+ bld.exp(aco_opcode::exp, Operand(v1), Operand(values[0]), Operand(v1), Operand(v1),
+ 0xc, V_008DFC_SQ_EXP_MRTZ, true);
return;
}
values[1] = values[0];
values[0] = Operand(v1);
} else {
- aco_ptr<Instruction> shift{create_instruction<VOP2_instruction>(aco_opcode::v_lshlrev_b32, Format::VOP2, 2, 1)};
- shift->operands[0] = Operand((uint32_t) 16);
- shift->operands[1] = values[0];
- Temp tmp = {ctx->program->allocateId(), v1};
- shift->definitions[0] = Definition(tmp);
- ctx->block->instructions.emplace_back(std::move(shift));
-
- aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
- exp->valid_mask = false;
- exp->done = false;
- exp->compressed = true;
- exp->dest = V_008DFC_SQ_EXP_MRTZ;
- exp->enabled_mask = 0x3;
- exp->operands[0] = Operand(tmp);
- for (int i = 1; i < 4; i++)
- exp->operands[i] = Operand(v1);
- ctx->block->instructions.emplace_back(std::move(exp));
+ values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u), values[0]);
+ bld.exp(aco_opcode::exp, values[0], Operand(v1), Operand(v1), Operand(v1),
+ 0x3, V_008DFC_SQ_EXP_MRTZ, true);
return;
}
target = V_008DFC_SQ_EXP_MRT + index;
col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
}
- ASSERTED bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
- ASSERTED bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
- assert(!is_int8 && !is_int10);
+ bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
+ bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1;
switch (col_format)
{
/* Special case: on GFX10, the outputs are different for 32_AR */
enabled_channels = 0x3;
values[1] = values[3];
+ values[3] = Operand(v1);
} else {
enabled_channels = 0x9;
}
compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
break;
- case V_028714_SPI_SHADER_UINT16_ABGR:
+ case V_028714_SPI_SHADER_UINT16_ABGR: {
enabled_channels = 0x5;
compr_op = aco_opcode::v_cvt_pk_u16_u32;
+ if (is_int8 || is_int10) {
+ /* clamp */
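+ /* int8 channels clamp to 255; int10 RGB channels clamp to 1023 and the 2-bit alpha to 3 */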
+ uint32_t max_rgb = is_int8 ? 255 : is_int10 ? 1023 : 0;
+ Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
+
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask >> i) & 1) {
+ values[i] = bld.vop2(aco_opcode::v_min_u32, bld.def(v1),
+ i == 3 && is_int10 ? Operand(3u) : Operand(max_rgb_val),
+ values[i]);
+ }
+ }
+ }
break;
+ }
case V_028714_SPI_SHADER_SINT16_ABGR:
enabled_channels = 0x5;
compr_op = aco_opcode::v_cvt_pk_i16_i32;
+ if (is_int8 || is_int10) {
+ /* clamp */
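+ /* int8 channels clamp to [-128, 127]; int10 RGB channels to [-512, 511] and the 2-bit alpha to [-2, 1] */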
+ uint32_t max_rgb = is_int8 ? 127 : is_int10 ? 511 : 0;
+ uint32_t min_rgb = is_int8 ? -128 : is_int10 ? -512 : 0;
+ Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb));
+ Temp min_rgb_val = bld.copy(bld.def(s1), Operand(min_rgb));
+
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask >> i) & 1) {
+ values[i] = bld.vop2(aco_opcode::v_min_i32, bld.def(v1),
+ i == 3 && is_int10 ? Operand(1u) : Operand(max_rgb_val),
+ values[i]);
+ values[i] = bld.vop2(aco_opcode::v_max_i32, bld.def(v1),
+ i == 3 && is_int10 ? Operand(-2u) : Operand(min_rgb_val),
+ values[i]);
+ }
+ }
+ }
break;
case V_028714_SPI_SHADER_32_ABGR:
if (target == V_008DFC_SQ_EXP_NULL)
return;
- if ((bool)compr_op)
- {
- for (int i = 0; i < 2; i++)
- {
+ if ((bool) compr_op) {
+ for (int i = 0; i < 2; i++) {
/* check if at least one of the values to be compressed is enabled */
unsigned enabled = (write_mask >> (i*2) | write_mask >> (i*2+1)) & 0x1;
if (enabled) {
enabled_channels |= enabled << (i*2);
- aco_ptr<VOP3A_instruction> compr{create_instruction<VOP3A_instruction>(compr_op, Format::VOP3A, 2, 1)};
- Temp tmp{ctx->program->allocateId(), v1};
- compr->operands[0] = values[i*2].isUndefined() ? Operand(0u) : values[i*2];
- compr->operands[1] = values[i*2+1].isUndefined() ? Operand(0u): values[i*2+1];
- compr->definitions[0] = Definition(tmp);
- values[i] = Operand(tmp);
- ctx->block->instructions.emplace_back(std::move(compr));
+ values[i] = bld.vop3(compr_op, bld.def(v1),
+ values[i*2].isUndefined() ? Operand(0u) : values[i*2],
+ values[i*2+1].isUndefined() ? Operand(0u): values[i*2+1]);
} else {
values[i] = Operand(v1);
}
}
- }
-
- aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
- exp->valid_mask = false;
- exp->done = false;
- exp->compressed = (bool) compr_op;
- exp->dest = target;
- exp->enabled_mask = enabled_channels;
- if ((bool) compr_op) {
- for (int i = 0; i < 2; i++)
- exp->operands[i] = enabled_channels & (3 << (i * 2)) ? values[i] : Operand(v1);
- exp->operands[2] = Operand(v1);
- exp->operands[3] = Operand(v1);
+ values[2] = Operand(v1);
+ values[3] = Operand(v1);
} else {
for (int i = 0; i < 4; i++)
- exp->operands[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
+ values[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1);
}
- ctx->block->instructions.emplace_back(std::move(exp));
+ bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
+ enabled_channels, target, (bool) compr_op);
}
Operand load_lds_size_m0(isel_context *ctx)
/* Convert back to the right type. */
if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
- Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0xbf800000u), alpha);
+ Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0xbf800000u), alpha);
alpha = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xbf800000u), alpha, clamp);
} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha);
bool dlc = glc && ctx->options->chip_class >= GFX10;
aco_opcode op;
- if (dst.type() == RegType::vgpr || (glc && ctx->options->chip_class < GFX8)) {
+ if (dst.type() == RegType::vgpr || (ctx->options->chip_class < GFX8 && !readonly)) {
if (ctx->options->chip_class < GFX8)
offset = as_vgpr(ctx, offset);
// TODO: optimize uniform conditions
Builder bld(ctx->program, ctx->block);
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- assert(src.regClass() == s2);
- src = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
+ assert(src.regClass() == bld.lm);
+ src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
bld.pseudo(aco_opcode::p_discard_if, src);
ctx->block->kind |= block_kind_uses_discard_if;
return;
ctx->program->needs_exact = true;
/* save exec somewhere temporarily so that it doesn't get
* overwritten before the discard from outer exec masks */
- Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, s2));
+ Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, bld.lm));
bld.pseudo(aco_opcode::p_discard_if, cond);
ctx->block->kind |= block_kind_uses_discard_if;
return;
/* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
* resource descriptor is 0 (invalid),
*/
- Temp compare = bld.tmp(s2);
+ Temp compare = bld.tmp(bld.lm);
bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(compare),
Operand(0u), emit_extract_vector(ctx, fmask_desc_ptr, 1, s1)).def(0).setHint(vcc);
flat->operands[1] = Operand(s1);
flat->glc = glc;
flat->dlc = dlc;
+ flat->barrier = barrier_buffer;
if (dst.type() == RegType::sgpr) {
Temp vec = bld.tmp(RegType::vgpr, dst.size());
if (offset > 0 && ctx->options->chip_class < GFX9) {
Temp addr0 = bld.tmp(v1), addr1 = bld.tmp(v1);
Temp new_addr0 = bld.tmp(v1), new_addr1 = bld.tmp(v1);
- Temp carry = bld.tmp(s2);
+ Temp carry = bld.tmp(bld.lm);
bld.pseudo(aco_opcode::p_split_vector, Definition(addr0), Definition(addr1), addr);
bld.vop2(aco_opcode::v_add_co_u32, Definition(new_addr0), bld.hint_vcc(Definition(carry)),
Operand(offset), addr0);
- bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(s2),
+ bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(bld.lm),
Operand(0u), addr1,
carry).def(1).setHint(vcc);
flat->glc = glc;
flat->dlc = false;
flat->offset = offset;
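+ /* stores must not be executed by helper invocations, so keep the exact exec mask */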
+ flat->disable_wqm = true;
+ flat->barrier = barrier_buffer;
+ ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(flat));
}
}
+void visit_global_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
+{
+ /* return the previous value if dest is ever used */
+ bool return_previous = false;
+ nir_foreach_use_safe(use_src, &instr->dest.ssa) {
+ return_previous = true;
+ break;
+ }
+ nir_foreach_if_use_safe(use_src, &instr->dest.ssa) {
+ return_previous = true;
+ break;
+ }
+
+ Builder bld(ctx->program, ctx->block);
+ Temp addr = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
+ Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
+
+ if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap)
+ data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2),
+ get_ssa_temp(ctx, instr->src[2].ssa), data);
+
+ Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+
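+ /* GFX9+ has dedicated GLOBAL_* atomics; older chips use FLAT_* with a 64-bit VGPR address */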
+ bool global = ctx->options->chip_class >= GFX9;
+ aco_opcode op32, op64;
+ switch (instr->intrinsic) {
+ case nir_intrinsic_global_atomic_add:
+ op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add;
+ op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2;
+ break;
+ case nir_intrinsic_global_atomic_imin:
+ op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin;
+ op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2;
+ break;
+ case nir_intrinsic_global_atomic_umin:
+ op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin;
+ op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2;
+ break;
+ case nir_intrinsic_global_atomic_imax:
+ op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax;
+ op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2;
+ break;
+ case nir_intrinsic_global_atomic_umax:
+ op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax;
+ op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2;
+ break;
+ case nir_intrinsic_global_atomic_and:
+ op32 = global ? aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and;
+ op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2;
+ break;
+ case nir_intrinsic_global_atomic_or:
+ op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or;
+ op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2;
+ break;
+ case nir_intrinsic_global_atomic_xor:
+ op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor;
+ op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2;
+ break;
+ case nir_intrinsic_global_atomic_exchange:
+ op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap;
+ op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2;
+ break;
+ case nir_intrinsic_global_atomic_comp_swap:
+ op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap;
+ op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2;
+ break;
+ default:
+ unreachable("visit_global_atomic should only be called with nir_intrinsic_global_atomic_* instructions.");
+ }
+ aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64;
+ aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)};
+ flat->operands[0] = Operand(addr);
+ flat->operands[1] = Operand(s1);
+ flat->operands[2] = Operand(data);
+ if (return_previous)
+ flat->definitions[0] = Definition(dst);
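+ /* with GLC set, the atomic returns the pre-op value */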
+ flat->glc = return_previous;
+ flat->dlc = false; /* Not needed for atomics */
+ flat->offset = 0;
+ flat->disable_wqm = true;
+ flat->barrier = barrier_buffer;
+ ctx->program->needs_exact = true;
+ ctx->block->instructions.emplace_back(std::move(flat));
+}
+
void emit_memory_barrier(isel_context *ctx, nir_intrinsic_instr *instr) {
Builder bld(ctx->program, ctx->block);
switch(instr->intrinsic) {
return src;
} if (op == nir_op_iand && cluster_size == 4) {
//subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val)
- Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src);
- return bld.sop1(aco_opcode::s_not_b64, bld.def(s2), bld.def(s1, scc),
- bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2), bld.def(s1, scc), tmp));
+ Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
+ return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc),
+ bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp));
} else if (op == nir_op_ior && cluster_size == 4) {
//subgroupClusteredOr(val, 4) -> wqm(val & exec)
- return bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2), bld.def(s1, scc),
- bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)));
- } else if (op == nir_op_iand && cluster_size == 64) {
+ return bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc),
+ bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)));
+ } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) {
//subgroupAnd(val) -> (exec & ~val) == 0
- Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp();
- return bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(0u), Operand(-1u), bld.scc(tmp));
- } else if (op == nir_op_ior && cluster_size == 64) {
+ Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
+ return bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(0u), Operand(-1u), bld.scc(tmp));
+ } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) {
//subgroupOr(val) -> (val & exec) != 0
- Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)).def(1).getTemp();
+ Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)).def(1).getTemp();
return bool_to_vector_condition(ctx, tmp);
- } else if (op == nir_op_ixor && cluster_size == 64) {
+ } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) {
//subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1
- Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
- tmp = bld.sop1(aco_opcode::s_bcnt1_i32_b64, bld.def(s2), bld.def(s1, scc), tmp);
+ Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
+ tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp);
tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand(1u)).def(1).getTemp();
return bool_to_vector_condition(ctx, tmp);
} else {
//subgroupClustered{And,Or,Xor}(val, n) ->
- //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0))
+ //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) ; just v_mbcnt_lo_u32_b32 on wave32
//cluster_offset = ~(n - 1) & lane_id
//cluster_mask = ((1 << n) - 1)
//subgroupClusteredAnd():
// return ((val & exec) >> cluster_offset) & cluster_mask != 0
//subgroupClusteredXor():
// return v_bcnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0
- Temp lane_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
+ Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(~uint32_t(cluster_size - 1)), lane_id);
Temp tmp;
if (op == nir_op_iand)
- tmp = bld.sop2(aco_opcode::s_orn2_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
+ tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
else
- tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
+ tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
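+ /* a full 32-lane mask can't be built with a shift (1u << 32 is undefined), so special-case it */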
uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u;
- tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
+ if (ctx->program->wave_size == 64)
+ tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp);
+ else
+ tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp);
tmp = emit_extract_vector(ctx, tmp, 0, v1);
if (cluster_mask != 0xffffffff)
tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(cluster_mask), tmp);
Definition cmp_def = Definition();
if (op == nir_op_iand) {
- cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2), Operand(cluster_mask), tmp).def(0);
+ cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(cluster_mask), tmp).def(0);
} else if (op == nir_op_ior) {
- cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp).def(0);
+ cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
} else if (op == nir_op_ixor) {
tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u),
bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand(0u)));
- cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp).def(0);
+ cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0);
}
cmp_def.setHint(vcc);
return cmp_def.getTemp();
//subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0
Temp tmp;
if (op == nir_op_iand)
- tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src);
+ tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src);
else
- tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
+ tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
Builder::Result lohi = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), tmp);
Temp lo = lohi.def(0).getTemp();
Temp hi = lohi.def(1).getTemp();
- Temp mbcnt = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), hi,
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), lo, Operand(0u)));
+ Temp mbcnt = emit_mbcnt(ctx, bld.def(v1), Operand(lo), Operand(hi));
Definition cmp_def = Definition();
if (op == nir_op_iand)
- cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2), Operand(0u), mbcnt).def(0);
+ cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
else if (op == nir_op_ior)
- cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), mbcnt).def(0);
+ cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0);
else if (op == nir_op_ixor)
- cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u),
+ cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u),
bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), mbcnt)).def(0);
cmp_def.setHint(vcc);
return cmp_def.getTemp();
//subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val
Temp tmp = emit_boolean_exclusive_scan(ctx, op, src);
if (op == nir_op_iand)
- return bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), tmp, src);
+ return bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
else if (op == nir_op_ior)
- return bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), tmp, src);
+ return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
else if (op == nir_op_ixor)
- return bld.sop2(aco_opcode::s_xor_b64, bld.def(s2), bld.def(s1, scc), tmp, src);
+ return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src);
assert(false);
return Temp();
Temp pck0 = bld.tmp(v1);
Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp();
tmp1 = as_vgpr(ctx, tmp1);
- Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(s2)), tmp1, Operand(0u), carry);
+ Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(bld.lm)), tmp1, Operand(0u), carry);
addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1);
/* sample_pos = flat_load_dwordx2 addr */
case nir_intrinsic_store_global:
visit_store_global(ctx, instr);
break;
+ case nir_intrinsic_global_atomic_add:
+ case nir_intrinsic_global_atomic_imin:
+ case nir_intrinsic_global_atomic_umin:
+ case nir_intrinsic_global_atomic_imax:
+ case nir_intrinsic_global_atomic_umax:
+ case nir_intrinsic_global_atomic_and:
+ case nir_intrinsic_global_atomic_or:
+ case nir_intrinsic_global_atomic_xor:
+ case nir_intrinsic_global_atomic_exchange:
+ case nir_intrinsic_global_atomic_comp_swap:
+ visit_global_atomic(ctx, instr);
+ break;
case nir_intrinsic_ssbo_atomic_add:
case nir_intrinsic_ssbo_atomic_imin:
case nir_intrinsic_ssbo_atomic_umin:
case nir_intrinsic_barrier: {
unsigned* bsize = ctx->program->info->cs.block_size;
unsigned workgroup_size = bsize[0] * bsize[1] * bsize[2];
- if (workgroup_size > 64)
+ if (workgroup_size > ctx->program->wave_size)
bld.sopp(aco_opcode::s_barrier);
break;
}
break;
}
case nir_intrinsic_load_local_invocation_index: {
- Temp id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
- Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
- get_arg(ctx, ctx->args->ac.tg_size));
- bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id);
+ Temp id = emit_mbcnt(ctx, bld.def(v1));
+
+ /* The tg_size bits [6:11] contain the subgroup id;
+ * multiply it by the wave size and then OR the thread id into it.
+ */
+ if (ctx->program->wave_size == 64) {
+ /* After the s_and the bits are already multiplied by 64 (left shifted by 6) so we can just feed that to v_or */
+ Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
+ get_arg(ctx, ctx->args->ac.tg_size));
+ bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id);
+ } else {
+ /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */
+ Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
+ bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, Operand(0x5u), id);
+ }
break;
}
case nir_intrinsic_load_subgroup_id: {
if (ctx->stage == compute_cs) {
- Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u),
- get_arg(ctx, ctx->args->ac.tg_size));
- bld.sop2(aco_opcode::s_lshr_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), tg_num, Operand(0x6u));
+ bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16)));
} else {
bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x0u));
}
break;
}
case nir_intrinsic_load_subgroup_invocation: {
- bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand((uint32_t) -1),
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
+ emit_mbcnt(ctx, Definition(get_ssa_temp(ctx, &instr->dest.ssa)));
break;
}
case nir_intrinsic_load_num_subgroups: {
break;
}
case nir_intrinsic_ballot: {
- Definition tmp = bld.def(s2);
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
+ Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
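+ /* the ballot result can be wider than the lane mask (64-bit ballot in wave32 mode):
+ * compute into a lane-mask-sized def first and zero-extend it into dst below */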
+ Definition tmp = bld.def(dst.regClass());
+ Definition lanemask_tmp = dst.size() == bld.lm.size() ? tmp : bld.def(bld.lm);
if (instr->src[0].ssa->bit_size == 1) {
- assert(src.regClass() == s2);
- bld.sop2(aco_opcode::s_and_b64, tmp, bld.def(s1, scc), Operand(exec, s2), src);
+ assert(src.regClass() == bld.lm);
+ bld.sop2(Builder::s_and, lanemask_tmp, bld.def(s1, scc), Operand(exec, bld.lm), src);
} else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) {
- bld.vopc(aco_opcode::v_cmp_lg_u32, tmp, Operand(0u), src);
+ bld.vopc(aco_opcode::v_cmp_lg_u32, lanemask_tmp, Operand(0u), src);
} else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
- bld.vopc(aco_opcode::v_cmp_lg_u64, tmp, Operand(0u), src);
+ bld.vopc(aco_opcode::v_cmp_lg_u64, lanemask_tmp, Operand(0u), src);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
nir_print_instr(&instr->instr, stderr);
fprintf(stderr, "\n");
}
- emit_wqm(ctx, tmp.getTemp(), get_ssa_temp(ctx, &instr->dest.ssa));
+ if (dst.size() != bld.lm.size()) {
+ /* Wave32 with ballot size set to 64 */
+ bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lanemask_tmp.getTemp(), Operand(0u));
+ }
+ emit_wqm(ctx, tmp.getTemp(), dst);
break;
}
case nir_intrinsic_shuffle:
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) {
- assert(src.regClass() == s2);
- Temp tmp = bld.sopc(aco_opcode::s_bitcmp1_b64, bld.def(s1, scc), src, tid);
+ assert(src.regClass() == bld.lm);
+ Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid);
bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
} else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) {
- assert(src.regClass() == s2);
- Temp tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
+ assert(src.regClass() == bld.lm);
+ Temp tmp;
+ if (ctx->program->wave_size == 64)
+ tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src);
+ else
+ tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src);
tmp = emit_extract_vector(ctx, tmp, 0, v1);
tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), tmp);
- emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp), dst);
+ emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp), dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
nir_print_instr(&instr->instr, stderr);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else if (instr->dest.ssa.bit_size == 1) {
- assert(src.regClass() == s2);
- Temp tmp = bld.sopc(aco_opcode::s_bitcmp1_b64, bld.def(s1, scc), src,
- bld.sop1(aco_opcode::s_ff1_i32_b64, bld.def(s1), Operand(exec, s2)));
+ assert(src.regClass() == bld.lm);
+ Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src,
+ bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)));
bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst);
} else if (src.regClass() == s1) {
bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
case nir_intrinsic_vote_all: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- assert(src.regClass() == s2);
- assert(dst.regClass() == s2);
+ assert(src.regClass() == bld.lm);
+ assert(dst.regClass() == bld.lm);
- Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp();
- Temp val = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(0u), Operand(-1u), bld.scc(tmp));
+ Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
+ Temp val = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(0u), Operand(-1u), bld.scc(tmp));
emit_wqm(ctx, val, dst);
break;
}
case nir_intrinsic_vote_any: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- assert(src.regClass() == s2);
- assert(dst.regClass() == s2);
+ assert(src.regClass() == bld.lm);
+ assert(dst.regClass() == bld.lm);
- Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp();
- Temp val = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(-1u), Operand(0u), bld.scc(tmp));
+ Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp();
+ Temp val = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), Operand(0u), bld.scc(tmp));
emit_wqm(ctx, val, dst);
break;
}
nir_op op = (nir_op) nir_intrinsic_reduction_op(instr);
unsigned cluster_size = instr->intrinsic == nir_intrinsic_reduce ?
nir_intrinsic_cluster_size(instr) : 0;
- cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : 64, 64));
+ cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
if (!ctx->divergent_vals[instr->src[0].ssa->index] && (op == nir_op_ior || op == nir_op_iand)) {
emit_uniform_subgroup(ctx, instr, src);
Temp tmp_dst = bld.tmp(dst.regClass());
reduce->definitions[0] = Definition(tmp_dst);
- reduce->definitions[1] = bld.def(s2); // used internally
+ reduce->definitions[1] = bld.def(ctx->program->lane_mask); // used internally
reduce->definitions[2] = Definition();
reduce->definitions[3] = Definition(scc, s1);
reduce->definitions[4] = Definition();
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
unsigned lane = nir_src_as_const_value(instr->src[1])->u32;
if (instr->dest.ssa.bit_size == 1) {
- assert(src.regClass() == s2);
+ assert(src.regClass() == bld.lm);
+ assert(dst.regClass() == bld.lm);
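+ /* 0x11111111 << lane selects the requested lane of every quad; it is duplicated below to cover both halves of the wave64 mask */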
uint32_t half_mask = 0x11111111u << lane;
Temp mask_tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(half_mask), Operand(half_mask));
- Temp tmp = bld.tmp(s2);
- bld.sop1(aco_opcode::s_wqm_b64, Definition(tmp),
- bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), mask_tmp,
- bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2))));
+ Temp tmp = bld.tmp(bld.lm);
+ bld.sop1(Builder::s_wqm, Definition(tmp),
+ bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp,
+ bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))));
emit_wqm(ctx, tmp, dst);
} else if (instr->dest.ssa.bit_size == 32) {
emit_wqm(ctx,
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
if (instr->dest.ssa.bit_size == 1) {
- assert(src.regClass() == s2);
+ assert(src.regClass() == bld.lm);
src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src);
src = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
- Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), src);
+ Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
emit_wqm(ctx, tmp, dst);
} else if (instr->dest.ssa.bit_size == 32) {
Temp tmp = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
RegClass rc = RegClass(src.type(), 1);
Temp mask_lo = bld.tmp(rc), mask_hi = bld.tmp(rc);
bld.pseudo(aco_opcode::p_split_vector, Definition(mask_lo), Definition(mask_hi), src);
- Temp tmp = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), mask_lo, Operand(0u));
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- Temp wqm_tmp = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), mask_hi, tmp);
+ Temp wqm_tmp = emit_mbcnt(ctx, bld.def(v1), Operand(mask_lo), Operand(mask_hi));
emit_wqm(ctx, wqm_tmp, dst);
break;
}
break;
case nir_intrinsic_demote_if: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- assert(src.regClass() == s2);
- Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2));
+ assert(src.regClass() == bld.lm);
+ Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm));
bld.pseudo(aco_opcode::p_demote_to_helper, cond);
ctx->block->kind |= block_kind_uses_demote;
ctx->program->needs_exact = true;
break;
}
case nir_intrinsic_first_invocation: {
- emit_wqm(ctx, bld.sop1(aco_opcode::s_ff1_i32_b64, bld.def(s1), Operand(exec, s2)),
+ emit_wqm(ctx, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)),
get_ssa_temp(ctx, &instr->dest.ssa));
break;
}
}
if (samp_ptr) {
*samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, ACO_DESC_SAMPLER, instr, false, false);
+
if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT && ctx->options->chip_class < GFX8) {
- fprintf(stderr, "Unimplemented sampler descriptor: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- abort();
- // TODO: build samp_ptr = and(samp_ptr, res_ptr)
+ /* fix sampler aniso on SI/CI: samp[0] = samp[0] & img[7] */
+ Builder bld(ctx->program, ctx->block);
+
+ /* to avoid unnecessary moves, we split and recombine sampler and image */
+ Temp img[8] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1),
+ bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
+ Temp samp[4] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)};
+ bld.pseudo(aco_opcode::p_split_vector, Definition(img[0]), Definition(img[1]),
+ Definition(img[2]), Definition(img[3]), Definition(img[4]),
+ Definition(img[5]), Definition(img[6]), Definition(img[7]), *res_ptr);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(samp[0]), Definition(samp[1]),
+ Definition(samp[2]), Definition(samp[3]), *samp_ptr);
+
+ samp[0] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), samp[0], img[7]);
+ *res_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8),
+ img[0], img[1], img[2], img[3],
+ img[4], img[5], img[6], img[7]);
+ *samp_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
+ samp[0], samp[1], samp[2], samp[3]);
}
}
if (fmask_ptr && (instr->op == nir_texop_txf_ms ||
Operand two(0x40000000u);
Operand four(0x40800000u);
- Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), ma);
+ Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), ma);
Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive);
Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0u), sgn_ma);
- Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), four, id);
+ Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), four, id);
Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(s2), two, id);
- is_ma_y = bld.sop2(aco_opcode::s_andn2_b64, bld.hint_vcc(bld.def(s2)), is_ma_y, is_ma_z);
- Temp is_not_ma_x = bld.sop2(aco_opcode::s_or_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), is_ma_z, is_ma_y);
+ is_ma_y = bld.sop2(Builder::s_andn2, bld.hint_vcc(bld.def(bld.lm)), is_ma_y, is_ma_z);
+ Temp is_not_ma_x = bld.sop2(Builder::s_or, bld.hint_vcc(bld.def(bld.lm)), bld.def(s1, scc), is_ma_z, is_ma_y);
// select sc
Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x);
Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SINT),
bld.scc(compare_cube_wa));
}
- tg4_compare_cube_wa64 = bld.tmp(s2);
+ tg4_compare_cube_wa64 = bld.tmp(bld.lm);
bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64);
nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt, Operand(26u));
assert(dmask == 1 && dst.regClass() == v1);
assert(dst.id() != tmp_dst.id());
- Temp tmp = bld.tmp(s2);
+ Temp tmp = bld.tmp(bld.lm);
bld.vopc(aco_opcode::v_cmp_eq_u32, Definition(tmp), Operand(0u), tmp_dst).def(0).setHint(vcc);
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand((uint32_t)-1), tmp);
void visit_phi(isel_context *ctx, nir_phi_instr *instr)
{
aco_ptr<Pseudo_instruction> phi;
- unsigned num_src = exec_list_length(&instr->srcs);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == s2);
+ assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
- aco_opcode opcode = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index] ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
+ bool logical = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index];
+ logical |= ctx->block->kind & block_kind_merge;
+ aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
+ /* we want a sorted list of sources, since the predecessor list is also sorted */
std::map<unsigned, nir_ssa_def*> phi_src;
- bool all_undef = true;
- nir_foreach_phi_src(src, instr) {
+ nir_foreach_phi_src(src, instr)
phi_src[src->pred->index] = src->src.ssa;
- if (src->src.ssa->parent_instr->type != nir_instr_type_ssa_undef)
- all_undef = false;
+
+ std::vector<unsigned>& preds = logical ? ctx->block->logical_preds : ctx->block->linear_preds;
+ unsigned num_operands = 0;
+ Operand operands[std::max(exec_list_length(&instr->srcs), (unsigned)preds.size())];
+ unsigned num_defined = 0;
+ unsigned cur_pred_idx = 0;
+ for (std::pair<unsigned, nir_ssa_def *> src : phi_src) {
+ if (cur_pred_idx < preds.size()) {
+ /* handle missing preds (IF merges with discard/break) and extra preds (loop exit with discard) */
+ unsigned block = ctx->cf_info.nir_to_aco[src.first];
+ unsigned skipped = 0;
+ while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block)
+ skipped++;
+ if (cur_pred_idx + skipped < preds.size()) {
+ for (unsigned i = 0; i < skipped; i++)
+ operands[num_operands++] = Operand(dst.regClass());
+ cur_pred_idx += skipped;
+ } else {
+ continue;
+ }
+ }
+ cur_pred_idx++;
+ Operand op = get_phi_operand(ctx, src.second);
+ operands[num_operands++] = op;
+ num_defined += !op.isUndefined();
}
- if (all_undef) {
+ /* handle block_kind_continue_or_break at loop exit blocks */
+ while (cur_pred_idx++ < preds.size())
+ operands[num_operands++] = Operand(dst.regClass());
+
+ if (num_defined == 0) {
Builder bld(ctx->program, ctx->block);
if (dst.regClass() == s1) {
bld.sop1(aco_opcode::s_mov_b32, Definition(dst), Operand(0u));
return;
}
+ /* we can use a linear phi in some cases if one src is undef */
+ if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) {
+ phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, num_operands, 1));
+
+ Block *linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]];
+ Block *invert = &ctx->program->blocks[linear_else->linear_preds[0]];
+ assert(invert->kind & block_kind_invert);
+
+ unsigned then_block = invert->linear_preds[0];
+
+ Block* insert_block = NULL;
+ for (unsigned i = 0; i < num_operands; i++) {
+ Operand op = operands[i];
+ if (op.isUndefined())
+ continue;
+ insert_block = ctx->block->logical_preds[i] == then_block ? invert : ctx->block;
+ phi->operands[0] = op;
+ break;
+ }
+ assert(insert_block); /* should be handled by the "num_defined == 0" case above */
+ phi->operands[1] = Operand(dst.regClass());
+ phi->definitions[0] = Definition(dst);
+ insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi));
+ return;
+ }
+
/* try to scalarize vector phis */
if (instr->dest.ssa.bit_size != 1 && dst.size() > 1) {
// TODO: scalarize linear phis on divergent ifs
bool can_scalarize = (opcode == aco_opcode::p_phi || !(ctx->block->kind & block_kind_merge));
std::array<Temp, 4> new_vec;
- for (std::pair<const unsigned, nir_ssa_def*>& pair : phi_src) {
- Operand src = get_phi_operand(ctx, pair.second);
- if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end()) {
+ for (unsigned i = 0; can_scalarize && (i < num_operands); i++) {
+ Operand src = operands[i];
+ if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end())
can_scalarize = false;
- break;
- }
}
if (can_scalarize) {
unsigned num_components = instr->dest.ssa.num_components;
aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
for (unsigned k = 0; k < num_components; k++) {
- phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_src, 1));
- std::map<unsigned, nir_ssa_def*>::iterator it = phi_src.begin();
- for (unsigned i = 0; i < num_src; i++) {
- Operand src = get_phi_operand(ctx, it->second);
+ phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
+ for (unsigned i = 0; i < num_operands; i++) {
+ Operand src = operands[i];
phi->operands[i] = src.isTemp() ? Operand(ctx->allocated_vec[src.tempId()][k]) : Operand(rc);
- ++it;
}
Temp phi_dst = {ctx->program->allocateId(), rc};
phi->definitions[0] = Definition(phi_dst);
}
}
- unsigned extra_src = 0;
- if (opcode == aco_opcode::p_linear_phi && (ctx->block->kind & block_kind_loop_exit) &&
- ctx->program->blocks[ctx->block->index-2].kind & block_kind_continue_or_break) {
- extra_src++;
- }
-
- phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_src + extra_src, 1));
-
- /* if we have a linear phi on a divergent if, we know that one src is undef */
- if (opcode == aco_opcode::p_linear_phi && ctx->block->kind & block_kind_merge) {
- assert(extra_src == 0);
- Block* block;
- /* we place the phi either in the invert-block or in the current block */
- if (phi_src.begin()->second->parent_instr->type != nir_instr_type_ssa_undef) {
- assert((++phi_src.begin())->second->parent_instr->type == nir_instr_type_ssa_undef);
- Block& linear_else = ctx->program->blocks[ctx->block->linear_preds[1]];
- block = &ctx->program->blocks[linear_else.linear_preds[0]];
- assert(block->kind & block_kind_invert);
- phi->operands[0] = get_phi_operand(ctx, phi_src.begin()->second);
- } else {
- assert((++phi_src.begin())->second->parent_instr->type != nir_instr_type_ssa_undef);
- block = ctx->block;
- phi->operands[0] = get_phi_operand(ctx, (++phi_src.begin())->second);
- }
- phi->operands[1] = Operand(dst.regClass());
- phi->definitions[0] = Definition(dst);
- block->instructions.emplace(block->instructions.begin(), std::move(phi));
- return;
- }
-
- std::map<unsigned, nir_ssa_def*>::iterator it = phi_src.begin();
- for (unsigned i = 0; i < num_src; i++) {
- phi->operands[i] = get_phi_operand(ctx, it->second);
- ++it;
- }
- for (unsigned i = 0; i < extra_src; i++)
- phi->operands[num_src + i] = Operand(dst.regClass());
+ phi.reset(create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, num_operands, 1));
+ for (unsigned i = 0; i < num_operands; i++)
+ phi->operands[i] = operands[i];
phi->definitions[0] = Definition(dst);
ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi));
}
return;
}
ctx->cf_info.parent_loop.has_divergent_branch = true;
+ ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
break;
case nir_jump_continue:
logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
we must ensure that they are handled correctly */
ctx->cf_info.parent_loop.has_divergent_continue = true;
ctx->cf_info.parent_loop.has_divergent_branch = true;
+ ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index;
} else {
/* uniform continue - directly jump to the loop header */
ctx->block->kind |= block_kind_uniform;
//abort();
}
}
+
+ if (!ctx->cf_info.parent_loop.has_divergent_branch)
+ ctx->cf_info.nir_to_aco[block->index] = ctx->block->index;
}
ctx->block->kind |= block_kind_branch;
/* branch to linear then block */
- assert(cond.regClass() == s2);
+ assert(cond.regClass() == ctx->program->lane_mask);
aco_ptr<Pseudo_branch_instruction> branch;
branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0));
branch->operands[0] = Operand(cond);
ctx->block->kind |= block_kind_uniform;
/* emit branch */
- assert(cond.regClass() == s2);
+ assert(cond.regClass() == bld.lm);
// TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction
cond = bool_to_scalar_condition(ctx, cond);
Temp so_vtx_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
get_arg(ctx, ctx->args->streamout_config), Operand(0x70010u));
- Temp tid = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
+ Temp tid = emit_mbcnt(ctx, bld.def(v1));
Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(s2), so_vtx_count, tid);
ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid);
ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid);
if (uses_center && uses_centroid) {
- Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(s2)),
+ Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)),
get_arg(ctx, ctx->args->ac.prim_mask), Operand(0u));
if (G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena)) {
if (shader_count >= 2) {
Builder bld(ctx.program, ctx.block);
Temp count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), ctx.merged_wave_info, Operand((8u << 16) | (i * 8u)));
- Temp thread_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1),
- bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)));
- Temp cond = bld.vopc(aco_opcode::v_cmp_gt_u32, bld.hint_vcc(bld.def(s2)), count, thread_id);
+ Temp thread_id = emit_mbcnt(&ctx, bld.def(v1));
+ Temp cond = bld.vopc(aco_opcode::v_cmp_gt_u32, bld.hint_vcc(bld.def(bld.lm)), count, thread_id);
begin_divergent_if_then(&ctx, &ic, cond);
}