X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fcompiler%2Faco_instruction_selection.cpp;h=85ed7b3bf6186e24081a4a0da2076cbb58779687;hb=da7ff58835b27d10fb643b303ce6a3145517fc3b;hp=a7c3c7034038f2bd4f5bbdcac7d429c968ab43fb;hpb=8995c0b30a696c709fac9e5f761c101913dc92ec;p=mesa.git diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp index a7c3c703403..85ed7b3bf61 100644 --- a/src/amd/compiler/aco_instruction_selection.cpp +++ b/src/amd/compiler/aco_instruction_selection.cpp @@ -123,6 +123,21 @@ Temp get_ssa_temp(struct isel_context *ctx, nir_ssa_def *def) return ctx->allocated[def->index]; } +Temp emit_mbcnt(isel_context *ctx, Definition dst, + Operand mask_lo = Operand((uint32_t) -1), Operand mask_hi = Operand((uint32_t) -1)) +{ + Builder bld(ctx->program, ctx->block); + Definition lo_def = ctx->program->wave_size == 32 ? dst : bld.def(v1); + Temp thread_id_lo = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, lo_def, mask_lo, Operand(0u)); + + if (ctx->program->wave_size == 32) { + return thread_id_lo; + } else { + Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo); + return thread_id_hi; + } +} + Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_needs_wqm = false) { Builder bld(ctx->program, ctx->block); @@ -148,7 +163,7 @@ Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_ne static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data) { if (index.regClass() == s1) - return bld.vop3(aco_opcode::v_readlane_b32, bld.def(s1), data, index); + return bld.readlane(bld.def(s1), data, index); Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index); @@ -170,8 +185,7 @@ static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */ } - Temp lane_id = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u)); - lane_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), lane_id); + Temp lane_id = emit_mbcnt(ctx, bld.def(v1)); Temp lane_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), lane_id); Temp index_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), index); Temp cmp = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2, vcc), lane_is_hi, index_is_hi); @@ -337,12 +351,12 @@ Temp bool_to_vector_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s2 { Builder bld(ctx->program, ctx->block); if (!dst.id()) - dst = bld.tmp(s2); + dst = bld.tmp(bld.lm); assert(val.regClass() == s1); - assert(dst.regClass() == s2); + assert(dst.regClass() == bld.lm); - return bld.sop2(aco_opcode::s_cselect_b64, bld.hint_vcc(Definition(dst)), Operand((uint32_t) -1), Operand(0u), bld.scc(val)); + return bld.sop2(Builder::s_cselect, bld.hint_vcc(Definition(dst)), Operand((uint32_t) -1), Operand(0u), bld.scc(val)); } Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1)) @@ -351,12 +365,12 @@ Temp bool_to_scalar_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s1 if (!dst.id()) dst = bld.tmp(s1); - assert(val.regClass() == s2); + assert(val.regClass() == bld.lm); assert(dst.regClass() == s1); /* if we're currently in WQM mode, ensure that the source is also computed in WQM */ Temp tmp = bld.tmp(s1); - bld.sop2(aco_opcode::s_and_b64, bld.def(s2), 
bld.scc(Definition(tmp)), val, Operand(exec, s2)); + bld.sop2(Builder::s_and, bld.def(bld.lm), bld.scc(Definition(tmp)), val, Operand(exec, bld.lm)); return emit_wqm(ctx, tmp, dst); } @@ -475,6 +489,8 @@ void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode o { Temp src0 = get_alu_src(ctx, instr->src[0]); Temp src1 = get_alu_src(ctx, instr->src[1]); + assert(src0.size() == src1.size()); + aco_ptr vopc; if (src1.type() == RegType::sgpr) { if (src0.type() == RegType::vgpr) { @@ -535,12 +551,13 @@ void emit_sopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode o { Temp src0 = get_alu_src(ctx, instr->src[0]); Temp src1 = get_alu_src(ctx, instr->src[1]); + Builder bld(ctx->program, ctx->block); - assert(dst.regClass() == s2); + assert(dst.regClass() == bld.lm); assert(src0.type() == RegType::sgpr); assert(src1.type() == RegType::sgpr); + assert(src0.regClass() == src1.regClass()); - Builder bld(ctx->program, ctx->block); /* Emit the SALU comparison instruction */ Temp cmp = bld.sopc(op, bld.scc(bld.def(s1)), src0, src1); /* Turn the result into a per-lane bool */ @@ -566,17 +583,17 @@ void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst, emit_sopc_instruction(ctx, instr, op, dst); } -void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, aco_opcode op32, aco_opcode op64, Temp dst) +void emit_boolean_logic(isel_context *ctx, nir_alu_instr *instr, Builder::WaveSpecificOpcode op, Temp dst) { Builder bld(ctx->program, ctx->block); Temp src0 = get_alu_src(ctx, instr->src[0]); Temp src1 = get_alu_src(ctx, instr->src[1]); - assert(dst.regClass() == s2); - assert(src0.regClass() == s2); - assert(src1.regClass() == s2); + assert(dst.regClass() == bld.lm); + assert(src0.regClass() == bld.lm); + assert(src1.regClass() == bld.lm); - bld.sop2(op64, Definition(dst), bld.def(s1, scc), src0, src1); + bld.sop2(op, Definition(dst), bld.def(s1, scc), src0, src1); } void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst) @@ -586,7 +603,7 @@ void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst) Temp then = get_alu_src(ctx, instr->src[1]); Temp els = get_alu_src(ctx, instr->src[2]); - assert(cond.regClass() == s2); + assert(cond.regClass() == bld.lm); if (dst.type() == RegType::vgpr) { aco_ptr bcsel; @@ -614,14 +631,15 @@ void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst) } if (instr->dest.dest.ssa.bit_size == 1) { - assert(dst.regClass() == s2); - assert(then.regClass() == s2); - assert(els.regClass() == s2); + assert(dst.regClass() == bld.lm); + assert(then.regClass() == bld.lm); + assert(els.regClass() == bld.lm); } if (!ctx->divergent_vals[instr->src[0].src.ssa->index]) { /* uniform condition and values in sgpr */ if (dst.regClass() == s1 || dst.regClass() == s2) { assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass()); + assert(dst.size() == then.size()); aco_opcode op = dst.regClass() == s1 ? 
aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64; bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond))); } else { @@ -638,13 +656,68 @@ void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst) assert(instr->dest.dest.ssa.bit_size == 1); if (cond.id() != then.id()) - then = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), cond, then); + then = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), cond, then); if (cond.id() == els.id()) - bld.sop1(aco_opcode::s_mov_b64, Definition(dst), then); + bld.sop1(Builder::s_mov, Definition(dst), then); else - bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), then, - bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), els, cond)); + bld.sop2(Builder::s_or, Definition(dst), bld.def(s1, scc), then, + bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), els, cond)); +} + +void emit_scaled_op(isel_context *ctx, Builder& bld, Definition dst, Temp val, + aco_opcode op, uint32_t undo) +{ + /* multiply by 16777216 to handle denormals */ + Temp is_denormal = bld.vopc(aco_opcode::v_cmp_class_f32, bld.hint_vcc(bld.def(bld.lm)), + as_vgpr(ctx, val), bld.copy(bld.def(v1), Operand((1u << 7) | (1u << 4)))); + Temp scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x4b800000u), val); + scaled = bld.vop1(op, bld.def(v1), scaled); + scaled = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(undo), scaled); + + Temp not_scaled = bld.vop1(op, bld.def(v1), val); + + bld.vop2(aco_opcode::v_cndmask_b32, dst, not_scaled, scaled, is_denormal); +} + +void emit_rcp(isel_context *ctx, Builder& bld, Definition dst, Temp val) +{ + if (ctx->block->fp_mode.denorm32 == 0) { + bld.vop1(aco_opcode::v_rcp_f32, dst, val); + return; + } + + emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rcp_f32, 0x4b800000u); +} + +void emit_rsq(isel_context *ctx, Builder& bld, Definition dst, Temp val) +{ + if (ctx->block->fp_mode.denorm32 == 0) { + bld.vop1(aco_opcode::v_rsq_f32, dst, val); + return; + } + + emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_rsq_f32, 0x45800000u); +} + +void emit_sqrt(isel_context *ctx, Builder& bld, Definition dst, Temp val) +{ + if (ctx->block->fp_mode.denorm32 == 0) { + bld.vop1(aco_opcode::v_sqrt_f32, dst, val); + return; + } + + emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_sqrt_f32, 0x39800000u); +} + +void emit_log2(isel_context *ctx, Builder& bld, Definition dst, Temp val) +{ + if (ctx->block->fp_mode.denorm32 == 0) { + bld.vop1(aco_opcode::v_log_f32, dst, val); + return; + } + + emit_scaled_op(ctx, bld, dst, val, aco_opcode::v_log_f32, 0xc1c00000u); } void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) @@ -697,9 +770,9 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_inot: { Temp src = get_alu_src(ctx, instr->src[0]); if (instr->dest.dest.ssa.bit_size == 1) { - assert(src.regClass() == s2); - assert(dst.regClass() == s2); - bld.sop2(aco_opcode::s_andn2_b64, Definition(dst), bld.def(s1, scc), Operand(exec, s2), src); + assert(src.regClass() == bld.lm); + assert(dst.regClass() == bld.lm); + bld.sop2(Builder::s_andn2, Definition(dst), bld.def(s1, scc), Operand(exec, bld.lm), src); } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst); } else if (dst.type() == RegType::sgpr) { @@ -762,16 +835,21 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) bld.sop2(aco_opcode::s_add_i32, Definition(dst), bld.def(s1, scc), gtz, tmp); } else if (dst.regClass() == 
s2) { Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u)); - Temp neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand(0u)); - bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, neqz); + Temp neqz; + if (ctx->program->chip_class >= GFX8) + neqz = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), src, Operand(0u)); + else + neqz = bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), src, Operand(0u)).def(1).getTemp(); + /* SCC gets zero-extended to 64 bit */ + bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz)); } else if (dst.regClass() == v1) { Temp tmp = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src); - Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(1u), tmp, gtz); } else if (dst.regClass() == v2) { Temp upper = emit_extract_vector(ctx, src, 1, v1); Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper); - Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); Temp lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(1u), neg, gtz); upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz); bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper); @@ -832,7 +910,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_ior: { if (instr->dest.dest.ssa.bit_size == 1) { - emit_boolean_logic(ctx, instr, aco_opcode::s_or_b32, aco_opcode::s_or_b64, dst); + emit_boolean_logic(ctx, instr, Builder::s_or, dst); } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true); } else if (dst.regClass() == s1) { @@ -848,7 +926,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_iand: { if (instr->dest.dest.ssa.bit_size == 1) { - emit_boolean_logic(ctx, instr, aco_opcode::s_and_b32, aco_opcode::s_and_b64, dst); + emit_boolean_logic(ctx, instr, Builder::s_and, dst); } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true); } else if (dst.regClass() == s1) { @@ -864,7 +942,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_ixor: { if (instr->dest.dest.ssa.bit_size == 1) { - emit_boolean_logic(ctx, instr, aco_opcode::s_xor_b32, aco_opcode::s_xor_b64, dst); + emit_boolean_logic(ctx, instr, Builder::s_xor, dst); } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true); } else if (dst.regClass() == s1) { @@ -881,9 +959,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_ushr: { if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_lshrrev_b32, dst, false, true); - } else if (dst.regClass() == v2) { + } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) { bld.vop3(aco_opcode::v_lshrrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0])); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_lshr_b64, Definition(dst), + get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1])); } else if (dst.regClass() == s2) { 
emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b64, dst, true); } else if (dst.regClass() == s1) { @@ -898,9 +979,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_ishl: { if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_lshlrev_b32, dst, false, true); - } else if (dst.regClass() == v2) { + } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) { bld.vop3(aco_opcode::v_lshlrev_b64, Definition(dst), get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0])); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_lshl_b64, Definition(dst), + get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1])); } else if (dst.regClass() == s1) { emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b32, dst, true); } else if (dst.regClass() == s2) { @@ -915,9 +999,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_ishr: { if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_ashrrev_i32, dst, false, true); - } else if (dst.regClass() == v2) { + } else if (dst.regClass() == v2 && ctx->program->chip_class >= GFX8) { bld.vop3(aco_opcode::v_ashrrev_i64, Definition(dst), get_alu_src(ctx, instr->src[1]), get_alu_src(ctx, instr->src[0])); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_ashr_i64, Definition(dst), + get_alu_src(ctx, instr->src[0]), get_alu_src(ctx, instr->src[1])); } else if (dst.regClass() == s1) { emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i32, dst, true); } else if (dst.regClass() == s2) { @@ -1399,7 +1486,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_frsq: { if (dst.size() == 1) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f32, dst); + emit_rsq(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); } else if (dst.size() == 2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst); } else { @@ -1412,8 +1499,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_fneg: { Temp src = get_alu_src(ctx, instr->src[0]); if (dst.size() == 1) { + if (ctx->block->fp_mode.must_flush_denorms32) + src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src)); bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src)); } else if (dst.size() == 2) { + if (ctx->block->fp_mode.must_flush_denorms16_64) + src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src)); Temp upper = bld.tmp(v1), lower = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src); upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x80000000u), upper); @@ -1428,8 +1519,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_fabs: { Temp src = get_alu_src(ctx, instr->src[0]); if (dst.size() == 1) { + if (ctx->block->fp_mode.must_flush_denorms32) + src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src)); bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src)); } else if (dst.size() == 2) { + if (ctx->block->fp_mode.must_flush_denorms16_64) + src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src)); Temp upper = bld.tmp(v1), lower = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), src); upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFFFFFu), 
upper); @@ -1458,7 +1553,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_flog2: { if (dst.size() == 1) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f32, dst); + emit_log2(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1468,7 +1563,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_frcp: { if (dst.size() == 1) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f32, dst); + emit_rcp(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); } else if (dst.size() == 2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst); } else { @@ -1490,7 +1585,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fsqrt: { if (dst.size() == 1) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f32, dst); + emit_sqrt(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); } else if (dst.size() == 2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst); } else { @@ -1565,12 +1660,8 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) Temp src = get_alu_src(ctx, instr->src[0]); aco_ptr norm; if (dst.size() == 1) { - Temp tmp; - Operand half_pi(0x3e22f983u); - if (src.type() == RegType::sgpr) - tmp = bld.vop2_e64(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src); - else - tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src); + Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u)); + Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, as_vgpr(ctx, src)); /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */ if (ctx->options->chip_class < GFX9) @@ -1632,16 +1723,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_fsign: { Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0])); if (dst.size() == 1) { - Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond); - cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond); } else if (dst.size() == 2) { - Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u)); - Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, src, cond); + Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond); - cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src); + cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u)); upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, upper, cond); @@ -1780,7 +1871,10 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), mantissa); Temp 
new_exponent = bld.tmp(v1); Temp borrow = bld.vsub32(Definition(new_exponent), Operand(63u), exponent, true).def(1).getTemp(); - mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa); + if (ctx->program->chip_class >= GFX8) + mantissa = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), new_exponent, mantissa); + else + mantissa = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), mantissa, new_exponent); Temp saturate = bld.vop1(aco_opcode::v_bfrev_b32, bld.def(v1), Operand(0xfffffffeu)); Temp lower = bld.tmp(v1), upper = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa); @@ -1845,7 +1939,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) Temp src = get_alu_src(ctx, instr->src[0]); if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) { Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src); - Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(s2)), Operand(64u), exponent); + Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent); exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent); Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src); mantissa = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(0x800000u), mantissa); @@ -1854,7 +1948,10 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) mantissa = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(0u), mantissa); Temp new_exponent = bld.tmp(v1); Temp cond_small = bld.vsub32(Definition(new_exponent), exponent, Operand(24u), true).def(1).getTemp(); - mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa); + if (ctx->program->chip_class >= GFX8) + mantissa = bld.vop3(aco_opcode::v_lshlrev_b64, bld.def(v2), new_exponent, mantissa); + else + mantissa = bld.vop3(aco_opcode::v_lshl_b64, bld.def(v2), mantissa, new_exponent); Temp lower = bld.tmp(v1), upper = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lower), Definition(upper), mantissa); lower = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), lower, small, cond_small); @@ -1909,7 +2006,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_b2f32: { Temp src = get_alu_src(ctx, instr->src[0]); - assert(src.regClass() == s2); + assert(src.regClass() == bld.lm); if (dst.regClass() == s1) { src = bool_to_scalar_condition(ctx, src); @@ -1923,7 +2020,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_b2f64: { Temp src = get_alu_src(ctx, instr->src[0]); - assert(src.regClass() == s2); + assert(src.regClass() == bld.lm); if (dst.regClass() == s2) { src = bool_to_scalar_condition(ctx, src); @@ -1970,8 +2067,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_i2i64: { Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32) { - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, Operand(0u)); + if (src.regClass() == s1) { + Temp high = bld.sopc(aco_opcode::s_ashr_i32, bld.def(s1, scc), src, Operand(31u)); + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high); + } else if (src.regClass() == v1) { + Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src); + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); 
nir_print_instr(&instr->instr, stderr); @@ -1992,7 +2093,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_b2i32: { Temp src = get_alu_src(ctx, instr->src[0]); - assert(src.regClass() == s2); + assert(src.regClass() == bld.lm); if (dst.regClass() == s1) { // TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ @@ -2006,7 +2107,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_i2b1: { Temp src = get_alu_src(ctx, instr->src[0]); - assert(dst.regClass() == s2); + assert(dst.regClass() == bld.lm); if (src.type() == RegType::vgpr) { assert(src.regClass() == v1 || src.regClass() == v2); @@ -2040,8 +2141,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) Temp src0 = bld.tmp(v1); Temp src1 = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src); - bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src0, src1); - + if (!ctx->block->fp_mode.care_about_round32 || ctx->block->fp_mode.round32 == fp_round_tz) + bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src0, src1); + else + bld.vop3(aco_opcode::v_cvt_pk_u16_u32, Definition(dst), + bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src0), + bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src1)); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -2074,16 +2179,31 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fquantize2f16: { - Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), get_alu_src(ctx, instr->src[0])); - - Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */ - - Temp cmp_res = bld.tmp(s2); - bld.vopc_e64(aco_opcode::v_cmp_class_f16, Definition(cmp_res), f16, mask).def(0).setHint(vcc); + Temp src = get_alu_src(ctx, instr->src[0]); + Temp f16 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src); + Temp f32, cmp_res; - Temp f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16); + if (ctx->program->chip_class >= GFX8) { + Temp mask = bld.copy(bld.def(s1), Operand(0x36Fu)); /* value is NOT negative/positive denormal value */ + cmp_res = bld.vopc_e64(aco_opcode::v_cmp_class_f16, bld.hint_vcc(bld.def(bld.lm)), f16, mask); + f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16); + } else { + /* 0x38800000 is smallest half float value (2^-14) in 32-bit float, + * so compare the result and flush to 0 if it's smaller. 
+ */ + f32 = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), f16); + Temp smallest = bld.copy(bld.def(s1), Operand(0x38800000u)); + Instruction* vop3 = bld.vopc_e64(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(s2)), f32, smallest); + static_cast(vop3)->abs[0] = true; + cmp_res = vop3->definitions[0].getTemp(); + } - bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), f32, cmp_res); + if (ctx->block->fp_mode.preserve_signed_zero_inf_nan32 || ctx->program->chip_class < GFX8) { + Temp copysign_0 = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0u), as_vgpr(ctx, src)); + bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), copysign_0, f32, cmp_res); + } else { + bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), f32, cmp_res); + } break; } case nir_op_bfm: { @@ -2247,14 +2367,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_ieq: { if (instr->src[0].src.ssa->bit_size == 1) - emit_boolean_logic(ctx, instr, aco_opcode::s_xnor_b32, aco_opcode::s_xnor_b64, dst); + emit_boolean_logic(ctx, instr, Builder::s_xnor, dst); else emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32, aco_opcode::s_cmp_eq_u64); break; } case nir_op_ine: { if (instr->src[0].src.ssa->bit_size == 1) - emit_boolean_logic(ctx, instr, aco_opcode::s_xor_b32, aco_opcode::s_xor_b64, dst); + emit_boolean_logic(ctx, instr, Builder::s_xor, dst); else emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32, aco_opcode::s_cmp_lg_u64); break; @@ -2273,25 +2393,32 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_fddy_fine: case nir_op_fddx_coarse: case nir_op_fddy_coarse: { - Definition tl = bld.def(v1); - uint16_t dpp_ctrl; + Temp src = get_alu_src(ctx, instr->src[0]); + uint16_t dpp_ctrl1, dpp_ctrl2; if (instr->op == nir_op_fddx_fine) { - bld.vop1_dpp(aco_opcode::v_mov_b32, tl, get_alu_src(ctx, instr->src[0]), dpp_quad_perm(0, 0, 2, 2)); - dpp_ctrl = dpp_quad_perm(1, 1, 3, 3); + dpp_ctrl1 = dpp_quad_perm(0, 0, 2, 2); + dpp_ctrl2 = dpp_quad_perm(1, 1, 3, 3); } else if (instr->op == nir_op_fddy_fine) { - bld.vop1_dpp(aco_opcode::v_mov_b32, tl, get_alu_src(ctx, instr->src[0]), dpp_quad_perm(0, 1, 0, 1)); - dpp_ctrl = dpp_quad_perm(2, 3, 2, 3); + dpp_ctrl1 = dpp_quad_perm(0, 1, 0, 1); + dpp_ctrl2 = dpp_quad_perm(2, 3, 2, 3); } else { - bld.vop1_dpp(aco_opcode::v_mov_b32, tl, get_alu_src(ctx, instr->src[0]), dpp_quad_perm(0, 0, 0, 0)); + dpp_ctrl1 = dpp_quad_perm(0, 0, 0, 0); if (instr->op == nir_op_fddx || instr->op == nir_op_fddx_coarse) - dpp_ctrl = dpp_quad_perm(1, 1, 1, 1); + dpp_ctrl2 = dpp_quad_perm(1, 1, 1, 1); else - dpp_ctrl = dpp_quad_perm(2, 2, 2, 2); + dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2); } - Definition tmp = bld.def(v1); - bld.vop2_dpp(aco_opcode::v_sub_f32, tmp, get_alu_src(ctx, instr->src[0]), tl.getTemp(), dpp_ctrl); - emit_wqm(ctx, tmp.getTemp(), dst, true); + Temp tmp; + if (ctx->program->chip_class >= GFX8) { + Temp tl = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl1); + tmp = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), src, tl, dpp_ctrl2); + } else { + Temp tl = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl1); + Temp tr = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl2); + tmp = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), tr, tl); + } + emit_wqm(ctx, tmp, dst, true); break; } default: @@ -2314,8 +2441,10 @@ void 
visit_load_const(isel_context *ctx, nir_load_const_instr *instr) Builder bld(ctx->program, ctx->block); if (instr->def.bit_size == 1) { - assert(dst.regClass() == s2); - bld.sop1(aco_opcode::s_mov_b64, Definition(dst), Operand((uint64_t)(instr->value[0].b ? -1 : 0))); + assert(dst.regClass() == bld.lm); + int val = instr->value[0].b ? -1 : 0; + Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val); + bld.sop1(Builder::s_mov, Definition(dst), op); } else if (dst.size() == 1) { bld.copy(Definition(dst), Operand(instr->value[0].u32)); } else { @@ -2374,6 +2503,7 @@ void visit_store_vs_output(isel_context *ctx, nir_intrinsic_instr *instr) void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) { + Builder bld(ctx->program, ctx->block); unsigned write_mask = nir_intrinsic_write_mask(instr); Operand values[4]; Temp src = get_ssa_temp(ctx, instr->src[0].ssa); @@ -2413,16 +2543,8 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) values[2] = values[0]; values[0] = Operand(v1); } else { - aco_ptr exp{create_instruction(aco_opcode::exp, Format::EXP, 4, 0)}; - exp->valid_mask = false; - exp->done = false; - exp->compressed = true; - exp->dest = V_008DFC_SQ_EXP_MRTZ; - exp->enabled_mask = 0xc; - for (int i = 0; i < 4; i++) - exp->operands[i] = Operand(v1); - exp->operands[1] = Operand(values[0]); - ctx->block->instructions.emplace_back(std::move(exp)); + bld.exp(aco_opcode::exp, Operand(v1), Operand(values[0]), Operand(v1), Operand(v1), + 0xc, V_008DFC_SQ_EXP_MRTZ, true); return; } @@ -2442,23 +2564,9 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) values[1] = values[0]; values[0] = Operand(v1); } else { - aco_ptr shift{create_instruction(aco_opcode::v_lshlrev_b32, Format::VOP2, 2, 1)}; - shift->operands[0] = Operand((uint32_t) 16); - shift->operands[1] = values[0]; - Temp tmp = {ctx->program->allocateId(), v1}; - shift->definitions[0] = Definition(tmp); - ctx->block->instructions.emplace_back(std::move(shift)); - - aco_ptr exp{create_instruction(aco_opcode::exp, Format::EXP, 4, 0)}; - exp->valid_mask = false; - exp->done = false; - exp->compressed = true; - exp->dest = V_008DFC_SQ_EXP_MRTZ; - exp->enabled_mask = 0x3; - exp->operands[0] = Operand(tmp); - for (int i = 1; i < 4; i++) - exp->operands[i] = Operand(v1); - ctx->block->instructions.emplace_back(std::move(exp)); + values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u), values[0]); + bld.exp(aco_opcode::exp, values[0], Operand(v1), Operand(v1), Operand(v1), + 0x3, V_008DFC_SQ_EXP_MRTZ, true); return; } @@ -2467,9 +2575,8 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) target = V_008DFC_SQ_EXP_MRT + index; col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf; } - ASSERTED bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1; - ASSERTED bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1; - assert(!is_int8 && !is_int10); + bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1; + bool is_int10 = (ctx->options->key.fs.is_int10 >> index) & 1; switch (col_format) { @@ -2491,6 +2598,7 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) /* Special case: on GFX10, the outputs are different for 32_AR */ enabled_channels = 0x3; values[1] = values[3]; + values[3] = Operand(v1); } else { enabled_channels = 0x9; } @@ -2511,14 +2619,46 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) compr_op = 
aco_opcode::v_cvt_pknorm_i16_f32; break; - case V_028714_SPI_SHADER_UINT16_ABGR: + case V_028714_SPI_SHADER_UINT16_ABGR: { enabled_channels = 0x5; compr_op = aco_opcode::v_cvt_pk_u16_u32; + if (is_int8 || is_int10) { + /* clamp */ + uint32_t max_rgb = is_int8 ? 255 : is_int10 ? 1023 : 0; + Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb)); + + for (unsigned i = 0; i < 4; i++) { + if ((write_mask >> i) & 1) { + values[i] = bld.vop2(aco_opcode::v_min_u32, bld.def(v1), + i == 3 && is_int10 ? Operand(3u) : Operand(max_rgb_val), + values[i]); + } + } + } break; + } case V_028714_SPI_SHADER_SINT16_ABGR: enabled_channels = 0x5; compr_op = aco_opcode::v_cvt_pk_i16_i32; + if (is_int8 || is_int10) { + /* clamp */ + uint32_t max_rgb = is_int8 ? 127 : is_int10 ? 511 : 0; + uint32_t min_rgb = is_int8 ? -128 :is_int10 ? -512 : 0; + Temp max_rgb_val = bld.copy(bld.def(s1), Operand(max_rgb)); + Temp min_rgb_val = bld.copy(bld.def(s1), Operand(min_rgb)); + + for (unsigned i = 0; i < 4; i++) { + if ((write_mask >> i) & 1) { + values[i] = bld.vop2(aco_opcode::v_min_i32, bld.def(v1), + i == 3 && is_int10 ? Operand(1u) : Operand(max_rgb_val), + values[i]); + values[i] = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), + i == 3 && is_int10 ? Operand(-2u) : Operand(min_rgb_val), + values[i]); + } + } + } break; case V_028714_SPI_SHADER_32_ABGR: @@ -2532,44 +2672,28 @@ void visit_store_fs_output(isel_context *ctx, nir_intrinsic_instr *instr) if (target == V_008DFC_SQ_EXP_NULL) return; - if ((bool)compr_op) - { - for (int i = 0; i < 2; i++) - { + if ((bool) compr_op) { + for (int i = 0; i < 2; i++) { /* check if at least one of the values to be compressed is enabled */ unsigned enabled = (write_mask >> (i*2) | write_mask >> (i*2+1)) & 0x1; if (enabled) { enabled_channels |= enabled << (i*2); - aco_ptr compr{create_instruction(compr_op, Format::VOP3A, 2, 1)}; - Temp tmp{ctx->program->allocateId(), v1}; - compr->operands[0] = values[i*2].isUndefined() ? Operand(0u) : values[i*2]; - compr->operands[1] = values[i*2+1].isUndefined() ? Operand(0u): values[i*2+1]; - compr->definitions[0] = Definition(tmp); - values[i] = Operand(tmp); - ctx->block->instructions.emplace_back(std::move(compr)); + values[i] = bld.vop3(compr_op, bld.def(v1), + values[i*2].isUndefined() ? Operand(0u) : values[i*2], + values[i*2+1].isUndefined() ? Operand(0u): values[i*2+1]); } else { values[i] = Operand(v1); } } - } - - aco_ptr exp{create_instruction(aco_opcode::exp, Format::EXP, 4, 0)}; - exp->valid_mask = false; - exp->done = false; - exp->compressed = (bool) compr_op; - exp->dest = target; - exp->enabled_mask = enabled_channels; - if ((bool) compr_op) { - for (int i = 0; i < 2; i++) - exp->operands[i] = enabled_channels & (3 << (i * 2)) ? values[i] : Operand(v1); - exp->operands[2] = Operand(v1); - exp->operands[3] = Operand(v1); + values[2] = Operand(v1); + values[3] = Operand(v1); } else { for (int i = 0; i < 4; i++) - exp->operands[i] = enabled_channels & (1 << i) ? values[i] : Operand(v1); + values[i] = enabled_channels & (1 << i) ? 
values[i] : Operand(v1); } - ctx->block->instructions.emplace_back(std::move(exp)); + bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3], + enabled_channels, target, (bool) compr_op); } Operand load_lds_size_m0(isel_context *ctx) @@ -2838,12 +2962,11 @@ void emit_load_frag_coord(isel_context *ctx, Temp dst, unsigned num_components) { aco_ptr vec(create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)); for (unsigned i = 0; i < num_components; i++) - vec->operands[i] = Operand(ctx->fs_inputs[fs_input::frag_pos_0 + i]); - - if (ctx->fs_vgpr_args[fs_input::frag_pos_3]) { + vec->operands[i] = Operand(get_arg(ctx, ctx->args->ac.frag_pos[i])); + if (G_0286CC_POS_W_FLOAT_ENA(ctx->program->config->spi_ps_input_ena)) { assert(num_components == 4); Builder bld(ctx->program, ctx->block); - vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ctx->fs_inputs[fs_input::frag_pos_3]); + vec->operands[3] = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), get_arg(ctx, ctx->args->ac.frag_pos[3])); } for (Operand& op : vec->operands) @@ -2861,7 +2984,7 @@ void visit_load_interpolated_input(isel_context *ctx, nir_intrinsic_instr *instr Temp coords = get_ssa_temp(ctx, instr->src[0].ssa); unsigned idx = nir_intrinsic_base(instr); unsigned component = nir_intrinsic_component(instr); - Temp prim_mask = ctx->prim_mask; + Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask); nir_const_value* offset = nir_src_as_const_value(instr->src[1]); if (offset) { @@ -2943,7 +3066,7 @@ Temp adjust_vertex_fetch_alpha(isel_context *ctx, unsigned adjustment, Temp alph /* Convert back to the right type. */ if (adjustment == RADV_ALPHA_ADJUST_SNORM) { alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha); - Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0xbf800000u), alpha); + Temp clamp = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0xbf800000u), alpha); alpha = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xbf800000u), alpha, clamp); } else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) { alpha = bld.vop1(aco_opcode::v_cvt_f32_i32, bld.def(v1), alpha); @@ -2966,7 +3089,7 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr) } uint32_t offset = nir_instr_as_load_const(off_instr)->value[0].u32; - Temp vertex_buffers = convert_pointer_to_64_bit(ctx, ctx->vertex_buffers); + Temp vertex_buffers = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->vertex_buffers)); unsigned location = nir_intrinsic_base(instr) / 4 - VERT_ATTRIB_GENERIC0 + offset; unsigned component = nir_intrinsic_component(instr); @@ -2991,21 +3114,24 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr) Temp index; if (ctx->options->key.vs.instance_rate_inputs & (1u << location)) { uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[location]; + Temp start_instance = get_arg(ctx, ctx->args->ac.start_instance); if (divisor) { ctx->needs_instance_id = true; - + Temp instance_id = get_arg(ctx, ctx->args->ac.instance_id); if (divisor != 1) { Temp divided = bld.tmp(v1); - emit_v_div_u32(ctx, divided, as_vgpr(ctx, ctx->instance_id), divisor); - index = bld.vadd32(bld.def(v1), ctx->start_instance, divided); + emit_v_div_u32(ctx, divided, as_vgpr(ctx, instance_id), divisor); + index = bld.vadd32(bld.def(v1), start_instance, divided); } else { - index = bld.vadd32(bld.def(v1), ctx->start_instance, ctx->instance_id); + index = bld.vadd32(bld.def(v1), start_instance, instance_id); } } else { - 
index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), ctx->start_instance); + index = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), start_instance); } } else { - index = bld.vadd32(bld.def(v1), ctx->base_vertex, ctx->vertex_id); + index = bld.vadd32(bld.def(v1), + get_arg(ctx, ctx->args->ac.base_vertex), + get_arg(ctx, ctx->args->ac.vertex_id)); } if (attrib_stride != 0 && attrib_offset > attrib_stride) { @@ -3092,7 +3218,7 @@ void visit_load_input(isel_context *ctx, nir_intrinsic_instr *instr) fprintf(stderr, "\n"); } - Temp prim_mask = ctx->prim_mask; + Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask); nir_const_value* offset = nir_src_as_const_value(instr->src[0]); if (offset) { assert(offset->u32 == 0); @@ -3131,11 +3257,11 @@ Temp load_desc_ptr(isel_context *ctx, unsigned desc_set) { if (ctx->program->info->need_indirect_descriptor_sets) { Builder bld(ctx->program, ctx->block); - Temp ptr64 = convert_pointer_to_64_bit(ctx, ctx->descriptor_sets[0]); + Temp ptr64 = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->descriptor_sets[0])); return bld.smem(aco_opcode::s_load_dword, bld.def(s1), ptr64, Operand(desc_set << 2));//, false, false, false); } - return ctx->descriptor_sets[desc_set]; + return get_arg(ctx, ctx->args->descriptor_sets[desc_set]); } @@ -3156,7 +3282,7 @@ void visit_load_resource(isel_context *ctx, nir_intrinsic_instr *instr) if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset; - desc_ptr = ctx->push_constants; + desc_ptr = get_arg(ctx, ctx->args->ac.push_constants); offset = pipeline_layout->push_constant_size + 16 * idx; stride = 16; } else { @@ -3210,7 +3336,7 @@ void load_buffer(isel_context *ctx, unsigned num_components, Temp dst, bool dlc = glc && ctx->options->chip_class >= GFX10; aco_opcode op; - if (dst.type() == RegType::vgpr || (glc && ctx->options->chip_class < GFX8)) { + if (dst.type() == RegType::vgpr || (ctx->options->chip_class < GFX8 && !readonly)) { if (ctx->options->chip_class < GFX8) offset = as_vgpr(ctx, offset); @@ -3400,12 +3526,12 @@ void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr) unsigned count = instr->dest.ssa.num_components; unsigned start = (offset + index_cv->u32) / 4u; - start -= ctx->base_inline_push_consts; - if (start + count <= ctx->num_inline_push_consts) { + start -= ctx->args->ac.base_inline_push_consts; + if (start + count <= ctx->args->ac.num_inline_push_consts) { std::array elems; aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)}; for (unsigned i = 0; i < count; ++i) { - elems[i] = ctx->inline_push_consts[start + i]; + elems[i] = get_arg(ctx, ctx->args->ac.inline_push_consts[start + i]); vec->operands[i] = Operand{elems[i]}; } vec->definitions[0] = Definition(dst); @@ -3418,7 +3544,7 @@ void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr) Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa)); if (offset != 0) // TODO check if index != 0 as well index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index); - Temp ptr = convert_pointer_to_64_bit(ctx, ctx->push_constants); + Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants)); Temp vec = dst; bool trim = false; aco_opcode op; @@ -3506,8 +3632,8 @@ void visit_discard_if(isel_context *ctx, 
nir_intrinsic_instr *instr) // TODO: optimize uniform conditions Builder bld(ctx->program, ctx->block); Temp src = get_ssa_temp(ctx, instr->src[0].ssa); - assert(src.regClass() == s2); - src = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); + assert(src.regClass() == bld.lm); + src = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)); bld.pseudo(aco_opcode::p_discard_if, src); ctx->block->kind |= block_kind_uses_discard_if; return; @@ -3570,7 +3696,7 @@ void visit_discard(isel_context* ctx, nir_intrinsic_instr *instr) ctx->program->needs_exact = true; /* save exec somewhere temporarily so that it doesn't get * overwritten before the discard from outer exec masks */ - Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, s2)); + Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(0xFFFFFFFF), Operand(exec, bld.lm)); bld.pseudo(aco_opcode::p_discard_if, cond); ctx->block->kind |= block_kind_uses_discard_if; return; @@ -3857,7 +3983,7 @@ static Temp adjust_sample_index_using_fmask(isel_context *ctx, bool da, Temp coo /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK * resource descriptor is 0 (invalid), */ - Temp compare = bld.tmp(s2); + Temp compare = bld.tmp(bld.lm); bld.vopc_e64(aco_opcode::v_cmp_lg_u32, Definition(compare), Operand(0u), emit_extract_vector(ctx, fmask_desc_ptr, 1, s1)).def(0).setHint(vcc); @@ -3970,6 +4096,8 @@ void visit_image_load(isel_context *ctx, nir_intrinsic_instr *instr) tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_channels)}; load->definitions[0] = Definition(tmp); load->idxen = true; + load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT); + load->dlc = load->glc && ctx->options->chip_class >= GFX10; load->barrier = barrier_image; ctx->block->instructions.emplace_back(std::move(load)); @@ -3992,7 +4120,8 @@ void visit_image_load(isel_context *ctx, nir_intrinsic_instr *instr) load->operands[0] = Operand(coords); load->operands[1] = Operand(resource); load->definitions[0] = Definition(tmp); - load->glc = var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0; + load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0; + load->dlc = load->glc && ctx->options->chip_class >= GFX10; load->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array); load->dmask = dmask; load->unrm = true; @@ -4012,7 +4141,7 @@ void visit_image_store(isel_context *ctx, nir_intrinsic_instr *instr) bool is_array = glsl_sampler_type_is_array(type); Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa)); - bool glc = ctx->options->chip_class == GFX6 || var->data.image.access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE) ? 1 : 0; + bool glc = ctx->options->chip_class == GFX6 || var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE) ? 1 : 0; if (dim == GLSL_SAMPLER_DIM_BUF) { Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true); @@ -4345,7 +4474,7 @@ void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr) Temp elem = emit_extract_vector(ctx, data, start + i, RegClass(data.type(), elem_size_bytes / 4)); vec->operands[i] = Operand(smem_nonfs ? bld.as_uniform(elem) : elem); } - write_data = bld.tmp(smem_nonfs ? RegType::sgpr : data.type(), count * elem_size_bytes / 4); + write_data = bld.tmp(!smem ? RegType::vgpr : smem_nonfs ? 
RegType::sgpr : data.type(), count * elem_size_bytes / 4); vec->definitions[0] = Definition(write_data); ctx->block->instructions.emplace_back(std::move(vec)); } else if (!smem && data.type() != RegType::vgpr) { @@ -4562,6 +4691,7 @@ void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr) flat->operands[1] = Operand(s1); flat->glc = glc; flat->dlc = dlc; + flat->barrier = barrier_buffer; if (dst.type() == RegType::sgpr) { Temp vec = bld.tmp(RegType::vgpr, dst.size()); @@ -4642,12 +4772,12 @@ void visit_store_global(isel_context *ctx, nir_intrinsic_instr *instr) if (offset > 0 && ctx->options->chip_class < GFX9) { Temp addr0 = bld.tmp(v1), addr1 = bld.tmp(v1); Temp new_addr0 = bld.tmp(v1), new_addr1 = bld.tmp(v1); - Temp carry = bld.tmp(s2); + Temp carry = bld.tmp(bld.lm); bld.pseudo(aco_opcode::p_split_vector, Definition(addr0), Definition(addr1), addr); bld.vop2(aco_opcode::v_add_co_u32, Definition(new_addr0), bld.hint_vcc(Definition(carry)), Operand(offset), addr0); - bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(s2), + bld.vop2(aco_opcode::v_addc_co_u32, Definition(new_addr1), bld.def(bld.lm), Operand(0u), addr1, carry).def(1).setHint(vcc); @@ -4682,10 +4812,98 @@ void visit_store_global(isel_context *ctx, nir_intrinsic_instr *instr) flat->glc = glc; flat->dlc = false; flat->offset = offset; + flat->disable_wqm = true; + flat->barrier = barrier_buffer; + ctx->program->needs_exact = true; ctx->block->instructions.emplace_back(std::move(flat)); } } +void visit_global_atomic(isel_context *ctx, nir_intrinsic_instr *instr) +{ + /* return the previous value if dest is ever used */ + bool return_previous = false; + nir_foreach_use_safe(use_src, &instr->dest.ssa) { + return_previous = true; + break; + } + nir_foreach_if_use_safe(use_src, &instr->dest.ssa) { + return_previous = true; + break; + } + + Builder bld(ctx->program, ctx->block); + Temp addr = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa)); + Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa)); + + if (instr->intrinsic == nir_intrinsic_global_atomic_comp_swap) + data = bld.pseudo(aco_opcode::p_create_vector, bld.def(RegType::vgpr, data.size() * 2), + get_ssa_temp(ctx, instr->src[2].ssa), data); + + Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + + bool global = ctx->options->chip_class >= GFX9; + aco_opcode op32, op64; + switch (instr->intrinsic) { + case nir_intrinsic_global_atomic_add: + op32 = global ? aco_opcode::global_atomic_add : aco_opcode::flat_atomic_add; + op64 = global ? aco_opcode::global_atomic_add_x2 : aco_opcode::flat_atomic_add_x2; + break; + case nir_intrinsic_global_atomic_imin: + op32 = global ? aco_opcode::global_atomic_smin : aco_opcode::flat_atomic_smin; + op64 = global ? aco_opcode::global_atomic_smin_x2 : aco_opcode::flat_atomic_smin_x2; + break; + case nir_intrinsic_global_atomic_umin: + op32 = global ? aco_opcode::global_atomic_umin : aco_opcode::flat_atomic_umin; + op64 = global ? aco_opcode::global_atomic_umin_x2 : aco_opcode::flat_atomic_umin_x2; + break; + case nir_intrinsic_global_atomic_imax: + op32 = global ? aco_opcode::global_atomic_smax : aco_opcode::flat_atomic_smax; + op64 = global ? aco_opcode::global_atomic_smax_x2 : aco_opcode::flat_atomic_smax_x2; + break; + case nir_intrinsic_global_atomic_umax: + op32 = global ? aco_opcode::global_atomic_umax : aco_opcode::flat_atomic_umax; + op64 = global ? aco_opcode::global_atomic_umax_x2 : aco_opcode::flat_atomic_umax_x2; + break; + case nir_intrinsic_global_atomic_and: + op32 = global ? 
aco_opcode::global_atomic_and : aco_opcode::flat_atomic_and; + op64 = global ? aco_opcode::global_atomic_and_x2 : aco_opcode::flat_atomic_and_x2; + break; + case nir_intrinsic_global_atomic_or: + op32 = global ? aco_opcode::global_atomic_or : aco_opcode::flat_atomic_or; + op64 = global ? aco_opcode::global_atomic_or_x2 : aco_opcode::flat_atomic_or_x2; + break; + case nir_intrinsic_global_atomic_xor: + op32 = global ? aco_opcode::global_atomic_xor : aco_opcode::flat_atomic_xor; + op64 = global ? aco_opcode::global_atomic_xor_x2 : aco_opcode::flat_atomic_xor_x2; + break; + case nir_intrinsic_global_atomic_exchange: + op32 = global ? aco_opcode::global_atomic_swap : aco_opcode::flat_atomic_swap; + op64 = global ? aco_opcode::global_atomic_swap_x2 : aco_opcode::flat_atomic_swap_x2; + break; + case nir_intrinsic_global_atomic_comp_swap: + op32 = global ? aco_opcode::global_atomic_cmpswap : aco_opcode::flat_atomic_cmpswap; + op64 = global ? aco_opcode::global_atomic_cmpswap_x2 : aco_opcode::flat_atomic_cmpswap_x2; + break; + default: + unreachable("visit_atomic_global should only be called with nir_intrinsic_global_atomic_* instructions."); + } + aco_opcode op = instr->dest.ssa.bit_size == 32 ? op32 : op64; + aco_ptr flat{create_instruction(op, global ? Format::GLOBAL : Format::FLAT, 3, return_previous ? 1 : 0)}; + flat->operands[0] = Operand(addr); + flat->operands[1] = Operand(s1); + flat->operands[2] = Operand(data); + if (return_previous) + flat->definitions[0] = Definition(dst); + flat->glc = return_previous; + flat->dlc = false; /* Not needed for atomics */ + flat->offset = 0; + flat->disable_wqm = true; + flat->barrier = barrier_buffer; + ctx->program->needs_exact = true; + ctx->block->instructions.emplace_back(std::move(flat)); +} + void emit_memory_barrier(isel_context *ctx, nir_intrinsic_instr *instr) { Builder bld(ctx->program, ctx->block); switch(instr->intrinsic) { @@ -5018,11 +5236,12 @@ void visit_load_sample_mask_in(isel_context *ctx, nir_intrinsic_instr *instr) { Builder bld(ctx->program, ctx->block); - Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), ctx->fs_inputs[fs_input::ancillary], Operand(8u), Operand(4u)); + Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), + get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u)); Temp ps_iter_mask = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(ps_iter_masks[log2_ps_iter_samples])); Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id, ps_iter_mask); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, ctx->fs_inputs[fs_input::sample_coverage]); + bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, get_arg(ctx, ctx->args->ac.sample_coverage)); } Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Temp src) @@ -5033,30 +5252,30 @@ Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Te return src; } if (op == nir_op_iand && cluster_size == 4) { //subgroupClusteredAnd(val, 4) -> ~wqm(exec & ~val) - Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src); - return bld.sop1(aco_opcode::s_not_b64, bld.def(s2), bld.def(s1, scc), - bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2), bld.def(s1, scc), tmp)); + Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src); + return bld.sop1(Builder::s_not, bld.def(bld.lm), bld.def(s1, scc), + bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), tmp)); 
} else if (op == nir_op_ior && cluster_size == 4) { //subgroupClusteredOr(val, 4) -> wqm(val & exec) - return bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2), bld.def(s1, scc), - bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2))); - } else if (op == nir_op_iand && cluster_size == 64) { + return bld.sop1(Builder::s_wqm, bld.def(bld.lm), bld.def(s1, scc), + bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))); + } else if (op == nir_op_iand && cluster_size == ctx->program->wave_size) { //subgroupAnd(val) -> (exec & ~val) == 0 - Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp(); - return bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(0u), Operand(-1u), bld.scc(tmp)); - } else if (op == nir_op_ior && cluster_size == 64) { + Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp(); + return bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(0u), Operand(-1u), bld.scc(tmp)); + } else if (op == nir_op_ior && cluster_size == ctx->program->wave_size) { //subgroupOr(val) -> (val & exec) != 0 - Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)).def(1).getTemp(); + Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)).def(1).getTemp(); return bool_to_vector_condition(ctx, tmp); - } else if (op == nir_op_ixor && cluster_size == 64) { + } else if (op == nir_op_ixor && cluster_size == ctx->program->wave_size) { //subgroupXor(val) -> s_bcnt1_i32_b64(val & exec) & 1 - Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); - tmp = bld.sop1(aco_opcode::s_bcnt1_i32_b64, bld.def(s2), bld.def(s1, scc), tmp); + Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)); + tmp = bld.sop1(Builder::s_bcnt1_i32, bld.def(s1), bld.def(s1, scc), tmp); tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), tmp, Operand(1u)).def(1).getTemp(); return bool_to_vector_condition(ctx, tmp); } else { //subgroupClustered{And,Or,Xor}(val, n) -> - //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) + //lane_id = v_mbcnt_hi_u32_b32(-1, v_mbcnt_lo_u32_b32(-1, 0)) ; just v_mbcnt_lo_u32_b32 on wave32 //cluster_offset = ~(n - 1) & lane_id //cluster_mask = ((1 << n) - 1) //subgroupClusteredAnd(): @@ -5065,31 +5284,36 @@ Temp emit_boolean_reduce(isel_context *ctx, nir_op op, unsigned cluster_size, Te // return ((val & exec) >> cluster_offset) & cluster_mask != 0 //subgroupClusteredXor(): // return v_bnt_u32_b32(((val & exec) >> cluster_offset) & cluster_mask, 0) & 1 != 0 - Temp lane_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u))); + Temp lane_id = emit_mbcnt(ctx, bld.def(v1)); Temp cluster_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(~uint32_t(cluster_size - 1)), lane_id); Temp tmp; if (op == nir_op_iand) - tmp = bld.sop2(aco_opcode::s_orn2_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); + tmp = bld.sop2(Builder::s_orn2, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)); else - tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); + tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)); 
uint32_t cluster_mask = cluster_size == 32 ? -1 : (1u << cluster_size) - 1u; - tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp); + + if (ctx->program->chip_class <= GFX7) + tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), tmp, cluster_offset); + else if (ctx->program->wave_size == 64) + tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), cluster_offset, tmp); + else + tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), cluster_offset, tmp); tmp = emit_extract_vector(ctx, tmp, 0, v1); if (cluster_mask != 0xffffffff) tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(cluster_mask), tmp); Definition cmp_def = Definition(); if (op == nir_op_iand) { - cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2), Operand(cluster_mask), tmp).def(0); + cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(cluster_mask), tmp).def(0); } else if (op == nir_op_ior) { - cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp).def(0); + cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0); } else if (op == nir_op_ixor) { tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), bld.vop3(aco_opcode::v_bcnt_u32_b32, bld.def(v1), tmp, Operand(0u))); - cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp).def(0); + cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp).def(0); } cmp_def.setHint(vcc); return cmp_def.getTemp(); @@ -5105,23 +5329,22 @@ Temp emit_boolean_exclusive_scan(isel_context *ctx, nir_op op, Temp src) //subgroupExclusiveXor(val) -> mbcnt(val & exec) & 1 != 0 Temp tmp; if (op == nir_op_iand) - tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src); + tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src); else - tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); + tmp = bld.sop2(Builder::s_and, bld.def(s2), bld.def(s1, scc), src, Operand(exec, bld.lm)); Builder::Result lohi = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), tmp); Temp lo = lohi.def(0).getTemp(); Temp hi = lohi.def(1).getTemp(); - Temp mbcnt = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), hi, - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), lo, Operand(0u))); + Temp mbcnt = emit_mbcnt(ctx, bld.def(v1), Operand(lo), Operand(hi)); Definition cmp_def = Definition(); if (op == nir_op_iand) - cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(s2), Operand(0u), mbcnt).def(0); + cmp_def = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0); else if (op == nir_op_ior) - cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), mbcnt).def(0); + cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), mbcnt).def(0); else if (op == nir_op_ixor) - cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), + cmp_def = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), mbcnt)).def(0); cmp_def.setHint(vcc); return cmp_def.getTemp(); @@ -5136,11 +5359,11 @@ Temp emit_boolean_inclusive_scan(isel_context *ctx, nir_op op, Temp src) //subgroupInclusiveXor(val) -> subgroupExclusiveXor(val) ^^ val Temp tmp = emit_boolean_exclusive_scan(ctx, op, src); if (op == nir_op_iand) - return bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), tmp, src); + return bld.sop2(Builder::s_and, 
bld.def(bld.lm), bld.def(s1, scc), tmp, src); else if (op == nir_op_ior) - return bld.sop2(aco_opcode::s_or_b64, bld.def(s2), bld.def(s1, scc), tmp, src); + return bld.sop2(Builder::s_or, bld.def(bld.lm), bld.def(s1, scc), tmp, src); else if (op == nir_op_ixor) - return bld.sop2(aco_opcode::s_xor_b64, bld.def(s2), bld.def(s1, scc), tmp, src); + return bld.sop2(Builder::s_xor, bld.def(bld.lm), bld.def(s1, scc), tmp, src); assert(false); return Temp(); @@ -5166,16 +5389,35 @@ void emit_uniform_subgroup(isel_context *ctx, nir_intrinsic_instr *instr, Temp s void emit_interp_center(isel_context *ctx, Temp dst, Temp pos1, Temp pos2) { Builder bld(ctx->program, ctx->block); - Temp p1 = ctx->fs_inputs[fs_input::persp_center_p1]; - Temp p2 = ctx->fs_inputs[fs_input::persp_center_p2]; + Temp persp_center = get_arg(ctx, ctx->args->ac.persp_center); + Temp p1 = emit_extract_vector(ctx, persp_center, 0, v1); + Temp p2 = emit_extract_vector(ctx, persp_center, 1, v1); + + Temp ddx_1, ddx_2, ddy_1, ddy_2; + uint32_t dpp_ctrl0 = dpp_quad_perm(0, 0, 0, 0); + uint32_t dpp_ctrl1 = dpp_quad_perm(1, 1, 1, 1); + uint32_t dpp_ctrl2 = dpp_quad_perm(2, 2, 2, 2); /* Build DD X/Y */ - Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_quad_perm(0, 0, 0, 0)); - Temp ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_quad_perm(1, 1, 1, 1)); - Temp ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_quad_perm(2, 2, 2, 2)); - Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_quad_perm(0, 0, 0, 0)); - Temp ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_quad_perm(1, 1, 1, 1)); - Temp ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_quad_perm(2, 2, 2, 2)); + if (ctx->program->chip_class >= GFX8) { + Temp tl_1 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p1, dpp_ctrl0); + ddx_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl1); + ddy_1 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p1, tl_1, dpp_ctrl2); + Temp tl_2 = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), p2, dpp_ctrl0); + ddx_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl1); + ddy_2 = bld.vop2_dpp(aco_opcode::v_sub_f32, bld.def(v1), p2, tl_2, dpp_ctrl2); + } else { + Temp tl_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl0); + ddx_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl1); + ddx_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_1, tl_1); + ddx_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p1, (1 << 15) | dpp_ctrl2); + ddx_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddx_2, tl_1); + Temp tl_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl0); + ddy_1 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl1); + ddy_1 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_1, tl_2); + ddy_2 = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), p2, (1 << 15) | dpp_ctrl2); + ddy_2 = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), ddy_2, tl_2); + } /* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */ Temp tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_1, pos1, p1); @@ -5198,17 +5440,33 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_load_barycentric_pixel: case nir_intrinsic_load_barycentric_centroid: { glsl_interp_mode mode = (glsl_interp_mode)nir_intrinsic_interp_mode(instr); - fs_input input = get_interp_input(instr->intrinsic, mode); 
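/* Editorial aside (not part of the patch): the quad-permute arithmetic that
 * emit_interp_center builds above, written out for a single pixel quad. Lane 0
 * is the top-left pixel of the quad, lane 1 its right neighbour and lane 2 the
 * pixel below it, so the two subtractions estimate d/dx and d/dy. */
static float interp_at_offset(const float quad[4], unsigned lane,
                              float pos1, float pos2)
{
   float tl  = quad[0];        /* v_mov_b32 with dpp_quad_perm(0, 0, 0, 0) */
   float ddx = quad[1] - tl;   /* v_sub_f32 with dpp_quad_perm(1, 1, 1, 1) */
   float ddy = quad[2] - tl;   /* v_sub_f32 with dpp_quad_perm(2, 2, 2, 2) */
   return quad[lane] + ddx * pos1 + ddy * pos2; /* the v_mad_f32 chain */
}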
- - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - if (input == fs_input::max_inputs) { - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - Operand(0u), Operand(0u)); - } else { - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - ctx->fs_inputs[input], - ctx->fs_inputs[input + 1]); + Temp bary = Temp(0, s2); + switch (mode) { + case INTERP_MODE_SMOOTH: + case INTERP_MODE_NONE: + if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel) + bary = get_arg(ctx, ctx->args->ac.persp_center); + else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid) + bary = ctx->persp_centroid; + else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample) + bary = get_arg(ctx, ctx->args->ac.persp_sample); + break; + case INTERP_MODE_NOPERSPECTIVE: + if (instr->intrinsic == nir_intrinsic_load_barycentric_pixel) + bary = get_arg(ctx, ctx->args->ac.linear_center); + else if (instr->intrinsic == nir_intrinsic_load_barycentric_centroid) + bary = ctx->linear_centroid; + else if (instr->intrinsic == nir_intrinsic_load_barycentric_sample) + bary = get_arg(ctx, ctx->args->ac.linear_sample); + break; + default: + break; } + Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Temp p1 = emit_extract_vector(ctx, bary, 0, v1); + Temp p2 = emit_extract_vector(ctx, bary, 1, v1); + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), + Operand(p1), Operand(p2)); emit_split_vector(ctx, dst, 2); break; } @@ -5252,7 +5510,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) Temp pck0 = bld.tmp(v1); Temp carry = bld.vadd32(Definition(pck0), tmp0, addr, true).def(1).getTemp(); tmp1 = as_vgpr(ctx, tmp1); - Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(s2)), tmp1, Operand(0u), carry); + Temp pck1 = bld.vop2_e64(aco_opcode::v_addc_co_u32, bld.def(v1), bld.hint_vcc(bld.def(bld.lm)), tmp1, Operand(0u), carry); addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), pck0, pck1); /* sample_pos = flat_load_dwordx2 addr */ @@ -5279,20 +5537,20 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) } case nir_intrinsic_load_front_face: { bld.vopc(aco_opcode::v_cmp_lg_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), - Operand(0u), ctx->fs_inputs[fs_input::front_face]).def(0).setHint(vcc); + Operand(0u), get_arg(ctx, ctx->args->ac.front_face)).def(0).setHint(vcc); break; } case nir_intrinsic_load_view_index: case nir_intrinsic_load_layer_id: { if (instr->intrinsic == nir_intrinsic_load_view_index && (ctx->stage & sw_vs)) { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), Operand(ctx->view_index)); + bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.view_index))); break; } unsigned idx = nir_intrinsic_base(instr); bld.vintrp(aco_opcode::v_interp_mov_f32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), - Operand(2u), bld.m0(ctx->prim_mask), idx, 0); + Operand(2u), bld.m0(get_arg(ctx, ctx->args->ac.prim_mask)), idx, 0); break; } case nir_intrinsic_load_frag_coord: { @@ -5300,8 +5558,8 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) break; } case nir_intrinsic_load_sample_pos: { - Temp posx = ctx->fs_inputs[fs_input::frag_pos_0]; - Temp posy = ctx->fs_inputs[fs_input::frag_pos_1]; + Temp posx = get_arg(ctx, ctx->args->ac.frag_pos[0]); + Temp posy = get_arg(ctx, ctx->args->ac.frag_pos[1]); bld.pseudo(aco_opcode::p_create_vector, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), posx.id() ? 
bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posx) : Operand(0u), posy.id() ? bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), posy) : Operand(0u)); @@ -5385,6 +5643,18 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_store_global: visit_store_global(ctx, instr); break; + case nir_intrinsic_global_atomic_add: + case nir_intrinsic_global_atomic_imin: + case nir_intrinsic_global_atomic_umin: + case nir_intrinsic_global_atomic_imax: + case nir_intrinsic_global_atomic_umax: + case nir_intrinsic_global_atomic_and: + case nir_intrinsic_global_atomic_or: + case nir_intrinsic_global_atomic_xor: + case nir_intrinsic_global_atomic_exchange: + case nir_intrinsic_global_atomic_comp_swap: + visit_global_atomic(ctx, instr); + break; case nir_intrinsic_ssbo_atomic_add: case nir_intrinsic_ssbo_atomic_imin: case nir_intrinsic_ssbo_atomic_umin: @@ -5409,7 +5679,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_barrier: { unsigned* bsize = ctx->program->info->cs.block_size; unsigned workgroup_size = bsize[0] * bsize[1] * bsize[2]; - if (workgroup_size > 64) + if (workgroup_size > ctx->program->wave_size) bld.sopp(aco_opcode::s_barrier); break; } @@ -5421,68 +5691,90 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_memory_barrier_shared: emit_memory_barrier(ctx, instr); break; - case nir_intrinsic_load_num_work_groups: - case nir_intrinsic_load_work_group_id: + case nir_intrinsic_load_num_work_groups: { + Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.num_work_groups))); + emit_split_vector(ctx, dst, 3); + break; + } case nir_intrinsic_load_local_invocation_id: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - Temp* ids; - if (instr->intrinsic == nir_intrinsic_load_num_work_groups) - ids = ctx->num_workgroups; - else if (instr->intrinsic == nir_intrinsic_load_work_group_id) - ids = ctx->workgroup_ids; - else - ids = ctx->local_invocation_ids; + bld.copy(Definition(dst), Operand(get_arg(ctx, ctx->args->ac.local_invocation_ids))); + emit_split_vector(ctx, dst, 3); + break; + } + case nir_intrinsic_load_work_group_id: { + Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + struct ac_arg *args = ctx->args->ac.workgroup_ids; bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - ids[0].id() ? Operand(ids[0]) : Operand(1u), - ids[1].id() ? Operand(ids[1]) : Operand(1u), - ids[2].id() ? Operand(ids[2]) : Operand(1u)); + args[0].used ? Operand(get_arg(ctx, args[0])) : Operand(0u), + args[1].used ? Operand(get_arg(ctx, args[1])) : Operand(0u), + args[2].used ? Operand(get_arg(ctx, args[2])) : Operand(0u)); emit_split_vector(ctx, dst, 3); break; } case nir_intrinsic_load_local_invocation_index: { - Temp id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u))); - Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u), ctx->tg_size); - bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id); + Temp id = emit_mbcnt(ctx, bld.def(v1)); + + /* The tg_size bits [6:11] contain the subgroup id, + * we need this multiplied by the wave size, and then OR the thread id to it. 
+ */ + if (ctx->program->wave_size == 64) { + /* After the s_and the bits are already multiplied by 64 (left shifted by 6) so we can just feed that to v_or */ + Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u), + get_arg(ctx, ctx->args->ac.tg_size)); + bld.vop2(aco_opcode::v_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, id); + } else { + /* Extract the bit field and multiply the result by 32 (left shift by 5), then do the OR */ + Temp tg_num = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16))); + bld.vop3(aco_opcode::v_lshl_or_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), tg_num, Operand(0x5u), id); + } break; } case nir_intrinsic_load_subgroup_id: { if (ctx->stage == compute_cs) { - Temp tg_num = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfc0u), ctx->tg_size); - bld.sop2(aco_opcode::s_lshr_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), tg_num, Operand(0x6u)); + bld.sop2(aco_opcode::s_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), + get_arg(ctx, ctx->args->ac.tg_size), Operand(0x6u | (0x6u << 16))); } else { bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x0u)); } break; } case nir_intrinsic_load_subgroup_invocation: { - bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand((uint32_t) -1), - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u))); + emit_mbcnt(ctx, Definition(get_ssa_temp(ctx, &instr->dest.ssa))); break; } case nir_intrinsic_load_num_subgroups: { if (ctx->stage == compute_cs) - bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu), ctx->tg_size); + bld.sop2(aco_opcode::s_and_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), bld.def(s1, scc), Operand(0x3fu), + get_arg(ctx, ctx->args->ac.tg_size)); else bld.sop1(aco_opcode::s_mov_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), Operand(0x1u)); break; } case nir_intrinsic_ballot: { - Definition tmp = bld.def(s2); Temp src = get_ssa_temp(ctx, instr->src[0].ssa); + Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); + Definition tmp = bld.def(dst.regClass()); + Definition lanemask_tmp = dst.size() == bld.lm.size() ? 
tmp : bld.def(src.regClass()); if (instr->src[0].ssa->bit_size == 1) { - assert(src.regClass() == s2); - bld.sop2(aco_opcode::s_and_b64, tmp, bld.def(s1, scc), Operand(exec, s2), src); + assert(src.regClass() == bld.lm); + bld.sop2(Builder::s_and, lanemask_tmp, bld.def(s1, scc), Operand(exec, bld.lm), src); } else if (instr->src[0].ssa->bit_size == 32 && src.regClass() == v1) { - bld.vopc(aco_opcode::v_cmp_lg_u32, tmp, Operand(0u), src); + bld.vopc(aco_opcode::v_cmp_lg_u32, lanemask_tmp, Operand(0u), src); } else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) { - bld.vopc(aco_opcode::v_cmp_lg_u64, tmp, Operand(0u), src); + bld.vopc(aco_opcode::v_cmp_lg_u64, lanemask_tmp, Operand(0u), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); fprintf(stderr, "\n"); } - emit_wqm(ctx, tmp.getTemp(), get_ssa_temp(ctx, &instr->dest.ssa)); + if (dst.size() != bld.lm.size()) { + /* Wave32 with ballot size set to 64 */ + bld.pseudo(aco_opcode::p_create_vector, Definition(tmp), lanemask_tmp.getTemp(), Operand(0u)); + } + emit_wqm(ctx, tmp.getTemp(), dst); break; } case nir_intrinsic_shuffle: @@ -5505,15 +5797,21 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi); emit_split_vector(ctx, dst, 2); } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == s1) { - assert(src.regClass() == s2); - Temp tmp = bld.sopc(aco_opcode::s_bitcmp1_b64, bld.def(s1, scc), src, tid); + assert(src.regClass() == bld.lm); + Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, tid); bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst); } else if (instr->dest.ssa.bit_size == 1 && tid.regClass() == v1) { - assert(src.regClass() == s2); - Temp tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src); + assert(src.regClass() == bld.lm); + Temp tmp; + if (ctx->program->chip_class <= GFX7) + tmp = bld.vop3(aco_opcode::v_lshr_b64, bld.def(v2), src, tid); + else if (ctx->program->wave_size == 64) + tmp = bld.vop3(aco_opcode::v_lshrrev_b64, bld.def(v2), tid, src); + else + tmp = bld.vop2_e64(aco_opcode::v_lshrrev_b32, bld.def(v1), tid, src); tmp = emit_extract_vector(ctx, tmp, 0, v1); tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), tmp); - emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), tmp), dst); + emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp), dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -5524,7 +5822,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) } case nir_intrinsic_load_sample_id: { bld.vop3(aco_opcode::v_bfe_u32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), - ctx->fs_inputs[ancillary], Operand(8u), Operand(4u)); + get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u)); break; } case nir_intrinsic_load_sample_mask_in: { @@ -5546,9 +5844,9 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi); emit_split_vector(ctx, dst, 2); } else if (instr->dest.ssa.bit_size == 1) { - assert(src.regClass() == s2); - Temp tmp = bld.sopc(aco_opcode::s_bitcmp1_b64, bld.def(s1, scc), src, - bld.sop1(aco_opcode::s_ff1_i32_b64, bld.def(s1), Operand(exec, s2))); + assert(src.regClass() == bld.lm); + Temp tmp = bld.sopc(Builder::s_bitcmp1, bld.def(s1, scc), src, + bld.sop1(Builder::s_ff1_i32, bld.def(s1), 
Operand(exec, bld.lm))); bool_to_vector_condition(ctx, emit_wqm(ctx, tmp), dst); } else if (src.regClass() == s1) { bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src); @@ -5564,22 +5862,22 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_vote_all: { Temp src = get_ssa_temp(ctx, instr->src[0].ssa); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - assert(src.regClass() == s2); - assert(dst.regClass() == s2); + assert(src.regClass() == bld.lm); + assert(dst.regClass() == bld.lm); - Temp tmp = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp(); - Temp val = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(0u), Operand(-1u), bld.scc(tmp)); + Temp tmp = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp(); + Temp val = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(0u), Operand(-1u), bld.scc(tmp)); emit_wqm(ctx, val, dst); break; } case nir_intrinsic_vote_any: { Temp src = get_ssa_temp(ctx, instr->src[0].ssa); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - assert(src.regClass() == s2); - assert(dst.regClass() == s2); + assert(src.regClass() == bld.lm); + assert(dst.regClass() == bld.lm); - Temp tmp = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand(exec, s2), src).def(1).getTemp(); - Temp val = bld.sop2(aco_opcode::s_cselect_b64, bld.def(s2), Operand(-1u), Operand(0u), bld.scc(tmp)); + Temp tmp = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), Operand(exec, bld.lm), src).def(1).getTemp(); + Temp val = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), Operand(0u), bld.scc(tmp)); emit_wqm(ctx, val, dst); break; } @@ -5591,7 +5889,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) nir_op op = (nir_op) nir_intrinsic_reduction_op(instr); unsigned cluster_size = instr->intrinsic == nir_intrinsic_reduce ? nir_intrinsic_cluster_size(instr) : 0; - cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : 64, 64)); + cluster_size = util_next_power_of_two(MIN2(cluster_size ? 
cluster_size : ctx->program->wave_size, ctx->program->wave_size)); if (!ctx->divergent_vals[instr->src[0].ssa->index] && (op == nir_op_ior || op == nir_op_iand)) { emit_uniform_subgroup(ctx, instr, src); @@ -5662,7 +5960,7 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) Temp tmp_dst = bld.tmp(dst.regClass()); reduce->definitions[0] = Definition(tmp_dst); - reduce->definitions[1] = bld.def(s2); // used internally + reduce->definitions[1] = bld.def(ctx->program->lane_mask); // used internally reduce->definitions[2] = Definition(); reduce->definitions[3] = Definition(scc, s1); reduce->definitions[4] = Definition(); @@ -5681,25 +5979,33 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) } else { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); unsigned lane = nir_src_as_const_value(instr->src[1])->u32; + uint32_t dpp_ctrl = dpp_quad_perm(lane, lane, lane, lane); + if (instr->dest.ssa.bit_size == 1) { - assert(src.regClass() == s2); + assert(src.regClass() == bld.lm); + assert(dst.regClass() == bld.lm); uint32_t half_mask = 0x11111111u << lane; Temp mask_tmp = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), Operand(half_mask), Operand(half_mask)); - Temp tmp = bld.tmp(s2); - bld.sop1(aco_opcode::s_wqm_b64, Definition(tmp), - bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), mask_tmp, - bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)))); + Temp tmp = bld.tmp(bld.lm); + bld.sop1(Builder::s_wqm, Definition(tmp), + bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp, + bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)))); emit_wqm(ctx, tmp, dst); } else if (instr->dest.ssa.bit_size == 32) { - emit_wqm(ctx, - bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, - dpp_quad_perm(lane, lane, lane, lane)), - dst); + if (ctx->program->chip_class >= GFX8) + emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), dst); + else + emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), dst); } else if (instr->dest.ssa.bit_size == 64) { Temp lo = bld.tmp(v1), hi = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src); - lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_quad_perm(lane, lane, lane, lane))); - hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_quad_perm(lane, lane, lane, lane))); + if (ctx->program->chip_class >= GFX8) { + lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl)); + hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl)); + } else { + lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, (1 << 15) | dpp_ctrl)); + hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, (1 << 15) | dpp_ctrl)); + } bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi); emit_split_vector(ctx, dst, 2); } else { @@ -5730,29 +6036,42 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_quad_swap_diagonal: dpp_ctrl = dpp_quad_perm(3, 2, 1, 0); break; - case nir_intrinsic_quad_swizzle_amd: { + case nir_intrinsic_quad_swizzle_amd: dpp_ctrl = nir_intrinsic_swizzle_mask(instr); break; - } default: break; } + if (ctx->program->chip_class < GFX8) + dpp_ctrl |= (1 << 15); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); if (instr->dest.ssa.bit_size == 1) { - assert(src.regClass() == s2); + 
assert(src.regClass() == bld.lm); src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src); - src = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl); - Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(s2), Operand(0u), src); + if (ctx->program->chip_class >= GFX8) + src = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl); + else + src = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl); + Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src); emit_wqm(ctx, tmp, dst); } else if (instr->dest.ssa.bit_size == 32) { - Temp tmp = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl); + Temp tmp; + if (ctx->program->chip_class >= GFX8) + tmp = bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl); + else + tmp = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl); emit_wqm(ctx, tmp, dst); } else if (instr->dest.ssa.bit_size == 64) { Temp lo = bld.tmp(v1), hi = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src); - lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl)); - hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl)); + if (ctx->program->chip_class >= GFX8) { + lo = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), lo, dpp_ctrl)); + hi = emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), hi, dpp_ctrl)); + } else { + lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, dpp_ctrl)); + hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, dpp_ctrl)); + } bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi); emit_split_vector(ctx, dst, 2); } else { @@ -5795,14 +6114,14 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); if (dst.regClass() == v1) { /* src2 is ignored for writelane. 
RA assigns the same reg for dst */ - emit_wqm(ctx, bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1), val, lane, src), dst); + emit_wqm(ctx, bld.writelane(bld.def(v1), val, lane, src), dst); } else if (dst.regClass() == v2) { Temp src_lo = bld.tmp(v1), src_hi = bld.tmp(v1); Temp val_lo = bld.tmp(s1), val_hi = bld.tmp(s1); bld.pseudo(aco_opcode::p_split_vector, Definition(src_lo), Definition(src_hi), src); bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val); - Temp lo = emit_wqm(ctx, bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1), val_lo, lane, src_hi)); - Temp hi = emit_wqm(ctx, bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1), val_hi, lane, src_hi)); + Temp lo = emit_wqm(ctx, bld.writelane(bld.def(v1), val_lo, lane, src_hi)); + Temp hi = emit_wqm(ctx, bld.writelane(bld.def(v1), val_hi, lane, src_hi)); bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi); emit_split_vector(ctx, dst, 2); } else { @@ -5817,9 +6136,8 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) RegClass rc = RegClass(src.type(), 1); Temp mask_lo = bld.tmp(rc), mask_hi = bld.tmp(rc); bld.pseudo(aco_opcode::p_split_vector, Definition(mask_lo), Definition(mask_hi), src); - Temp tmp = bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), mask_lo, Operand(0u)); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - Temp wqm_tmp = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), mask_hi, tmp); + Temp wqm_tmp = emit_mbcnt(ctx, bld.def(v1), Operand(mask_lo), Operand(mask_hi)); emit_wqm(ctx, wqm_tmp, dst); break; } @@ -5844,15 +6162,15 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) break; case nir_intrinsic_demote_if: { Temp src = get_ssa_temp(ctx, instr->src[0].ssa); - assert(src.regClass() == s2); - Temp cond = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), src, Operand(exec, s2)); + assert(src.regClass() == bld.lm); + Temp cond = bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm)); bld.pseudo(aco_opcode::p_demote_to_helper, cond); ctx->block->kind |= block_kind_uses_demote; ctx->program->needs_exact = true; break; } case nir_intrinsic_first_invocation: { - emit_wqm(ctx, bld.sop1(aco_opcode::s_ff1_i32_b64, bld.def(s1), Operand(exec, s2)), + emit_wqm(ctx, bld.sop1(Builder::s_ff1_i32, bld.def(s1), Operand(exec, bld.lm)), get_ssa_temp(ctx, &instr->dest.ssa)); break; } @@ -5862,27 +6180,27 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_vertex_id_zero_base: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), ctx->vertex_id); + bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id)); break; } case nir_intrinsic_load_first_vertex: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), ctx->base_vertex); + bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.base_vertex)); break; } case nir_intrinsic_load_base_instance: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), ctx->start_instance); + bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.start_instance)); break; } case nir_intrinsic_load_instance_id: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), ctx->instance_id); + bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.instance_id)); break; } case nir_intrinsic_load_draw_id: { Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - bld.copy(Definition(dst), ctx->draw_id); + bld.copy(Definition(dst), get_arg(ctx, 
ctx->args->ac.draw_id)); break; } default: @@ -5937,12 +6255,27 @@ void tex_fetch_ptrs(isel_context *ctx, nir_tex_instr *instr, } if (samp_ptr) { *samp_ptr = get_sampler_desc(ctx, sampler_deref_instr, ACO_DESC_SAMPLER, instr, false, false); + if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT && ctx->options->chip_class < GFX8) { - fprintf(stderr, "Unimplemented sampler descriptor: "); - nir_print_instr(&instr->instr, stderr); - fprintf(stderr, "\n"); - abort(); - // TODO: build samp_ptr = and(samp_ptr, res_ptr) + /* fix sampler aniso on SI/CI: samp[0] = samp[0] & img[7] */ + Builder bld(ctx->program, ctx->block); + + /* to avoid unnecessary moves, we split and recombine sampler and image */ + Temp img[8] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), + bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)}; + Temp samp[4] = {bld.tmp(s1), bld.tmp(s1), bld.tmp(s1), bld.tmp(s1)}; + bld.pseudo(aco_opcode::p_split_vector, Definition(img[0]), Definition(img[1]), + Definition(img[2]), Definition(img[3]), Definition(img[4]), + Definition(img[5]), Definition(img[6]), Definition(img[7]), *res_ptr); + bld.pseudo(aco_opcode::p_split_vector, Definition(samp[0]), Definition(samp[1]), + Definition(samp[2]), Definition(samp[3]), *samp_ptr); + + samp[0] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), samp[0], img[7]); + *res_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s8), + img[0], img[1], img[2], img[3], + img[4], img[5], img[6], img[7]); + *samp_ptr = bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), + samp[0], samp[1], samp[2], samp[3]); } } if (fmask_ptr && (instr->op == nir_texop_txf_ms || @@ -5964,14 +6297,14 @@ void build_cube_select(isel_context *ctx, Temp ma, Temp id, Temp deriv, Operand two(0x40000000u); Operand four(0x40800000u); - Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), Operand(0u), ma); + Temp is_ma_positive = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), ma); Temp sgn_ma = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), neg_one, one, is_ma_positive); Temp neg_sgn_ma = bld.vop2(aco_opcode::v_sub_f32, bld.def(v1), Operand(0u), sgn_ma); - Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(s2)), four, id); + Temp is_ma_z = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), four, id); Temp is_ma_y = bld.vopc(aco_opcode::v_cmp_le_f32, bld.def(s2), two, id); - is_ma_y = bld.sop2(aco_opcode::s_andn2_b64, bld.hint_vcc(bld.def(s2)), is_ma_y, is_ma_z); - Temp is_not_ma_x = bld.sop2(aco_opcode::s_or_b64, bld.hint_vcc(bld.def(s2)), bld.def(s1, scc), is_ma_z, is_ma_y); + is_ma_y = bld.sop2(Builder::s_andn2, bld.hint_vcc(bld.def(bld.lm)), is_ma_y, is_ma_z); + Temp is_not_ma_x = bld.sop2(aco_opcode::s_or_b64, bld.hint_vcc(bld.def(bld.lm)), bld.def(s1, scc), is_ma_z, is_ma_y); // select sc Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), deriv_z, deriv_x, is_not_ma_x); @@ -6451,7 +6784,7 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) Operand((uint32_t)V_008F14_IMG_NUM_FORMAT_SINT), bld.scc(compare_cube_wa)); } - tg4_compare_cube_wa64 = bld.tmp(s2); + tg4_compare_cube_wa64 = bld.tmp(bld.lm); bool_to_vector_condition(ctx, compare_cube_wa, tg4_compare_cube_wa64); nfmt = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), nfmt, Operand(26u)); @@ -6485,11 +6818,6 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) } } - if (!(has_ddx && has_ddy) && !has_lod && !level_zero && - instr->sampler_dim != GLSL_SAMPLER_DIM_MS && - 
instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS) - coords = emit_wqm(ctx, coords, bld.tmp(coords.regClass()), true); - std::vector args; if (has_offset) args.emplace_back(Operand(offset)); @@ -6505,7 +6833,7 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) if (has_lod) args.emplace_back(lod); - Operand arg; + Temp arg; if (args.size() > 1) { aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, args.size(), 1)}; unsigned size = 0; @@ -6517,12 +6845,20 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) Temp tmp = bld.tmp(rc); vec->definitions[0] = Definition(tmp); ctx->block->instructions.emplace_back(std::move(vec)); - arg = Operand(tmp); + arg = tmp; } else { assert(args[0].isTemp()); - arg = Operand(as_vgpr(ctx, args[0].getTemp())); + arg = as_vgpr(ctx, args[0].getTemp()); } + /* we don't need the bias, sample index, compare value or offset to be + * computed in WQM but if the p_create_vector copies the coordinates, then it + * needs to be in WQM */ + if (!(has_ddx && has_ddy) && !has_lod && !level_zero && + instr->sampler_dim != GLSL_SAMPLER_DIM_MS && + instr->sampler_dim != GLSL_SAMPLER_DIM_SUBPASS_MS) + arg = emit_wqm(ctx, arg, bld.tmp(arg.regClass()), true); + if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) { //FIXME: if (ctx->abi->gfx9_stride_size_workaround) return ac_build_buffer_load_format_gfx9_safe() @@ -6581,7 +6917,7 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) assert(dmask == 1 && dst.regClass() == v1); assert(dst.id() != tmp_dst.id()); - Temp tmp = bld.tmp(s2); + Temp tmp = bld.tmp(bld.lm); bld.vopc(aco_opcode::v_cmp_eq_u32, Definition(tmp), Operand(0u), tmp_dst).def(0).setHint(vcc); bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand((uint32_t)-1), tmp); @@ -6654,7 +6990,7 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) } tex.reset(create_instruction(opcode, Format::MIMG, 3, 1)); - tex->operands[0] = arg; + tex->operands[0] = Operand(arg); tex->operands[1] = Operand(resource); tex->operands[2] = Operand(sampler); tex->dim = dim; @@ -6701,20 +7037,48 @@ Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa) void visit_phi(isel_context *ctx, nir_phi_instr *instr) { aco_ptr phi; - unsigned num_src = exec_list_length(&instr->srcs); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == s2); + assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask); - aco_opcode opcode = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index] ? aco_opcode::p_phi : aco_opcode::p_linear_phi; + bool logical = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index]; + logical |= ctx->block->kind & block_kind_merge; + aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi; + /* we want a sorted list of sources, since the predecessor list is also sorted */ std::map phi_src; - bool all_undef = true; - nir_foreach_phi_src(src, instr) { + nir_foreach_phi_src(src, instr) phi_src[src->pred->index] = src->src.ssa; - if (src->src.ssa->parent_instr->type != nir_instr_type_ssa_undef) - all_undef = false; + + std::vector& preds = logical ? 
ctx->block->logical_preds : ctx->block->linear_preds; + unsigned num_operands = 0; + Operand operands[std::max(exec_list_length(&instr->srcs), (unsigned)preds.size())]; + unsigned num_defined = 0; + unsigned cur_pred_idx = 0; + for (std::pair src : phi_src) { + if (cur_pred_idx < preds.size()) { + /* handle missing preds (IF merges with discard/break) and extra preds (loop exit with discard) */ + unsigned block = ctx->cf_info.nir_to_aco[src.first]; + unsigned skipped = 0; + while (cur_pred_idx + skipped < preds.size() && preds[cur_pred_idx + skipped] != block) + skipped++; + if (cur_pred_idx + skipped < preds.size()) { + for (unsigned i = 0; i < skipped; i++) + operands[num_operands++] = Operand(dst.regClass()); + cur_pred_idx += skipped; + } else { + continue; + } + } + cur_pred_idx++; + Operand op = get_phi_operand(ctx, src.second); + operands[num_operands++] = op; + num_defined += !op.isUndefined(); } - if (all_undef) { + /* handle block_kind_continue_or_break at loop exit blocks */ + while (cur_pred_idx++ < preds.size()) + operands[num_operands++] = Operand(dst.regClass()); + + if (num_defined == 0) { Builder bld(ctx->program, ctx->block); if (dst.regClass() == s1) { bld.sop1(aco_opcode::s_mov_b32, Definition(dst), Operand(0u)); @@ -6730,17 +7094,41 @@ void visit_phi(isel_context *ctx, nir_phi_instr *instr) return; } + /* we can use a linear phi in some cases if one src is undef */ + if (dst.is_linear() && ctx->block->kind & block_kind_merge && num_defined == 1) { + phi.reset(create_instruction(aco_opcode::p_linear_phi, Format::PSEUDO, num_operands, 1)); + + Block *linear_else = &ctx->program->blocks[ctx->block->linear_preds[1]]; + Block *invert = &ctx->program->blocks[linear_else->linear_preds[0]]; + assert(invert->kind & block_kind_invert); + + unsigned then_block = invert->linear_preds[0]; + + Block* insert_block = NULL; + for (unsigned i = 0; i < num_operands; i++) { + Operand op = operands[i]; + if (op.isUndefined()) + continue; + insert_block = ctx->block->logical_preds[i] == then_block ? 
invert : ctx->block; + phi->operands[0] = op; + break; + } + assert(insert_block); /* should be handled by the "num_defined == 0" case above */ + phi->operands[1] = Operand(dst.regClass()); + phi->definitions[0] = Definition(dst); + insert_block->instructions.emplace(insert_block->instructions.begin(), std::move(phi)); + return; + } + /* try to scalarize vector phis */ if (instr->dest.ssa.bit_size != 1 && dst.size() > 1) { // TODO: scalarize linear phis on divergent ifs bool can_scalarize = (opcode == aco_opcode::p_phi || !(ctx->block->kind & block_kind_merge)); std::array new_vec; - for (std::pair& pair : phi_src) { - Operand src = get_phi_operand(ctx, pair.second); - if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end()) { + for (unsigned i = 0; can_scalarize && (i < num_operands); i++) { + Operand src = operands[i]; + if (src.isTemp() && ctx->allocated_vec.find(src.tempId()) == ctx->allocated_vec.end()) can_scalarize = false; - break; - } } if (can_scalarize) { unsigned num_components = instr->dest.ssa.num_components; @@ -6749,12 +7137,10 @@ void visit_phi(isel_context *ctx, nir_phi_instr *instr) aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)}; for (unsigned k = 0; k < num_components; k++) { - phi.reset(create_instruction(opcode, Format::PSEUDO, num_src, 1)); - std::map::iterator it = phi_src.begin(); - for (unsigned i = 0; i < num_src; i++) { - Operand src = get_phi_operand(ctx, it->second); + phi.reset(create_instruction(opcode, Format::PSEUDO, num_operands, 1)); + for (unsigned i = 0; i < num_operands; i++) { + Operand src = operands[i]; phi->operands[i] = src.isTemp() ? Operand(ctx->allocated_vec[src.tempId()][k]) : Operand(rc); - ++it; } Temp phi_dst = {ctx->program->allocateId(), rc}; phi->definitions[0] = Definition(phi_dst); @@ -6769,43 +7155,9 @@ void visit_phi(isel_context *ctx, nir_phi_instr *instr) } } - unsigned extra_src = 0; - if (opcode == aco_opcode::p_linear_phi && (ctx->block->kind & block_kind_loop_exit) && - ctx->program->blocks[ctx->block->index-2].kind & block_kind_continue_or_break) { - extra_src++; - } - - phi.reset(create_instruction(opcode, Format::PSEUDO, num_src + extra_src, 1)); - - /* if we have a linear phi on a divergent if, we know that one src is undef */ - if (opcode == aco_opcode::p_linear_phi && ctx->block->kind & block_kind_merge) { - assert(extra_src == 0); - Block* block; - /* we place the phi either in the invert-block or in the current block */ - if (phi_src.begin()->second->parent_instr->type != nir_instr_type_ssa_undef) { - assert((++phi_src.begin())->second->parent_instr->type == nir_instr_type_ssa_undef); - Block& linear_else = ctx->program->blocks[ctx->block->linear_preds[1]]; - block = &ctx->program->blocks[linear_else.linear_preds[0]]; - assert(block->kind & block_kind_invert); - phi->operands[0] = get_phi_operand(ctx, phi_src.begin()->second); - } else { - assert((++phi_src.begin())->second->parent_instr->type != nir_instr_type_ssa_undef); - block = ctx->block; - phi->operands[0] = get_phi_operand(ctx, (++phi_src.begin())->second); - } - phi->operands[1] = Operand(dst.regClass()); - phi->definitions[0] = Definition(dst); - block->instructions.emplace(block->instructions.begin(), std::move(phi)); - return; - } - - std::map::iterator it = phi_src.begin(); - for (unsigned i = 0; i < num_src; i++) { - phi->operands[i] = get_phi_operand(ctx, it->second); - ++it; - } - for (unsigned i = 0; i < extra_src; i++) - phi->operands[num_src + i] = Operand(dst.regClass()); 
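/* Editorial aside (not part of the patch): a simplified stand-alone model of the
 * source-matching loop above. Types and names here are invented for
 * illustration; the real code works on Operand/Temp and the nir_to_aco table in
 * cf_info. Sources are keyed by NIR predecessor index, translated to ACO block
 * indices and lined up against the sorted predecessor list; unmatched
 * predecessors get undef operands and sources whose predecessor disappeared are
 * dropped. */
#include <map>
#include <optional>
#include <vector>

using PhiOp = std::optional<int>; /* std::nullopt stands in for an undef operand */

static std::vector<PhiOp>
match_phi_sources(const std::vector<unsigned> &preds,             /* sorted ACO preds */
                  const std::map<unsigned, unsigned> &nir_to_aco, /* NIR block -> ACO block */
                  const std::map<unsigned, int> &phi_src)         /* NIR pred -> value */
{
   std::vector<PhiOp> ops;
   unsigned cur = 0;
   for (const auto &src : phi_src) {
      unsigned block = nir_to_aco.at(src.first);
      unsigned skipped = 0;
      while (cur + skipped < preds.size() && preds[cur + skipped] != block)
         skipped++;
      if (cur + skipped == preds.size())
         continue;                     /* extra pred: drop the source */
      for (unsigned i = 0; i < skipped; i++)
         ops.push_back(std::nullopt);  /* missing pred: undef operand */
      cur += skipped + 1;
      ops.push_back(src.second);
   }
   while (cur++ < preds.size())
      ops.push_back(std::nullopt);     /* loop exit with continue_or_break */
   return ops;
}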
+ phi.reset(create_instruction(opcode, Format::PSEUDO, num_operands, 1)); + for (unsigned i = 0; i < num_operands; i++) + phi->operands[i] = operands[i]; phi->definitions[0] = Definition(dst); ctx->block->instructions.emplace(ctx->block->instructions.begin(), std::move(phi)); } @@ -6851,6 +7203,7 @@ void visit_jump(isel_context *ctx, nir_jump_instr *instr) return; } ctx->cf_info.parent_loop.has_divergent_branch = true; + ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index; break; case nir_jump_continue: logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx]; @@ -6862,6 +7215,7 @@ void visit_jump(isel_context *ctx, nir_jump_instr *instr) we must ensure that they are handled correctly */ ctx->cf_info.parent_loop.has_divergent_continue = true; ctx->cf_info.parent_loop.has_divergent_branch = true; + ctx->cf_info.nir_to_aco[instr->instr.block->index] = ctx->block->index; } else { /* uniform continue - directly jump to the loop header */ ctx->block->kind |= block_kind_uniform; @@ -6933,6 +7287,9 @@ void visit_block(isel_context *ctx, nir_block *block) //abort(); } } + + if (!ctx->cf_info.parent_loop.has_divergent_branch) + ctx->cf_info.nir_to_aco[block->index] = ctx->block->index; } @@ -7055,7 +7412,7 @@ static void begin_divergent_if_then(isel_context *ctx, if_context *ic, Temp cond ctx->block->kind |= block_kind_branch; /* branch to linear then block */ - assert(cond.regClass() == s2); + assert(cond.regClass() == ctx->program->lane_mask); aco_ptr branch; branch.reset(create_instruction(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0)); branch->operands[0] = Operand(cond); @@ -7199,7 +7556,7 @@ static void visit_if(isel_context *ctx, nir_if *if_stmt) ctx->block->kind |= block_kind_uniform; /* emit branch */ - assert(cond.regClass() == s2); + assert(cond.regClass() == bld.lm); // TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction cond = bool_to_scalar_condition(ctx, cond); @@ -7393,12 +7750,12 @@ static void create_vs_exports(isel_context *ctx) if (outinfo->export_prim_id) { ctx->vs_output.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1; - ctx->vs_output.outputs[VARYING_SLOT_PRIMITIVE_ID][0] = ctx->vs_prim_id; + ctx->vs_output.outputs[VARYING_SLOT_PRIMITIVE_ID][0] = get_arg(ctx, ctx->args->vs_prim_id); } if (ctx->options->key.has_multiview_view_index) { ctx->vs_output.mask[VARYING_SLOT_LAYER] |= 0x1; - ctx->vs_output.outputs[VARYING_SLOT_LAYER][0] = as_vgpr(ctx, ctx->view_index); + ctx->vs_output.outputs[VARYING_SLOT_LAYER][0] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index)); } /* the order these position exports are created is important */ @@ -7502,7 +7859,7 @@ static void emit_streamout(isel_context *ctx, unsigned stream) Builder bld(ctx->program, ctx->block); Temp so_buffers[4]; - Temp buf_ptr = convert_pointer_to_64_bit(ctx, ctx->streamout_buffers); + Temp buf_ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->streamout_buffers)); for (unsigned i = 0; i < 4; i++) { unsigned stride = ctx->program->info->so.strides[i]; if (!stride) @@ -7512,10 +7869,9 @@ static void emit_streamout(isel_context *ctx, unsigned stream) } Temp so_vtx_count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), - ctx->streamout_config, Operand(0x70010u)); + get_arg(ctx, ctx->args->streamout_config), Operand(0x70010u)); - Temp tid = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u))); + Temp 
tid = emit_mbcnt(ctx, bld.def(v1)); Temp can_emit = bld.vopc(aco_opcode::v_cmp_gt_i32, bld.def(s2), so_vtx_count, tid); @@ -7524,7 +7880,7 @@ static void emit_streamout(isel_context *ctx, unsigned stream) bld.reset(ctx->block); - Temp so_write_index = bld.vadd32(bld.def(v1), ctx->streamout_write_idx, tid); + Temp so_write_index = bld.vadd32(bld.def(v1), get_arg(ctx, ctx->args->streamout_write_idx), tid); Temp so_write_offset[4]; @@ -7535,13 +7891,15 @@ static void emit_streamout(isel_context *ctx, unsigned stream) if (stride == 1) { Temp offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), - ctx->streamout_write_idx, ctx->streamout_offset[i]); + get_arg(ctx, ctx->args->streamout_write_idx), + get_arg(ctx, ctx->args->streamout_offset[i])); Temp new_offset = bld.vadd32(bld.def(v1), offset, tid); so_write_offset[i] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), new_offset); } else { Temp offset = bld.v_mul_imm(bld.def(v1), so_write_index, stride * 4u); - Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u), ctx->streamout_offset[i]); + Temp offset2 = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(4u), + get_arg(ctx, ctx->args->streamout_offset[i])); so_write_offset[i] = bld.vadd32(bld.def(v1), offset, offset2); } } @@ -7561,6 +7919,19 @@ static void emit_streamout(isel_context *ctx, unsigned stream) } /* end namespace */ +void split_arguments(isel_context *ctx, Pseudo_instruction *startpgm) +{ + /* Split all arguments except for the first (ring_offsets) and the last + * (exec) so that the dead channels don't stay live throughout the program. + */ + for (unsigned i = 1; i < startpgm->definitions.size() - 1; i++) { + if (startpgm->definitions[i].regClass().size() > 1) { + emit_split_vector(ctx, startpgm->definitions[i].getTemp(), + startpgm->definitions[i].regClass().size()); + } + } +} + void handle_bc_optimize(isel_context *ctx) { /* needed when SPI_PS_IN_CONTROL.BC_OPTIMIZE_DISABLE is set to 0 */ @@ -7568,56 +7939,119 @@ void handle_bc_optimize(isel_context *ctx) uint32_t spi_ps_input_ena = ctx->program->config->spi_ps_input_ena; bool uses_center = G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena); bool uses_centroid = G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) || G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena); + ctx->persp_centroid = get_arg(ctx, ctx->args->ac.persp_centroid); + ctx->linear_centroid = get_arg(ctx, ctx->args->ac.linear_centroid); if (uses_center && uses_centroid) { - Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(s2)), ctx->prim_mask, Operand(0u)); + Temp sel = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)), + get_arg(ctx, ctx->args->ac.prim_mask), Operand(0u)); if (G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena)) { + Temp new_coord[2]; for (unsigned i = 0; i < 2; i++) { - Temp new_coord = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), - ctx->fs_inputs[fs_input::persp_centroid_p1 + i], - ctx->fs_inputs[fs_input::persp_center_p1 + i], - sel); - ctx->fs_inputs[fs_input::persp_centroid_p1 + i] = new_coord; + Temp persp_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_centroid), i, v1); + Temp persp_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.persp_center), i, v1); + new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), + persp_centroid, persp_center, sel); } + ctx->persp_centroid = bld.tmp(v2); + bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->persp_centroid), + 
Operand(new_coord[0]), Operand(new_coord[1])); + emit_split_vector(ctx, ctx->persp_centroid, 2); } if (G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena)) { + Temp new_coord[2]; for (unsigned i = 0; i < 2; i++) { - Temp new_coord = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), - ctx->fs_inputs[fs_input::linear_centroid_p1 + i], - ctx->fs_inputs[fs_input::linear_center_p1 + i], - sel); - ctx->fs_inputs[fs_input::linear_centroid_p1 + i] = new_coord; + Temp linear_centroid = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_centroid), i, v1); + Temp linear_center = emit_extract_vector(ctx, get_arg(ctx, ctx->args->ac.linear_center), i, v1); + new_coord[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), + linear_centroid, linear_center, sel); } + ctx->linear_centroid = bld.tmp(v2); + bld.pseudo(aco_opcode::p_create_vector, Definition(ctx->linear_centroid), + Operand(new_coord[0]), Operand(new_coord[1])); + emit_split_vector(ctx, ctx->linear_centroid, 2); } } } +void setup_fp_mode(isel_context *ctx, nir_shader *shader) +{ + Program *program = ctx->program; + + unsigned float_controls = shader->info.float_controls_execution_mode; + + program->next_fp_mode.preserve_signed_zero_inf_nan32 = + float_controls & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; + program->next_fp_mode.preserve_signed_zero_inf_nan16_64 = + float_controls & (FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 | + FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64); + + program->next_fp_mode.must_flush_denorms32 = + float_controls & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; + program->next_fp_mode.must_flush_denorms16_64 = + float_controls & (FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 | + FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64); + + program->next_fp_mode.care_about_round32 = + float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32); + + program->next_fp_mode.care_about_round16_64 = + float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 | + FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64); + + /* default to preserving fp16 and fp64 denorms, since it's free */ + if (program->next_fp_mode.must_flush_denorms16_64) + program->next_fp_mode.denorm16_64 = 0; + else + program->next_fp_mode.denorm16_64 = fp_denorm_keep; + + /* preserving fp32 denorms is expensive, so only do it if asked */ + if (float_controls & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) + program->next_fp_mode.denorm32 = fp_denorm_keep; + else + program->next_fp_mode.denorm32 = 0; + + if (float_controls & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) + program->next_fp_mode.round32 = fp_round_tz; + else + program->next_fp_mode.round32 = fp_round_ne; + + if (float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64)) + program->next_fp_mode.round16_64 = fp_round_tz; + else + program->next_fp_mode.round16_64 = fp_round_ne; + + ctx->block->fp_mode = program->next_fp_mode; +} + void select_program(Program *program, unsigned shader_count, struct nir_shader *const *shaders, ac_shader_config* config, - struct radv_shader_info *info, - struct radv_nir_compiler_options *options) + struct radv_shader_args *args) { - isel_context ctx = setup_isel_context(program, shader_count, shaders, config, info, options); + isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args); for (unsigned i = 0; i < shader_count; i++) { nir_shader *nir = shaders[i]; init_context(&ctx, nir); + setup_fp_mode(&ctx, nir); + if (!i) { 
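/* Editorial aside (not part of the patch): the BC_OPTIMIZE fixup in
 * handle_bc_optimize above, reduced to scalar form. Roughly: when the sign bit
 * of prim_mask is set, centroid and center barycentrics coincide and only the
 * center values are valid, so each centroid component falls back to the
 * corresponding center value; that is all the v_cmp_lt_i32 + v_cndmask_b32 pair
 * implements. */
static float bc_optimize_component(int prim_mask, float centroid, float center)
{
   return prim_mask < 0 ? center : centroid;
}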
- add_startpgm(&ctx); /* needs to be after init_context() for FS */ + /* needs to be after init_context() for FS */ + Pseudo_instruction *startpgm = add_startpgm(&ctx); append_logical_start(ctx.block); + split_arguments(&ctx, startpgm); } if_context ic; if (shader_count >= 2) { Builder bld(ctx.program, ctx.block); Temp count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), ctx.merged_wave_info, Operand((8u << 16) | (i * 8u))); - Temp thread_id = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, bld.def(v1), Operand((uint32_t) -1), - bld.vop3(aco_opcode::v_mbcnt_lo_u32_b32, bld.def(v1), Operand((uint32_t) -1), Operand(0u))); - Temp cond = bld.vopc(aco_opcode::v_cmp_gt_u32, bld.hint_vcc(bld.def(s2)), count, thread_id); + Temp thread_id = emit_mbcnt(&ctx, bld.def(v1)); + Temp cond = bld.vopc(aco_opcode::v_cmp_gt_u32, bld.hint_vcc(bld.def(bld.lm)), count, thread_id); begin_divergent_if_then(&ctx, &ic, cond); } @@ -7648,6 +8082,8 @@ void select_program(Program *program, ralloc_free(ctx.divergent_vals); } + program->config->float_mode = program->blocks[0].fp_mode.val; + append_logical_end(ctx.block); ctx.block->kind |= block_kind_uniform; Builder bld(ctx.program, ctx.block);
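/* Editorial aside (not part of the patch): a host-side model of the s_bfe_u32
 * encoding used above (an assumption about the exact bit positions; the low
 * bits of src1 hold the field offset, bits [22:16] the field width). The
 * tg_size extract passes 0x6 | (0x6 << 16), i.e. bits [6:11], the subgroup id;
 * the merged_wave_info extract passes (i * 8) | (8 << 16), the lane count of
 * wave i. */
#include <cstdint>

static uint32_t s_bfe_u32_model(uint32_t src0, uint32_t src1)
{
   uint32_t offset = src1 & 0x1f;
   uint32_t width = (src1 >> 16) & 0x7f;
   if (width == 0)
      return 0;
   if (width > 31)
      return src0 >> offset;
   return (src0 >> offset) & ((1u << width) - 1u);
}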