X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fcompiler%2Faco_instruction_selection.cpp;h=1c2b17223061f66a033397449dddead0ada9d795;hb=98b4cc7110d47ae2c8bab7c530cad757300a5323;hp=777072437f0905e2b62557989c39634c0b6c904c;hpb=8cfddc91999965545eb2d973557840354f40a2fa;p=mesa.git diff --git a/src/amd/compiler/aco_instruction_selection.cpp b/src/amd/compiler/aco_instruction_selection.cpp index 777072437f0..1c2b1722306 100644 --- a/src/amd/compiler/aco_instruction_selection.cpp +++ b/src/amd/compiler/aco_instruction_selection.cpp @@ -85,6 +85,7 @@ struct if_context { unsigned BB_if_idx; unsigned invert_idx; + bool uniform_has_then_branch; bool then_branch_divergent; Block BB_invert; Block BB_endif; @@ -274,7 +275,6 @@ Temp emit_extract_vector(isel_context* ctx, Temp src, uint32_t idx, RegClass dst assert(src.bytes() > (idx * dst_rc.bytes())); Builder bld(ctx->program, ctx->block); auto it = ctx->allocated_vec.find(src.id()); - /* the size check needs to be early because elements other than 0 may be garbage */ if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) { if (it->second[idx].regClass() == dst_rc) { return it->second[idx]; @@ -561,16 +561,8 @@ void emit_vop2_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode o Temp t = src0; src0 = src1; src1 = t; - } else if (src0.type() == RegType::vgpr && - op != aco_opcode::v_madmk_f32 && - op != aco_opcode::v_madak_f32 && - op != aco_opcode::v_madmk_f16 && - op != aco_opcode::v_madak_f16) { - /* If the instruction is not commutative, we emit a VOP3A instruction */ - bld.vop2_e64(op, Definition(dst), src0, src1); - return; } else { - src1 = bld.copy(bld.def(RegType::vgpr, src1.size()), src1); //TODO: as_vgpr + src1 = as_vgpr(ctx, src1); } } @@ -626,6 +618,24 @@ void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode o if (src0.type() == RegType::vgpr) { /* to swap the operands, we might also have to change the opcode */ switch (op) { + case aco_opcode::v_cmp_lt_f16: + op = aco_opcode::v_cmp_gt_f16; + break; + case aco_opcode::v_cmp_ge_f16: + op = aco_opcode::v_cmp_le_f16; + break; + case aco_opcode::v_cmp_lt_i16: + op = aco_opcode::v_cmp_gt_i16; + break; + case aco_opcode::v_cmp_ge_i16: + op = aco_opcode::v_cmp_le_i16; + break; + case aco_opcode::v_cmp_lt_u16: + op = aco_opcode::v_cmp_gt_u16; + break; + case aco_opcode::v_cmp_ge_u16: + op = aco_opcode::v_cmp_le_u16; + break; case aco_opcode::v_cmp_lt_f32: op = aco_opcode::v_cmp_gt_f32; break; @@ -695,10 +705,10 @@ void emit_sopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode o } void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst, - aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes) + aco_opcode v16_op, aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes) { - aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : s32_op; - aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : v32_op; + aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : instr->src[0].src.ssa->bit_size == 32 ? s32_op : aco_opcode::num_opcodes; + aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : instr->src[0].src.ssa->bit_size == 32 ? 
v32_op : v16_op; bool divergent_vals = ctx->divergent_vals[instr->dest.dest.ssa.index]; bool use_valu = s_op == aco_opcode::num_opcodes || divergent_vals || @@ -738,12 +748,18 @@ void emit_bcsel(isel_context *ctx, nir_alu_instr *instr, Temp dst) if (dst.type() == RegType::vgpr) { aco_ptr bcsel; - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + then = as_vgpr(ctx, then); + els = as_vgpr(ctx, els); + + Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), els, then, cond); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { then = as_vgpr(ctx, then); els = as_vgpr(ctx, els); bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), els, then, cond); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { Temp then_lo = bld.tmp(v1), then_hi = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(then_lo), Definition(then_hi), then); Temp else_lo = bld.tmp(v1), else_hi = bld.tmp(v1); @@ -926,6 +942,58 @@ Temp emit_floor_f64(isel_context *ctx, Builder& bld, Definition dst, Temp val) return add->definitions[0].getTemp(); } +Temp convert_int(Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits, bool is_signed, Temp dst=Temp()) { + if (!dst.id()) { + if (dst_bits % 32 == 0 || src.type() == RegType::sgpr) + dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u)); + else + dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword()); + } + + if (dst.bytes() == src.bytes() && dst_bits < src_bits) + return bld.copy(Definition(dst), src); + else if (dst.bytes() < src.bytes()) + return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u)); + + Temp tmp = dst; + if (dst_bits == 64) + tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1); + + if (tmp == src) { + } else if (src.regClass() == s1) { + if (is_signed) + bld.sop1(src_bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16, Definition(tmp), src); + else + bld.sop2(aco_opcode::s_and_b32, Definition(tmp), bld.def(s1, scc), Operand(src_bits == 8 ? 0xFFu : 0xFFFFu), src); + } else { + assert(src_bits != 8 || src.regClass() == v1b); + assert(src_bits != 16 || src.regClass() == v2b); + aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; + sdwa->operands[0] = Operand(src); + sdwa->definitions[0] = Definition(tmp); + if (is_signed) + sdwa->sel[0] = src_bits == 8 ? sdwa_sbyte : sdwa_sword; + else + sdwa->sel[0] = src_bits == 8 ? sdwa_ubyte : sdwa_uword; + sdwa->dst_sel = tmp.bytes() == 2 ? 
sdwa_uword : sdwa_udword; + bld.insert(std::move(sdwa)); + } + + if (dst_bits == 64) { + if (is_signed && dst.regClass() == s2) { + Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand(31u)); + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high); + } else if (is_signed && dst.regClass() == v2) { + Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), tmp); + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high); + } else { + bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u)); + } + } + + return dst; +} + void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) { if (!instr->dest.dest.is_ssa) { @@ -1062,9 +1130,8 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_isign: { Temp src = get_alu_src(ctx, instr->src[0]); if (dst.regClass() == s1) { - Temp tmp = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u)); - Temp gtz = bld.sopc(aco_opcode::s_cmp_gt_i32, bld.def(s1, scc), src, Operand(0u)); - bld.sop2(aco_opcode::s_add_i32, Definition(dst), bld.def(s1, scc), gtz, tmp); + Temp tmp = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand((uint32_t)-1)); + bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand(1u)); } else if (dst.regClass() == s2) { Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u)); Temp neqz; @@ -1075,9 +1142,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) /* SCC gets zero-extended to 64 bit */ bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz)); } else if (dst.regClass() == v1) { - Temp tmp = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src); - Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); - bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(1u), tmp, gtz); + bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand((uint32_t)-1), src, Operand(1u)); } else if (dst.regClass() == v2) { Temp upper = emit_extract_vector(ctx, src, 1, v1); Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper); @@ -1526,11 +1591,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmul: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1])); + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, tmp, true); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true); - } else if (dst.size() == 2) { - bld.vop3(aco_opcode::v_mul_f64, Definition(dst), get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_mul_f64, Definition(dst), src0, src1); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1539,11 +1609,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fadd: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1])); + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, 
tmp, true); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true); - } else if (dst.size() == 2) { - bld.vop3(aco_opcode::v_add_f64, Definition(dst), get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, src1); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1554,15 +1629,21 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) case nir_op_fsub: { Temp src0 = get_alu_src(ctx, instr->src[0]); Temp src1 = get_alu_src(ctx, instr->src[1]); - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr) + emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, tmp, false); + else + emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, tmp, true); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr) emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false); else emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), - get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + as_vgpr(ctx, src0), as_vgpr(ctx, src1)); VOP3A_instruction* sub = static_cast(add); sub->neg[1] = true; } else { @@ -1573,18 +1654,21 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmax: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1])); + if (dst.regClass() == v2b) { + // TODO: check fp_mode.must_flush_denorms16_64 + Temp tmp = bld.tmp(v1); + emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, tmp, true); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) { - Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2), - get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2), src0, src1); bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp); } else { - bld.vop3(aco_opcode::v_max_f64, Definition(dst), - get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + bld.vop3(aco_opcode::v_max_f64, Definition(dst), src0, src1); } } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1594,18 +1678,21 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmin: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1])); + if (dst.regClass() == v2b) { + // TODO: check fp_mode.must_flush_denorms16_64 + Temp tmp = bld.tmp(v1); + emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, tmp, true); + 
bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) { - Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), - get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), src0, src1); bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp); } else { - bld.vop3(aco_opcode::v_min_f64, Definition(dst), - get_alu_src(ctx, instr->src[0]), - as_vgpr(ctx, get_alu_src(ctx, instr->src[1]))); + bld.vop3(aco_opcode::v_min_f64, Definition(dst), src0, src1); } } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1615,7 +1702,11 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmax3: { - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f16, tmp, false); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f32, dst, ctx->block->fp_mode.must_flush_denorms32); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1625,7 +1716,11 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmin3: { - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f16, tmp, false); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f32, dst, ctx->block->fp_mode.must_flush_denorms32); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1635,7 +1730,11 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fmed3: { - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f16, tmp, false); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f32, dst, ctx->block->fp_mode.must_flush_denorms32); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1731,9 +1830,13 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_frsq: { - if (dst.size() == 1) { - emit_rsq(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); - } else if (dst.size() == 2) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_rsq_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + emit_rsq(ctx, bld, Definition(dst), src); + } else if (dst.regClass() == v2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1744,11 +1847,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fneg: { Temp src = get_alu_src(ctx, instr->src[0]); - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp 
tmp = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x8000u), as_vgpr(ctx, src)); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { if (ctx->block->fp_mode.must_flush_denorms32) src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src)); bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src)); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->block->fp_mode.must_flush_denorms16_64) src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src)); Temp upper = bld.tmp(v1), lower = bld.tmp(v1); @@ -1764,11 +1870,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fabs: { Temp src = get_alu_src(ctx, instr->src[0]); - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFu), as_vgpr(ctx, src)); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { if (ctx->block->fp_mode.must_flush_denorms32) src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src)); bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src)); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->block->fp_mode.must_flush_denorms16_64) src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src)); Temp upper = bld.tmp(v1), lower = bld.tmp(v1); @@ -1784,11 +1893,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fsat: { Temp src = get_alu_src(ctx, instr->src[0]); - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp tmp = bld.vop3(aco_opcode::v_med3_f16, bld.def(v1), Operand(0u), Operand(0x3f800000u), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand(0u), Operand(0x3f800000u), src); /* apparently, it is not necessary to flush denorms if this instruction is used with these operands */ // TODO: confirm that this holds under any circumstances - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand(0u)); VOP3A_instruction* vop3 = static_cast(add); vop3->clamp = true; @@ -1800,8 +1912,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_flog2: { - if (dst.size() == 1) { - emit_log2(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_log_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + emit_log2(ctx, bld, Definition(dst), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1810,9 +1926,13 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_frcp: { - if (dst.size() == 1) { - emit_rcp(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); - } else if (dst.size() == 2) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_rcp_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, 
Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + emit_rcp(ctx, bld, Definition(dst), src); + } else if (dst.regClass() == v2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1822,7 +1942,11 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fexp2: { - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp src = get_alu_src(ctx, instr->src[0]); + Temp tmp = bld.vop1(aco_opcode::v_exp_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1832,9 +1956,13 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fsqrt: { - if (dst.size() == 1) { - emit_sqrt(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); - } else if (dst.size() == 2) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_sqrt_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + emit_sqrt(ctx, bld, Definition(dst), src); + } else if (dst.regClass() == v2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1844,9 +1972,13 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_ffract: { - if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp src = get_alu_src(ctx, instr->src[0]); + Temp tmp = bld.vop1(aco_opcode::v_fract_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); @@ -1856,10 +1988,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_ffloor: { - if (dst.size() == 1) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_floor_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst); - } else if (dst.size() == 2) { - emit_floor_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); + } else if (dst.regClass() == v2) { + emit_floor_f64(ctx, bld, Definition(dst), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1868,15 +2004,17 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fceil: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_ceil_f16, bld.def(v1), src0); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->options->chip_class >= GFX7) { emit_vop1_instruction(ctx, instr, 
aco_opcode::v_ceil_f64, dst); } else { /* GFX6 doesn't support V_CEIL_F64, lower it. */ - Temp src0 = get_alu_src(ctx, instr->src[0]); - /* trunc = trunc(src0) * if (src0 > 0.0 && src0 != trunc) * trunc += 1.0 @@ -1897,10 +2035,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_ftrunc: { - if (dst.size() == 1) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_trunc_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst); - } else if (dst.size() == 2) { - emit_trunc_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0])); + } else if (dst.regClass() == v2) { + emit_trunc_f64(ctx, bld, Definition(dst), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1909,15 +2051,17 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_fround_even: { - if (dst.size() == 1) { + Temp src0 = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_rndne_f16, bld.def(v1), src0); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { if (ctx->options->chip_class >= GFX7) { emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst); } else { /* GFX6 doesn't support V_RNDNE_F64, lower it. */ - Temp src0 = get_alu_src(ctx, instr->src[0]); - Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1); bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0); @@ -1949,11 +2093,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fsin: case nir_op_fcos: { - Temp src = get_alu_src(ctx, instr->src[0]); + Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0])); aco_ptr norm; - if (dst.size() == 1) { - Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u)); - Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, as_vgpr(ctx, src)); + Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u)); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop2(aco_opcode::v_mul_f16, bld.def(v1), half_pi, src); + aco_opcode opcode = instr->op == nir_op_fsin ? 
aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16; + tmp = bld.vop1(opcode, bld.def(v1), tmp); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src); /* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */ if (ctx->options->chip_class < GFX9) @@ -1969,14 +2118,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_ldexp: { - if (dst.size() == 1) { - bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst), - as_vgpr(ctx, get_alu_src(ctx, instr->src[0])), - get_alu_src(ctx, instr->src[1])); - } else if (dst.size() == 2) { - bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst), - as_vgpr(ctx, get_alu_src(ctx, instr->src[0])), - get_alu_src(ctx, instr->src[1])); + Temp src0 = get_alu_src(ctx, instr->src[0]); + Temp src1 = get_alu_src(ctx, instr->src[1]); + if (dst.regClass() == v2b) { + Temp tmp = bld.tmp(v1); + emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, tmp, false); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst), as_vgpr(ctx, src0), src1); + } else if (dst.regClass() == v2) { + bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst), as_vgpr(ctx, src0), src1); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1985,12 +2136,14 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_frexp_sig: { - if (dst.size() == 1) { - bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst), - get_alu_src(ctx, instr->src[0])); - } else if (dst.size() == 2) { - bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst), - get_alu_src(ctx, instr->src[0])); + Temp src = get_alu_src(ctx, instr->src[0]); + if (dst.regClass() == v2b) { + Temp tmp = bld.vop1(aco_opcode::v_frexp_mant_f16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { + bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst), src); + } else if (dst.regClass() == v2) { + bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -1999,12 +2152,15 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_frexp_exp: { - if (instr->src[0].src.ssa->bit_size == 32) { - bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst), - get_alu_src(ctx, instr->src[0])); + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size == 16) { + Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src); + tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand(0u)); + convert_int(bld, tmp, 8, 32, true, dst); + } else if (instr->src[0].src.ssa->bit_size == 32) { + bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst), src); } else if (instr->src[0].src.ssa->bit_size == 64) { - bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst), - get_alu_src(ctx, instr->src[0])); + bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst), src); } else { fprintf(stderr, "Unimplemented NIR instr bit size: "); nir_print_instr(&instr->instr, stderr); @@ -2014,12 +2170,20 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_fsign: { Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0])); - 
if (dst.size() == 1) { + if (dst.regClass() == v2b) { + Temp one = bld.copy(bld.def(v1), Operand(0x3c00u)); + Temp minus_one = bld.copy(bld.def(v1), Operand(0xbc00u)); + Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); + src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), one, src, cond); + cond = bld.vopc(aco_opcode::v_cmp_le_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); + Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), minus_one, src, cond); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else if (dst.regClass() == v1) { Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond); cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond); - } else if (dst.size() == 2) { + } else if (dst.regClass() == v2) { Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src); Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u)); Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond); @@ -2072,14 +2236,29 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src); break; } + case nir_op_i2f16: { + assert(dst.regClass() == v2b); + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size == 8) + src = convert_int(bld, src, 8, 16, true); + Temp tmp = bld.vop1(aco_opcode::v_cvt_f16_i16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + break; + } case nir_op_i2f32: { assert(dst.size() == 1); - emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_i32, dst); + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size <= 16) + src = convert_int(bld, src, instr->src[0].src.ssa->bit_size, 32, true); + bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src); break; } case nir_op_i2f64: { - if (instr->src[0].src.ssa->bit_size == 32) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_i32, dst); + if (instr->src[0].src.ssa->bit_size <= 32) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size <= 16) + src = convert_int(bld, src, instr->src[0].src.ssa->bit_size, 32, true); + bld.vop1(aco_opcode::v_cvt_f64_i32, Definition(dst), src); } else if (instr->src[0].src.ssa->bit_size == 64) { Temp src = get_alu_src(ctx, instr->src[0]); RegClass rc = RegClass(src.type(), 1); @@ -2097,14 +2276,34 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } break; } + case nir_op_u2f16: { + assert(dst.regClass() == v2b); + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size == 8) + src = convert_int(bld, src, 8, 16, false); + Temp tmp = bld.vop1(aco_opcode::v_cvt_f16_u16, bld.def(v1), src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + break; + } case nir_op_u2f32: { assert(dst.size() == 1); - emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_u32, dst); + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size == 8) { + //TODO: we should use v_cvt_f32_ubyte1/v_cvt_f32_ubyte2/etc depending on the register assignment + 
bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src); + } else { + if (instr->src[0].src.ssa->bit_size == 16) + src = convert_int(bld, src, instr->src[0].src.ssa->bit_size, 32, true); + bld.vop1(aco_opcode::v_cvt_f32_u32, Definition(dst), src); + } break; } case nir_op_u2f64: { - if (instr->src[0].src.ssa->bit_size == 32) { - emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_u32, dst); + if (instr->src[0].src.ssa->bit_size <= 32) { + Temp src = get_alu_src(ctx, instr->src[0]); + if (instr->src[0].src.ssa->bit_size <= 16) + src = convert_int(bld, src, instr->src[0].src.ssa->bit_size, 32, false); + bld.vop1(aco_opcode::v_cvt_f64_u32, Definition(dst), src); } else if (instr->src[0].src.ssa->bit_size == 64) { Temp src = get_alu_src(ctx, instr->src[0]); RegClass rc = RegClass(src.type(), 1); @@ -2121,6 +2320,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } break; } + case nir_op_f2i8: case nir_op_f2i16: { Temp src = get_alu_src(ctx, instr->src[0]); if (instr->src[0].src.ssa->bit_size == 16) @@ -2131,11 +2331,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) src = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), src); if (dst.type() == RegType::vgpr) - bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src); + bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u)); else bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src); break; } + case nir_op_f2u8: case nir_op_f2u16: { Temp src = get_alu_src(ctx, instr->src[0]); if (instr->src[0].src.ssa->bit_size == 16) @@ -2146,14 +2347,22 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) src = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), src); if (dst.type() == RegType::vgpr) - bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), src); + bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u)); else bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src); break; } case nir_op_f2i32: { Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32) { + if (instr->src[0].src.ssa->bit_size == 16) { + Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src); + if (dst.type() == RegType::vgpr) { + bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), tmp); + } else { + bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), + bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp)); + } + } else if (instr->src[0].src.ssa->bit_size == 32) { if (dst.type() == RegType::vgpr) bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), src); else @@ -2176,7 +2385,15 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_f2u32: { Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32) { + if (instr->src[0].src.ssa->bit_size == 16) { + Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src); + if (dst.type() == RegType::vgpr) { + bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), tmp); + } else { + bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), + bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp)); + } + } else if (instr->src[0].src.ssa->bit_size == 32) { if (dst.type() == RegType::vgpr) bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), src); else @@ -2199,7 +2416,10 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_f2i64: { Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) { + if (instr->src[0].src.ssa->bit_size == 
16) + src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src); + + if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) { Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src); exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand(0x0u), exponent, Operand(64u)); Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src); @@ -2225,13 +2445,13 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow); bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper); - } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) { + } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) { if (src.type() == RegType::vgpr) src = bld.as_uniform(src); Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u)); - exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u)); - exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent); - exponent = bld.sop2(aco_opcode::s_min_u32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent); + exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u)); + exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent); + exponent = bld.sop2(aco_opcode::s_min_i32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent); Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src); Temp sign = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u)); mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa); @@ -2275,7 +2495,10 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } case nir_op_f2u64: { Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) { + if (instr->src[0].src.ssa->bit_size == 16) + src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src); + + if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) { Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src); Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent); exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent); @@ -2298,12 +2521,12 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), upper, exponent_in_range); bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper); - } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) { + } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) { if (src.type() == RegType::vgpr) src = bld.as_uniform(src); Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u)); - exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u)); - exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent); + exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u)); + exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, 
scc), Operand(0u), exponent); Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src); mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa); Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(24u), exponent); @@ -2342,6 +2565,22 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) } break; } + case nir_op_b2f16: { + Temp src = get_alu_src(ctx, instr->src[0]); + assert(src.regClass() == bld.lm); + + if (dst.regClass() == s1) { + src = bool_to_scalar_condition(ctx, src); + bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand(0x3c00u), src); + } else if (dst.regClass() == v2b) { + Temp one = bld.copy(bld.def(v1), Operand(0x3c00u)); + Temp tmp = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), one, src); + bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp); + } else { + unreachable("Wrong destination register class for nir_op_b2f16."); + } + break; + } case nir_op_b2f32: { Temp src = get_alu_src(ctx, instr->src[0]); assert(src.regClass() == bld.lm); @@ -2373,159 +2612,19 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_i2i8: - case nir_op_u2u8: { - Temp src = get_alu_src(ctx, instr->src[0]); - /* we can actually just say dst = src */ - if (src.regClass() == s1) - bld.copy(Definition(dst), src); - else - emit_extract_vector(ctx, src, 0, dst); - break; - } - case nir_op_i2i16: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 8) { - if (dst.regClass() == s1) { - bld.sop1(aco_opcode::s_sext_i32_i8, Definition(dst), Operand(src)); - } else { - assert(src.regClass() == v1b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_sbyte; - sdwa->dst_sel = sdwa_sword; - ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else { - Temp src = get_alu_src(ctx, instr->src[0]); - /* we can actually just say dst = src */ - if (src.regClass() == s1) - bld.copy(Definition(dst), src); - else - emit_extract_vector(ctx, src, 0, dst); - } - break; - } - case nir_op_u2u16: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 8) { - if (dst.regClass() == s1) - bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFu), src); - else { - assert(src.regClass() == v1b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_ubyte; - sdwa->dst_sel = sdwa_uword; - ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else { - Temp src = get_alu_src(ctx, instr->src[0]); - /* we can actually just say dst = src */ - if (src.regClass() == s1) - bld.copy(Definition(dst), src); - else - emit_extract_vector(ctx, src, 0, dst); - } - break; - } - case nir_op_i2i32: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 8) { - if (dst.regClass() == s1) { - bld.sop1(aco_opcode::s_sext_i32_i8, Definition(dst), Operand(src)); - } else { - assert(src.regClass() == v1b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_sbyte; - sdwa->dst_sel = sdwa_sdword; - 
ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else if (instr->src[0].src.ssa->bit_size == 16) { - if (dst.regClass() == s1) { - bld.sop1(aco_opcode::s_sext_i32_i16, Definition(dst), Operand(src)); - } else { - assert(src.regClass() == v2b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_sword; - sdwa->dst_sel = sdwa_udword; - ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else if (instr->src[0].src.ssa->bit_size == 64) { - /* we can actually just say dst = src, as it would map the lower register */ - emit_extract_vector(ctx, src, 0, dst); - } else { - fprintf(stderr, "Unimplemented NIR instr bit size: "); - nir_print_instr(&instr->instr, stderr); - fprintf(stderr, "\n"); - } - break; - } - case nir_op_u2u32: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 8) { - if (dst.regClass() == s1) - bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFu), src); - else { - assert(src.regClass() == v1b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_ubyte; - sdwa->dst_sel = sdwa_udword; - ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else if (instr->src[0].src.ssa->bit_size == 16) { - if (dst.regClass() == s1) { - bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFFFu), src); - } else { - assert(src.regClass() == v2b); - aco_ptr sdwa{create_instruction(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)}; - sdwa->operands[0] = Operand(src); - sdwa->definitions[0] = Definition(dst); - sdwa->sel[0] = sdwa_uword; - sdwa->dst_sel = sdwa_udword; - ctx->block->instructions.emplace_back(std::move(sdwa)); - } - } else if (instr->src[0].src.ssa->bit_size == 64) { - /* we can actually just say dst = src, as it would map the lower register */ - emit_extract_vector(ctx, src, 0, dst); - } else { - fprintf(stderr, "Unimplemented NIR instr bit size: "); - nir_print_instr(&instr->instr, stderr); - fprintf(stderr, "\n"); - } - break; - } + case nir_op_i2i16: + case nir_op_i2i32: case nir_op_i2i64: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (src.regClass() == s1) { - Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u)); - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high); - } else if (src.regClass() == v1) { - Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src); - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high); - } else { - fprintf(stderr, "Unimplemented NIR instr bit size: "); - nir_print_instr(&instr->instr, stderr); - fprintf(stderr, "\n"); - } + convert_int(bld, get_alu_src(ctx, instr->src[0]), + instr->src[0].src.ssa->bit_size, instr->dest.dest.ssa.bit_size, true, dst); break; } + case nir_op_u2u8: + case nir_op_u2u16: + case nir_op_u2u32: case nir_op_u2u64: { - Temp src = get_alu_src(ctx, instr->src[0]); - if (instr->src[0].src.ssa->bit_size == 32) { - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, Operand(0u)); - } else { - fprintf(stderr, "Unimplemented NIR instr bit size: "); - nir_print_instr(&instr->instr, stderr); - fprintf(stderr, "\n"); - } + convert_int(bld, get_alu_src(ctx, instr->src[0]), + instr->src[0].src.ssa->bit_size, 
instr->dest.dest.ssa.bit_size, false, dst); break; } case nir_op_b2b32: @@ -2590,13 +2689,15 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) if (dst.type() == RegType::vgpr) { bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0])); } else { - bld.sop2(aco_opcode::s_bfe_u32, Definition(dst), get_alu_src(ctx, instr->src[0]), Operand(uint32_t(16 << 16 | 16))); + bld.sop2(aco_opcode::s_bfe_u32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]), Operand(uint32_t(16 << 16 | 16))); } break; case nir_op_pack_32_2x16_split: { Temp src0 = get_alu_src(ctx, instr->src[0]); Temp src1 = get_alu_src(ctx, instr->src[1]); if (dst.regClass() == v1) { + src0 = emit_extract_vector(ctx, src0, 0, v2b); + src1 = emit_extract_vector(ctx, src1, 0, v2b); bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1); } else { src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0, Operand(0xFFFFu)); @@ -2813,34 +2914,34 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) break; } case nir_op_flt: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f16, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64); break; } case nir_op_fge: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f16, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64); break; } case nir_op_feq: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64); break; } case nir_op_fne: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64); break; } case nir_op_ilt: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i16, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32); break; } case nir_op_ige: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i16, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32); break; } case nir_op_ieq: { if (instr->src[0].src.ssa->bit_size == 1) emit_boolean_logic(ctx, instr, Builder::s_xnor, dst); else - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32, + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i16, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32, ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes); break; } @@ -2848,16 +2949,16 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr) if (instr->src[0].src.ssa->bit_size == 1) emit_boolean_logic(ctx, instr, Builder::s_xor, dst); else - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32, + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i16, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32, ctx->program->chip_class >= GFX8 ? 
aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes); break; } case nir_op_ult: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u16, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32); break; } case nir_op_uge: { - emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32); + emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u16, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32); break; } case nir_op_fddx: @@ -2918,6 +3019,12 @@ void visit_load_const(isel_context *ctx, nir_load_const_instr *instr) int val = instr->value[0].b ? -1 : 0; Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val); bld.sop1(Builder::s_mov, Definition(dst), op); + } else if (instr->def.bit_size == 8) { + /* ensure that the value is correctly represented in the low byte of the register */ + bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u8); + } else if (instr->def.bit_size == 16) { + /* ensure that the value is correctly represented in the low half of the register */ + bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u16); } else if (dst.size() == 1) { bld.copy(Definition(dst), Operand(instr->value[0].u32)); } else { @@ -2944,117 +3051,557 @@ uint32_t widen_mask(uint32_t mask, unsigned multiplier) return new_mask; } -Operand load_lds_size_m0(isel_context *ctx) +void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst) { - /* TODO: m0 does not need to be initialized on GFX9+ */ Builder bld(ctx->program, ctx->block); - return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff)); -} + if (offset.isTemp()) { + Temp tmp[3] = {vec, vec, vec}; -Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst, - Temp address, unsigned base_offset, unsigned align) + if (vec.size() == 3) { + tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1); + bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec); + } else if (vec.size() == 2) { + tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1]; + bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec); + } + for (unsigned i = 0; i < dst.size(); i++) + tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset); + + vec = tmp[0]; + if (dst.size() == 2) + vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]); + + offset = Operand(0u); + } + + if (vec.bytes() == dst.bytes() && offset.constantValue() == 0) + bld.copy(Definition(dst), vec); + else + trim_subdword_vector(ctx, vec, dst, vec.bytes(), ((1 << dst.bytes()) - 1) << offset.constantValue()); +} + +struct LoadEmitInfo { + Operand offset; + Temp dst; + unsigned num_components; + unsigned component_size; + Temp resource = Temp(0, s1); + unsigned component_stride = 0; + unsigned const_offset = 0; + unsigned align_mul = 0; + unsigned align_offset = 0; + + bool glc = false; + unsigned swizzle_component_size = 0; + barrier_interaction barrier = barrier_none; + bool can_reorder = true; + Temp soffset = Temp(0, s1); +}; + +using LoadCallback = Temp(*)( + Builder& bld, const LoadEmitInfo* info, Temp offset, unsigned bytes_needed, + unsigned align, unsigned const_offset, Temp dst_hint); + +template +void emit_load(isel_context *ctx, Builder& bld, const 
LoadEmitInfo *info) { - assert(util_is_power_of_two_nonzero(align) && align >= 4); + unsigned load_size = info->num_components * info->component_size; + unsigned component_size = info->component_size; - Builder bld(ctx->program, ctx->block); + unsigned num_vals = 0; + Temp vals[info->dst.bytes()]; + + unsigned const_offset = info->const_offset; - Operand m = load_lds_size_m0(ctx); + unsigned align_mul = info->align_mul ? info->align_mul : component_size; + unsigned align_offset = (info->align_offset + const_offset) % align_mul; - unsigned num_components = dst.size() * 4u / elem_size_bytes; unsigned bytes_read = 0; - unsigned result_size = 0; - unsigned total_bytes = num_components * elem_size_bytes; - std::array result; - bool large_ds_read = ctx->options->chip_class >= GFX7; - bool usable_read2 = ctx->options->chip_class >= GFX7; - - while (bytes_read < total_bytes) { - unsigned todo = total_bytes - bytes_read; - bool aligned8 = bytes_read % 8 == 0 && align % 8 == 0; - bool aligned16 = bytes_read % 16 == 0 && align % 16 == 0; - - aco_opcode op = aco_opcode::last_opcode; - bool read2 = false; - if (todo >= 16 && aligned16 && large_ds_read) { - op = aco_opcode::ds_read_b128; - todo = 16; - } else if (todo >= 16 && aligned8 && usable_read2) { - op = aco_opcode::ds_read2_b64; - read2 = true; - todo = 16; - } else if (todo >= 12 && aligned16 && large_ds_read) { - op = aco_opcode::ds_read_b96; - todo = 12; - } else if (todo >= 8 && aligned8) { - op = aco_opcode::ds_read_b64; - todo = 8; - } else if (todo >= 8 && usable_read2) { - op = aco_opcode::ds_read2_b32; - read2 = true; - todo = 8; - } else if (todo >= 4) { - op = aco_opcode::ds_read_b32; - todo = 4; - } else { - assert(false); + while (bytes_read < load_size) { + unsigned bytes_needed = load_size - bytes_read; + + /* add buffer for unaligned loads */ + int byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1; + + if (byte_align) { + if ((bytes_needed > 2 || !supports_8bit_16bit_loads) && byte_align_loads) { + if (info->component_stride) { + assert(supports_8bit_16bit_loads && "unimplemented"); + bytes_needed = 2; + byte_align = 0; + } else { + bytes_needed += byte_align == -1 ? 4 - info->align_mul : byte_align; + bytes_needed = align(bytes_needed, 4); + } + } else { + byte_align = 0; + } } - assert(todo % elem_size_bytes == 0); - unsigned num_elements = todo / elem_size_bytes; - unsigned offset = base_offset + bytes_read; - unsigned max_offset = read2 ? 1019 : 65535; - Temp address_offset = address; - if (offset > max_offset) { - address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset); - offset = bytes_read; + if (info->swizzle_component_size) + bytes_needed = MIN2(bytes_needed, info->swizzle_component_size); + if (info->component_stride) + bytes_needed = MIN2(bytes_needed, info->component_size); + + bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4); + + /* reduce constant offset */ + Operand offset = info->offset; + unsigned reduced_const_offset = const_offset; + bool remove_const_offset_completely = need_to_align_offset; + if (const_offset && (remove_const_offset_completely || const_offset >= max_const_offset_plus_one)) { + unsigned to_add = const_offset; + if (remove_const_offset_completely) { + reduced_const_offset = 0; + } else { + to_add = const_offset / max_const_offset_plus_one * max_const_offset_plus_one; + reduced_const_offset %= max_const_offset_plus_one; + } + Temp offset_tmp = offset.isTemp() ? 
offset.getTemp() : Temp(); + if (offset.isConstant()) { + offset = Operand(offset.constantValue() + to_add); + } else if (offset_tmp.regClass() == s1) { + offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), + offset_tmp, Operand(to_add)); + } else if (offset_tmp.regClass() == v1) { + offset = bld.vadd32(bld.def(v1), offset_tmp, Operand(to_add)); + } else { + Temp lo = bld.tmp(offset_tmp.type(), 1); + Temp hi = bld.tmp(offset_tmp.type(), 1); + bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp); + + if (offset_tmp.regClass() == s2) { + Temp carry = bld.tmp(s1); + lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), lo, Operand(to_add)); + hi = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), hi, carry); + offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), lo, hi); + } else { + Temp new_lo = bld.tmp(v1); + Temp carry = bld.vadd32(Definition(new_lo), lo, Operand(to_add), true).def(1).getTemp(); + hi = bld.vadd32(bld.def(v1), hi, Operand(0u), false, carry); + offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_lo, hi); + } + } } - assert(offset <= max_offset); /* bytes_read shouldn't be large enough for this to happen */ - Temp res; - if (num_components == 1 && dst.type() == RegType::vgpr) - res = dst; - else - res = bld.tmp(RegClass(RegType::vgpr, todo / 4)); + /* align offset down if needed */ + Operand aligned_offset = offset; + if (need_to_align_offset) { + Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp(); + if (offset.isConstant()) { + aligned_offset = Operand(offset.constantValue() & 0xfffffffcu); + } else if (offset_tmp.regClass() == s1) { + aligned_offset = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfffffffcu), offset_tmp); + } else if (offset_tmp.regClass() == s2) { + aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand((uint64_t)0xfffffffffffffffcllu), offset_tmp); + } else if (offset_tmp.regClass() == v1) { + aligned_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), offset_tmp); + } else if (offset_tmp.regClass() == v2) { + Temp hi = bld.tmp(v1), lo = bld.tmp(v1); + bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp); + lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), lo); + aligned_offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), lo, hi); + } + } + Temp aligned_offset_tmp = aligned_offset.isTemp() ? aligned_offset.getTemp() : + bld.copy(bld.def(s1), aligned_offset); - if (read2) - res = bld.ds(op, Definition(res), address_offset, m, offset / (todo / 2), (offset / (todo / 2)) + 1); - else - res = bld.ds(op, Definition(res), address_offset, m, offset); + unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul; + Temp val = callback(bld, info, aligned_offset_tmp, bytes_needed, align, + reduced_const_offset, byte_align ? 
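/* Note on the dst_hint argument being formed here: the real destination is only
 * handed to the callback when the loaded bytes need no further re-alignment;
 * otherwise the callback writes a scratch Temp and the result is shifted into
 * place right below via byte_align_scalar()/byte_align_vector(). */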
Temp() : info->dst); - if (num_components == 1) { - assert(todo == total_bytes); - if (dst.type() == RegType::sgpr) - bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), res); - return dst; + /* shift result right if needed */ + if (byte_align) { + Operand align((uint32_t)byte_align); + if (byte_align == -1) { + if (offset.isConstant()) + align = Operand(offset.constantValue() % 4u); + else if (offset.size() == 2) + align = Operand(emit_extract_vector(ctx, offset.getTemp(), 0, RegClass(offset.getTemp().type(), 1))); + else + align = offset; + } + + if (align.isTemp() || align.constantValue()) { + assert(val.bytes() >= load_size && "unimplemented"); + Temp new_val = bld.tmp(RegClass::get(val.type(), load_size)); + if (val.type() == RegType::sgpr) + byte_align_scalar(ctx, val, align, new_val); + else + byte_align_vector(ctx, val, align, new_val); + val = new_val; + } } - if (dst.type() == RegType::sgpr) { - Temp new_res = bld.tmp(RegType::sgpr, res.size()); - expand_vector(ctx, res, new_res, res.size(), (1 << res.size()) - 1); - res = new_res; + /* add result to list and advance */ + if (info->component_stride) { + assert(val.bytes() == info->component_size && "unimplemented"); + const_offset += info->component_stride; + align_offset = (align_offset + info->component_stride) % align_mul; + } else { + const_offset += val.bytes(); + align_offset = (align_offset + val.bytes()) % align_mul; + } + bytes_read += val.bytes(); + vals[num_vals++] = val; + } + + /* the callback wrote directly to dst */ + if (vals[0] == info->dst) { + assert(num_vals == 1); + emit_split_vector(ctx, info->dst, info->num_components); + return; + } + + /* create array of components */ + unsigned components_split = 0; + std::array allocated_vec; + bool has_vgprs = false; + for (unsigned i = 0; i < num_vals;) { + Temp tmp[num_vals]; + unsigned num_tmps = 0; + unsigned tmp_size = 0; + RegType reg_type = RegType::sgpr; + while ((!tmp_size || (tmp_size % component_size)) && i < num_vals) { + if (vals[i].type() == RegType::vgpr) + reg_type = RegType::vgpr; + tmp_size += vals[i].bytes(); + tmp[num_tmps++] = vals[i++]; + } + if (num_tmps > 1) { + aco_ptr vec{create_instruction( + aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)}; + for (unsigned i = 0; i < num_vals; i++) + vec->operands[i] = Operand(tmp[i]); + tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size)); + vec->definitions[0] = Definition(tmp[0]); + bld.insert(std::move(vec)); + } + + if (tmp[0].bytes() % component_size) { + /* trim tmp[0] */ + assert(i == num_vals); + RegClass new_rc = RegClass::get(reg_type, tmp[0].bytes() / component_size * component_size); + tmp[0] = bld.pseudo(aco_opcode::p_extract_vector, bld.def(new_rc), tmp[0], Operand(0u)); } - if (num_elements == 1) { - result[result_size++] = res; + RegClass elem_rc = RegClass::get(reg_type, component_size); + + unsigned start = components_split; + + if (tmp_size == elem_rc.bytes()) { + allocated_vec[components_split++] = tmp[0]; } else { - assert(res != dst && res.size() % num_elements == 0); - aco_ptr split{create_instruction(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_elements)}; - split->operands[0] = Operand(res); - for (unsigned i = 0; i < num_elements; i++) - split->definitions[i] = Definition(result[result_size++] = bld.tmp(res.type(), elem_size_bytes / 4)); - ctx->block->instructions.emplace_back(std::move(split)); + assert(tmp_size % elem_rc.bytes() == 0); + aco_ptr split{create_instruction( + aco_opcode::p_split_vector, Format::PSEUDO, 1, tmp_size / elem_rc.bytes())}; + for 
(unsigned i = 0; i < split->definitions.size(); i++) { + Temp component = bld.tmp(elem_rc); + allocated_vec[components_split++] = component; + split->definitions[i] = Definition(component); + } + split->operands[0] = Operand(tmp[0]); + bld.insert(std::move(split)); } - bytes_read += todo; + /* try to p_as_uniform early so we can create more optimizable code and + * also update allocated_vec */ + for (unsigned j = start; j < components_split; j++) { + if (allocated_vec[j].bytes() % 4 == 0 && info->dst.type() == RegType::sgpr) + allocated_vec[j] = bld.as_uniform(allocated_vec[j]); + has_vgprs |= allocated_vec[j].type() == RegType::vgpr; + } } - assert(result_size == num_components && result_size > 1); - aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, result_size, 1)}; - for (unsigned i = 0; i < result_size; i++) - vec->operands[i] = Operand(result[i]); - vec->definitions[0] = Definition(dst); - ctx->block->instructions.emplace_back(std::move(vec)); - ctx->allocated_vec.emplace(dst.id(), result); + /* concatenate components and p_as_uniform() result if needed */ + if (info->dst.type() == RegType::vgpr || !has_vgprs) + ctx->allocated_vec.emplace(info->dst.id(), allocated_vec); + + int padding_bytes = MAX2((int)info->dst.bytes() - int(allocated_vec[0].bytes() * info->num_components), 0); + + aco_ptr vec{create_instruction( + aco_opcode::p_create_vector, Format::PSEUDO, info->num_components + !!padding_bytes, 1)}; + for (unsigned i = 0; i < info->num_components; i++) + vec->operands[i] = Operand(allocated_vec[i]); + if (padding_bytes) + vec->operands[info->num_components] = Operand(RegClass::get(RegType::vgpr, padding_bytes)); + if (info->dst.type() == RegType::sgpr && has_vgprs) { + Temp tmp = bld.tmp(RegType::vgpr, info->dst.size()); + vec->definitions[0] = Definition(tmp); + bld.insert(std::move(vec)); + bld.pseudo(aco_opcode::p_as_uniform, Definition(info->dst), tmp); + } else { + vec->definitions[0] = Definition(info->dst); + bld.insert(std::move(vec)); + } +} + +Operand load_lds_size_m0(Builder& bld) +{ + /* TODO: m0 does not need to be initialized on GFX9+ */ + return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff)); +} + +Temp lds_load_callback(Builder& bld, const LoadEmitInfo *info, + Temp offset, unsigned bytes_needed, + unsigned align, unsigned const_offset, + Temp dst_hint) +{ + offset = offset.regClass() == s1 ? 
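/* Opcode selection in this callback, summarized from the code below:
 *   bytes_needed >= 16, 16-byte aligned, GFX7+   -> ds_read_b128
 *   bytes_needed >= 16,  8-byte aligned, GFX7+   -> ds_read2_b64
 *   bytes_needed >= 12, 16-byte aligned, GFX7+   -> ds_read_b96
 *   bytes_needed >=  8,  8-byte aligned          -> ds_read_b64
 *   bytes_needed >=  8,  4-byte aligned, GFX7+   -> ds_read2_b32
 *   bytes_needed >=  4,  4-byte aligned          -> ds_read_b32
 *   bytes_needed >=  2,  2-byte aligned          -> ds_read_u16
 *   otherwise                                    -> ds_read_u8
 * The read2 forms additionally need the constant offset to be a multiple of the
 * per-half size so it can be encoded as two element-indexed offsets. */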
bld.copy(bld.def(v1), offset) : offset; + + Operand m = load_lds_size_m0(bld); + + bool large_ds_read = bld.program->chip_class >= GFX7; + bool usable_read2 = bld.program->chip_class >= GFX7; + + bool read2 = false; + unsigned size = 0; + aco_opcode op; + //TODO: use ds_read_u8_d16_hi/ds_read_u16_d16_hi if beneficial + if (bytes_needed >= 16 && align % 16 == 0 && large_ds_read) { + size = 16; + op = aco_opcode::ds_read_b128; + } else if (bytes_needed >= 16 && align % 8 == 0 && const_offset % 8 == 0 && usable_read2) { + size = 16; + read2 = true; + op = aco_opcode::ds_read2_b64; + } else if (bytes_needed >= 12 && align % 16 == 0 && large_ds_read) { + size = 12; + op = aco_opcode::ds_read_b96; + } else if (bytes_needed >= 8 && align % 8 == 0) { + size = 8; + op = aco_opcode::ds_read_b64; + } else if (bytes_needed >= 8 && align % 4 == 0 && const_offset % 4 == 0) { + size = 8; + read2 = true; + op = aco_opcode::ds_read2_b32; + } else if (bytes_needed >= 4 && align % 4 == 0) { + size = 4; + op = aco_opcode::ds_read_b32; + } else if (bytes_needed >= 2 && align % 2 == 0) { + size = 2; + op = aco_opcode::ds_read_u16; + } else { + size = 1; + op = aco_opcode::ds_read_u8; + } + + unsigned max_offset_plus_one = read2 ? 254 * (size / 2u) + 1 : 65536; + if (const_offset >= max_offset_plus_one) { + offset = bld.vadd32(bld.def(v1), offset, Operand(const_offset / max_offset_plus_one)); + const_offset %= max_offset_plus_one; + } + + if (read2) + const_offset /= (size / 2u); + + RegClass rc = RegClass(RegType::vgpr, DIV_ROUND_UP(size, 4)); + Temp val = rc == info->dst.regClass() && dst_hint.id() ? dst_hint : bld.tmp(rc); + if (read2) + bld.ds(op, Definition(val), offset, m, const_offset, const_offset + 1); + else + bld.ds(op, Definition(val), offset, m, const_offset); + + if (size < 4) + val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, size)), val, Operand(0u)); + + return val; +} + +static auto emit_lds_load = emit_load; + +Temp smem_load_callback(Builder& bld, const LoadEmitInfo *info, + Temp offset, unsigned bytes_needed, + unsigned align, unsigned const_offset, + Temp dst_hint) +{ + unsigned size = 0; + aco_opcode op; + if (bytes_needed <= 4) { + size = 1; + op = info->resource.id() ? aco_opcode::s_buffer_load_dword : aco_opcode::s_load_dword; + } else if (bytes_needed <= 8) { + size = 2; + op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx2 : aco_opcode::s_load_dwordx2; + } else if (bytes_needed <= 16) { + size = 4; + op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx4 : aco_opcode::s_load_dwordx4; + } else if (bytes_needed <= 32) { + size = 8; + op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx8 : aco_opcode::s_load_dwordx8; + } else { + size = 16; + op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx16 : aco_opcode::s_load_dwordx16; + } + aco_ptr load{create_instruction(op, Format::SMEM, 2, 1)}; + if (info->resource.id()) { + load->operands[0] = Operand(info->resource); + load->operands[1] = Operand(offset); + } else { + load->operands[0] = Operand(offset); + load->operands[1] = Operand(0u); + } + RegClass rc(RegType::sgpr, size); + Temp val = dst_hint.id() && dst_hint.regClass() == rc ? 
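/* The SMEM path above rounds bytes_needed up to 1, 2, 4, 8 or 16 dwords and picks
 * s_buffer_load_dword* when a buffer resource is present, plain s_load_dword*
 * (address plus literal 0) otherwise.  For example, a 12-byte uniform load becomes
 * one s_buffer_load_dwordx4; the surplus dword is simply not used when emit_load()
 * assembles the destination vector. */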
dst_hint : bld.tmp(rc); + load->definitions[0] = Definition(val); + load->glc = info->glc; + load->dlc = info->glc && bld.program->chip_class >= GFX10; + load->barrier = info->barrier; + load->can_reorder = false; // FIXME: currently, it doesn't seem beneficial due to how our scheduler works + bld.insert(std::move(load)); + return val; +} + +static auto emit_smem_load = emit_load; + +Temp mubuf_load_callback(Builder& bld, const LoadEmitInfo *info, + Temp offset, unsigned bytes_needed, + unsigned align_, unsigned const_offset, + Temp dst_hint) +{ + Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1); + Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0); + + if (info->soffset.id()) { + if (soffset.isTemp()) + vaddr = bld.copy(bld.def(v1), soffset); + soffset = Operand(info->soffset); + } + + unsigned bytes_size = 0; + aco_opcode op; + if (bytes_needed == 1) { + bytes_size = 1; + op = aco_opcode::buffer_load_ubyte; + } else if (bytes_needed == 2) { + bytes_size = 2; + op = aco_opcode::buffer_load_ushort; + } else if (bytes_needed <= 4) { + bytes_size = 4; + op = aco_opcode::buffer_load_dword; + } else if (bytes_needed <= 8) { + bytes_size = 8; + op = aco_opcode::buffer_load_dwordx2; + } else if (bytes_needed <= 12 && bld.program->chip_class > GFX6) { + bytes_size = 12; + op = aco_opcode::buffer_load_dwordx3; + } else { + bytes_size = 16; + op = aco_opcode::buffer_load_dwordx4; + } + aco_ptr mubuf{create_instruction(op, Format::MUBUF, 3, 1)}; + mubuf->operands[0] = Operand(info->resource); + mubuf->operands[1] = vaddr; + mubuf->operands[2] = soffset; + mubuf->offen = (offset.type() == RegType::vgpr); + mubuf->glc = info->glc; + mubuf->dlc = info->glc && bld.program->chip_class >= GFX10; + mubuf->barrier = info->barrier; + mubuf->can_reorder = info->can_reorder; + mubuf->offset = const_offset; + RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4)); + Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc); + mubuf->definitions[0] = Definition(val); + bld.insert(std::move(mubuf)); + + if (bytes_size < 4) + val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, bytes_size)), val, Operand(0u)); + + return val; +} + +static auto emit_mubuf_load = emit_load; + +Temp get_gfx6_global_rsrc(Builder& bld, Temp addr) +{ + uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | + S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); + + if (addr.type() == RegType::vgpr) + return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf)); + return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf)); +} + +Temp global_load_callback(Builder& bld, const LoadEmitInfo *info, + Temp offset, unsigned bytes_needed, + unsigned align_, unsigned const_offset, + Temp dst_hint) +{ + unsigned bytes_size = 0; + bool mubuf = bld.program->chip_class == GFX6; + bool global = bld.program->chip_class >= GFX9; + aco_opcode op; + if (bytes_needed == 1) { + bytes_size = 1; + op = mubuf ? aco_opcode::buffer_load_ubyte : global ? aco_opcode::global_load_ubyte : aco_opcode::flat_load_ubyte; + } else if (bytes_needed == 2) { + bytes_size = 2; + op = mubuf ? aco_opcode::buffer_load_ushort : global ? aco_opcode::global_load_ushort : aco_opcode::flat_load_ushort; + } else if (bytes_needed <= 4) { + bytes_size = 4; + op = mubuf ? aco_opcode::buffer_load_dword : global ? 
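/* Chip-class dispatch in this callback (the emission paths follow below): GFX6 has
 * no FLAT instructions, so globals are loaded via MUBUF with a raw resource built by
 * get_gfx6_global_rsrc() and addr64 addressing when the address lives in VGPRs;
 * GFX7-8 use flat_load_*, GFX9+ use global_load_*.  The 12-byte bucket is skipped
 * on the MUBUF path because GFX6 cannot load dwordx3. */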
aco_opcode::global_load_dword : aco_opcode::flat_load_dword; + } else if (bytes_needed <= 8) { + bytes_size = 8; + op = mubuf ? aco_opcode::buffer_load_dwordx2 : global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2; + } else if (bytes_needed <= 12 && !mubuf) { + bytes_size = 12; + op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3; + } else { + bytes_size = 16; + op = mubuf ? aco_opcode::buffer_load_dwordx4 : global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4; + } + RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4)); + Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc); + if (mubuf) { + aco_ptr mubuf{create_instruction(op, Format::MUBUF, 3, 1)}; + mubuf->operands[0] = Operand(get_gfx6_global_rsrc(bld, offset)); + mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1); + mubuf->operands[2] = Operand(0u); + mubuf->glc = info->glc; + mubuf->dlc = false; + mubuf->offset = 0; + mubuf->addr64 = offset.type() == RegType::vgpr; + mubuf->disable_wqm = false; + mubuf->barrier = info->barrier; + mubuf->definitions[0] = Definition(val); + bld.insert(std::move(mubuf)); + } else { + offset = offset.regClass() == s2 ? bld.copy(bld.def(v2), offset) : offset; + + aco_ptr flat{create_instruction(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)}; + flat->operands[0] = Operand(offset); + flat->operands[1] = Operand(s1); + flat->glc = info->glc; + flat->dlc = info->glc && bld.program->chip_class >= GFX10; + flat->barrier = info->barrier; + flat->offset = 0u; + flat->definitions[0] = Definition(val); + bld.insert(std::move(flat)); + } + + if (bytes_size < 4) + val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, bytes_size)), val, Operand(0u)); + + return val; +} + +static auto emit_global_load = emit_load; + +Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst, + Temp address, unsigned base_offset, unsigned align) +{ + assert(util_is_power_of_two_nonzero(align)); + + Builder bld(ctx->program, ctx->block); + + unsigned num_components = dst.bytes() / elem_size_bytes; + LoadEmitInfo info = {Operand(as_vgpr(ctx, address)), dst, num_components, elem_size_bytes}; + info.align_mul = align; + info.align_offset = 0; + info.barrier = barrier_shared; + info.can_reorder = false; + info.const_offset = base_offset; + emit_lds_load(ctx, bld, &info); return dst; } @@ -3074,129 +3621,229 @@ Temp extract_subvector(isel_context *ctx, Temp data, unsigned start, unsigned si start /= size_hint; size /= size_hint; - Temp elems[size]; - for (unsigned i = 0; i < size; i++) - elems[i] = emit_extract_vector(ctx, data, start + i, RegClass(type, size_hint)); + Temp elems[size]; + for (unsigned i = 0; i < size; i++) + elems[i] = emit_extract_vector(ctx, data, start + i, RegClass(type, size_hint)); + + if (size == 1) + return type == RegType::vgpr ? 
as_vgpr(ctx, elems[0]) : elems[0]; + + aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)}; + for (unsigned i = 0; i < size; i++) + vec->operands[i] = Operand(elems[i]); + Temp res = {ctx->program->allocateId(), RegClass(type, size * size_hint)}; + vec->definitions[0] = Definition(res); + ctx->block->instructions.emplace_back(std::move(vec)); + return res; +} + +void split_store_data(isel_context *ctx, RegType dst_type, unsigned count, Temp *dst, unsigned *offsets, Temp src) +{ + if (!count) + return; + + Builder bld(ctx->program, ctx->block); + + ASSERTED bool is_subdword = false; + for (unsigned i = 0; i < count; i++) + is_subdword |= offsets[i] % 4; + is_subdword |= (src.bytes() - offsets[count - 1]) % 4; + assert(!is_subdword || dst_type == RegType::vgpr); + + /* count == 1 fast path */ + if (count == 1) { + if (dst_type == RegType::sgpr) + dst[0] = bld.as_uniform(src); + else + dst[0] = as_vgpr(ctx, src); + return; + } + + for (unsigned i = 0; i < count - 1; i++) + dst[i] = bld.tmp(RegClass::get(dst_type, offsets[i + 1] - offsets[i])); + dst[count - 1] = bld.tmp(RegClass::get(dst_type, src.bytes() - offsets[count - 1])); + + if (is_subdword && src.type() == RegType::sgpr) { + src = as_vgpr(ctx, src); + } else { + /* use allocated_vec if possible */ + auto it = ctx->allocated_vec.find(src.id()); + if (it != ctx->allocated_vec.end()) { + unsigned total_size = 0; + for (unsigned i = 0; it->second[i].bytes() && (i < NIR_MAX_VEC_COMPONENTS); i++) + total_size += it->second[i].bytes(); + if (total_size != src.bytes()) + goto split; + + unsigned elem_size = it->second[0].bytes(); + + for (unsigned i = 0; i < count; i++) { + if (offsets[i] % elem_size || dst[i].bytes() % elem_size) + goto split; + } + + for (unsigned i = 0; i < count; i++) { + unsigned start_idx = offsets[i] / elem_size; + unsigned op_count = dst[i].bytes() / elem_size; + if (op_count == 1) { + if (dst_type == RegType::sgpr) + dst[i] = bld.as_uniform(it->second[start_idx]); + else + dst[i] = as_vgpr(ctx, it->second[start_idx]); + continue; + } + + aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, op_count, 1)}; + for (unsigned j = 0; j < op_count; j++) { + Temp tmp = it->second[start_idx + j]; + if (dst_type == RegType::sgpr) + tmp = bld.as_uniform(tmp); + vec->operands[j] = Operand(tmp); + } + vec->definitions[0] = Definition(dst[i]); + bld.insert(std::move(vec)); + } + return; + } + } + + if (dst_type == RegType::sgpr) + src = bld.as_uniform(src); + + split: + /* just split it */ + aco_ptr split{create_instruction(aco_opcode::p_split_vector, Format::PSEUDO, 1, count)}; + split->operands[0] = Operand(src); + for (unsigned i = 0; i < count; i++) + split->definitions[i] = Definition(dst[i]); + bld.insert(std::move(split)); +} + +bool scan_write_mask(uint32_t mask, uint32_t todo_mask, + int *start, int *count) +{ + unsigned start_elem = ffs(todo_mask) - 1; + bool skip = !(mask & (1 << start_elem)); + if (skip) + mask = ~mask & todo_mask; + + mask &= todo_mask; + + u_bit_scan_consecutive_range(&mask, start, count); - if (size == 1) - return type == RegType::vgpr ? 
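/* Worked example for the write-mask scanning helpers defined here: with a byte
 * writemask of 0b1100 and todo_mask 0b1111, the first scan_write_mask() call
 * reports a skipped run (start 0, count 2) and returns false; after
 * advance_write_mask() clears those two bits, the next call reports a real run
 * (start 2, count 2) and returns true.  Skipped runs still get entries in the
 * callers' offsets[] arrays so that split_store_data() sees the complete byte
 * partition; they are dropped again before any memory instruction is emitted. */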
as_vgpr(ctx, elems[0]) : elems[0]; + return !skip; +} - aco_ptr vec{create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)}; - for (unsigned i = 0; i < size; i++) - vec->operands[i] = Operand(elems[i]); - Temp res = {ctx->program->allocateId(), RegClass(type, size * size_hint)}; - vec->definitions[0] = Definition(res); - ctx->block->instructions.emplace_back(std::move(vec)); - return res; +void advance_write_mask(uint32_t *todo_mask, int start, int count) +{ + *todo_mask &= ~u_bit_consecutive(0, count) << start; } -void ds_write_helper(isel_context *ctx, Operand m, Temp address, Temp data, unsigned data_start, unsigned total_size, unsigned offset0, unsigned offset1, unsigned align) +void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask, + Temp address, unsigned base_offset, unsigned align) { + assert(util_is_power_of_two_nonzero(align)); + assert(util_is_power_of_two_nonzero(elem_size_bytes) && elem_size_bytes <= 8); + Builder bld(ctx->program, ctx->block); - unsigned bytes_written = 0; bool large_ds_write = ctx->options->chip_class >= GFX7; bool usable_write2 = ctx->options->chip_class >= GFX7; - while (bytes_written < total_size * 4) { - unsigned todo = total_size * 4 - bytes_written; - bool aligned8 = bytes_written % 8 == 0 && align % 8 == 0; - bool aligned16 = bytes_written % 16 == 0 && align % 16 == 0; + unsigned write_count = 0; + Temp write_datas[32]; + unsigned offsets[32]; + aco_opcode opcodes[32]; + + wrmask = widen_mask(wrmask, elem_size_bytes); + + uint32_t todo = u_bit_consecutive(0, data.bytes()); + while (todo) { + int offset, bytes; + if (!scan_write_mask(wrmask, todo, &offset, &bytes)) { + offsets[write_count] = offset; + opcodes[write_count] = aco_opcode::num_opcodes; + write_count++; + advance_write_mask(&todo, offset, bytes); + continue; + } + + bool aligned2 = offset % 2 == 0 && align % 2 == 0; + bool aligned4 = offset % 4 == 0 && align % 4 == 0; + bool aligned8 = offset % 8 == 0 && align % 8 == 0; + bool aligned16 = offset % 16 == 0 && align % 16 == 0; - aco_opcode op = aco_opcode::last_opcode; - bool write2 = false; - unsigned size = 0; - if (todo >= 16 && aligned16 && large_ds_write) { + //TODO: use ds_write_b8_d16_hi/ds_write_b16_d16_hi if beneficial + aco_opcode op = aco_opcode::num_opcodes; + if (bytes >= 16 && aligned16 && large_ds_write) { op = aco_opcode::ds_write_b128; - size = 4; - } else if (todo >= 16 && aligned8 && usable_write2) { - op = aco_opcode::ds_write2_b64; - write2 = true; - size = 4; - } else if (todo >= 12 && aligned16 && large_ds_write) { + bytes = 16; + } else if (bytes >= 12 && aligned16 && large_ds_write) { op = aco_opcode::ds_write_b96; - size = 3; - } else if (todo >= 8 && aligned8) { + bytes = 12; + } else if (bytes >= 8 && aligned8) { op = aco_opcode::ds_write_b64; - size = 2; - } else if (todo >= 8 && usable_write2) { - op = aco_opcode::ds_write2_b32; - write2 = true; - size = 2; - } else if (todo >= 4) { + bytes = 8; + } else if (bytes >= 4 && aligned4) { op = aco_opcode::ds_write_b32; - size = 1; + bytes = 4; + } else if (bytes >= 2 && aligned2) { + op = aco_opcode::ds_write_b16; + bytes = 2; + } else if (bytes >= 1) { + op = aco_opcode::ds_write_b8; + bytes = 1; } else { assert(false); } - unsigned offset = offset0 + offset1 + bytes_written; - unsigned max_offset = write2 ? 
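/* Structure of the rewritten store_lds() in this hunk: a first pass walks the byte
 * writemask and records one (offset, opcode) pair per contiguous run, choosing the
 * widest ds_write_* that the run's size and alignment allow (b128/b96/b64/b32/b16/
 * b8); a second pass pairs equal-sized b32/b64 writes whose offsets differ by a
 * multiple of the element size into ds_write2_b32/ds_write2_b64, and base_offset is
 * folded into the instruction immediate, falling back to an explicit address add
 * only when it does not fit. */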
1020 : 65535; - Temp address_offset = address; - if (offset > max_offset) { - address_offset = bld.vadd32(bld.def(v1), Operand(offset0), address_offset); - offset = offset1 + bytes_written; - } - assert(offset <= max_offset); /* offset1 shouldn't be large enough for this to happen */ + offsets[write_count] = offset; + opcodes[write_count] = op; + write_count++; + advance_write_mask(&todo, offset, bytes); + } - if (write2) { - Temp val0 = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size / 2, RegType::vgpr); - Temp val1 = extract_subvector(ctx, data, data_start + (bytes_written >> 2) + 1, size / 2, RegType::vgpr); - bld.ds(op, address_offset, val0, val1, m, offset / size / 2, (offset / size / 2) + 1); - } else { - Temp val = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size, RegType::vgpr); - bld.ds(op, address_offset, val, m, offset); - } + Operand m = load_lds_size_m0(bld); - bytes_written += size * 4; - } -} + split_store_data(ctx, RegType::vgpr, write_count, write_datas, offsets, data); -void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask, - Temp address, unsigned base_offset, unsigned align) -{ - assert(util_is_power_of_two_nonzero(align) && align >= 4); - assert(elem_size_bytes == 4 || elem_size_bytes == 8); + for (unsigned i = 0; i < write_count; i++) { + aco_opcode op = opcodes[i]; + if (op == aco_opcode::num_opcodes) + continue; - Operand m = load_lds_size_m0(ctx); + Temp data = write_datas[i]; - /* we need at most two stores, assuming that the writemask is at most 4 bits wide */ - assert(wrmask <= 0x0f); - int start[2], count[2]; - u_bit_scan_consecutive_range(&wrmask, &start[0], &count[0]); - u_bit_scan_consecutive_range(&wrmask, &start[1], &count[1]); - assert(wrmask == 0); + unsigned second = write_count; + if (usable_write2 && (op == aco_opcode::ds_write_b32 || op == aco_opcode::ds_write_b64)) { + for (second = i + 1; second < write_count; second++) { + if (opcodes[second] == op && (offsets[second] - offsets[i]) % data.bytes() == 0) { + op = data.bytes() == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64; + opcodes[second] = aco_opcode::num_opcodes; + break; + } + } + } - /* one combined store is sufficient */ - if (count[0] == count[1] && (align % elem_size_bytes) == 0 && (base_offset % elem_size_bytes) == 0) { - Builder bld(ctx->program, ctx->block); + bool write2 = op == aco_opcode::ds_write2_b32 || op == aco_opcode::ds_write2_b64; + unsigned write2_off = (offsets[second] - offsets[i]) / data.bytes(); + unsigned inline_offset = base_offset + offsets[i]; + unsigned max_offset = write2 ? (255 - write2_off) * data.bytes() : 65535; Temp address_offset = address; - if ((base_offset / elem_size_bytes) + start[1] > 255) { + if (inline_offset > max_offset) { address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset); - base_offset = 0; + inline_offset = offsets[i]; } + assert(inline_offset <= max_offset); /* offsets[i] shouldn't be large enough for this to happen */ - assert(count[0] == 1); - RegClass xtract_rc(RegType::vgpr, elem_size_bytes / 4); - - Temp val0 = emit_extract_vector(ctx, data, start[0], xtract_rc); - Temp val1 = emit_extract_vector(ctx, data, start[1], xtract_rc); - aco_opcode op = elem_size_bytes == 4 ? 
aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64; - base_offset = base_offset / elem_size_bytes; - bld.ds(op, address_offset, val0, val1, m, - base_offset + start[0], base_offset + start[1]); - return; - } - - for (unsigned i = 0; i < 2; i++) { - if (count[i] == 0) - continue; - - unsigned elem_size_words = elem_size_bytes / 4; - ds_write_helper(ctx, m, address, data, start[i] * elem_size_words, count[i] * elem_size_words, - base_offset, start[i] * elem_size_bytes, align); + if (write2) { + Temp second_data = write_datas[second]; + inline_offset /= data.bytes(); + bld.ds(op, address_offset, data, second_data, m, inline_offset, inline_offset + write2_off); + } else { + bld.ds(op, address_offset, data, m, inline_offset); + } } - return; } unsigned calculate_lds_alignment(isel_context *ctx, unsigned const_offset) @@ -3209,6 +3856,59 @@ unsigned calculate_lds_alignment(isel_context *ctx, unsigned const_offset) } +void split_buffer_store(isel_context *ctx, nir_intrinsic_instr *instr, bool smem, RegType dst_type, + Temp data, unsigned writemask, int swizzle_element_size, + unsigned *write_count, Temp *write_datas, unsigned *offsets) +{ + unsigned write_count_with_skips = 0; + bool skips[16]; + + /* determine how to split the data */ + unsigned todo = u_bit_consecutive(0, data.bytes()); + while (todo) { + int offset, bytes; + skips[write_count_with_skips] = !scan_write_mask(writemask, todo, &offset, &bytes); + offsets[write_count_with_skips] = offset; + if (skips[write_count_with_skips]) { + advance_write_mask(&todo, offset, bytes); + write_count_with_skips++; + continue; + } + + /* only supported sizes are 1, 2, 4, 8, 12 and 16 bytes and can't be + * larger than swizzle_element_size */ + bytes = MIN2(bytes, swizzle_element_size); + if (bytes % 4) + bytes = bytes > 4 ? bytes & ~0x3 : MIN2(bytes, 2); + + /* SMEM and GFX6 VMEM can't emit 12-byte stores */ + if ((ctx->program->chip_class == GFX6 || smem) && bytes == 12) + bytes = 8; + + /* dword or larger stores have to be dword-aligned */ + unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4; + unsigned align_offset = instr ? 
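/* Alignment handling for the run just computed: dword and larger buffer stores must
 * be dword-aligned, so runs of 4+ bytes whose (align_offset + offset) is not a
 * multiple of 4 are shrunk to at most 2 bytes.  The align_offset assignment being
 * completed here reads nir_intrinsic_align_mul() a second time; presumably
 * nir_intrinsic_align_offset(instr) is what is meant. */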
nir_intrinsic_align_mul(instr) : 0; + bool dword_aligned = (align_offset + offset) % 4 == 0 && align_mul % 4 == 0; + if (bytes >= 4 && !dword_aligned) + bytes = MIN2(bytes, 2); + + advance_write_mask(&todo, offset, bytes); + write_count_with_skips++; + } + + /* actually split data */ + split_store_data(ctx, dst_type, write_count_with_skips, write_datas, offsets, data); + + /* remove skips */ + for (unsigned i = 0; i < write_count_with_skips; i++) { + if (skips[i]) + continue; + write_datas[*write_count] = write_datas[i]; + offsets[*write_count] = offsets[i]; + (*write_count)++; + } +} + Temp create_vec_from_array(isel_context *ctx, Temp arr[], unsigned cnt, RegType reg_type, unsigned elem_size_bytes, unsigned split_cnt = 0u, Temp dst = Temp()) { @@ -3322,29 +4022,6 @@ void store_vmem_mubuf(isel_context *ctx, Temp src, Temp descriptor, Temp voffset } } -Temp emit_single_mubuf_load(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset, - unsigned const_offset, unsigned size_dwords, bool allow_reorder = true) -{ - assert(size_dwords != 3 || ctx->program->chip_class != GFX6); - assert(size_dwords >= 1 && size_dwords <= 4); - - Builder bld(ctx->program, ctx->block); - Temp vdata = bld.tmp(RegClass(RegType::vgpr, size_dwords)); - aco_opcode op = (aco_opcode) ((unsigned) aco_opcode::buffer_load_dword + size_dwords - 1); - const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset); - - Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1); - Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u); - Builder::Result r = bld.mubuf(op, Definition(vdata), Operand(descriptor), voffset_op, soffset_op, const_offset, - /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false, - /* disable_wqm */ false, /* glc */ true, - /* dlc*/ ctx->program->chip_class >= GFX10, /* slc */ false); - - static_cast(r.instr)->can_reorder = allow_reorder; - - return vdata; -} - void load_vmem_mubuf(isel_context *ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset, unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components, unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true) @@ -3354,35 +4031,16 @@ void load_vmem_mubuf(isel_context *ctx, Temp dst, Temp descriptor, Temp voffset, assert(!!stride != allow_combining); Builder bld(ctx->program, ctx->block); - unsigned split_cnt = num_components; - - if (elem_size_bytes == 8) { - elem_size_bytes = 4; - num_components *= 2; - } - - if (!stride) - stride = elem_size_bytes; - - unsigned load_size = 1; - if (allow_combining) { - if ((num_components % 4) == 0) - load_size = 4; - else if ((num_components % 3) == 0 && ctx->program->chip_class != GFX6) - load_size = 3; - else if ((num_components % 2) == 0) - load_size = 2; - } - unsigned num_loads = num_components / load_size; - std::array elems; - - for (unsigned i = 0; i < num_loads; ++i) { - unsigned const_offset = i * stride * load_size + base_const_offset; - elems[i] = emit_single_mubuf_load(ctx, descriptor, voffset, soffset, const_offset, load_size, allow_reorder); - } - - create_vec_from_array(ctx, elems.data(), num_loads, RegType::vgpr, load_size * 4u, split_cnt, dst); + LoadEmitInfo info = {Operand(voffset), dst, num_components, elem_size_bytes, descriptor}; + info.component_stride = allow_combining ? 0 : stride; + info.glc = true; + info.swizzle_component_size = allow_combining ? 
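/* load_vmem_mubuf() is now a thin wrapper over the generic emitter: it fills a
 * LoadEmitInfo (descriptor, soffset, glc, and a per-element stride when combining
 * is not allowed) and calls emit_mubuf_load().  When combining is disallowed, the
 * swizzle_component_size of 4 set here keeps every emitted buffer load inside one
 * 4-byte swizzled element instead of merging neighbouring components. */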
0 : 4; + info.align_mul = MIN2(elem_size_bytes, 4); + info.align_offset = 0; + info.soffset = soffset; + info.const_offset = base_const_offset; + emit_mubuf_load(ctx, bld, &info); } std::pair offset_add_from_nir(isel_context *ctx, const std::pair &base_offset, nir_src *off_src, unsigned stride = 1u) @@ -3397,7 +4055,7 @@ std::pair offset_add_from_nir(isel_context *ctx, const std::pair /* Calculate indirect offset with stride */ if (likely(indirect_offset_arg.regClass() == v1)) - with_stride = bld.v_mul_imm(bld.def(v1), indirect_offset_arg, stride); + with_stride = bld.v_mul24_imm(bld.def(v1), indirect_offset_arg, stride); else if (indirect_offset_arg.regClass() == s1) with_stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), indirect_offset_arg); else @@ -3449,7 +4107,7 @@ std::pair offset_mul(isel_context *ctx, const std::pair get_tcs_per_patch_output_vmem_offset(isel_context *ctx offs.second += const_base_offset * attr_stride; Temp rel_patch_id = get_tess_rel_patch_id(ctx); - Temp patch_off = bld.v_mul_imm(bld.def(v1), rel_patch_id, 16u); + Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, 16u); offs = offset_add(ctx, offs, std::make_pair(patch_off, per_patch_data_offset)); return offs; @@ -3591,6 +4249,9 @@ std::pair get_tcs_per_patch_output_vmem_offset(isel_context *ctx bool tcs_driver_location_matches_api_mask(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex, uint64_t mask, bool *indirect) { + if (mask == 0) + return false; + unsigned off = nir_intrinsic_base(instr) * 4u; nir_src *off_src = nir_get_io_offset_src(instr); @@ -3658,11 +4319,8 @@ bool load_input_from_temps(isel_context *ctx, nir_intrinsic_instr *instr, Temp d unsigned idx = nir_intrinsic_base(instr) + nir_intrinsic_component(instr) + 4 * nir_src_as_uint(*off_src); Temp *src = &ctx->inputs.temps[idx]; - Temp vec = create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u); - assert(vec.size() == dst.size()); + create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u, 0, dst); - Builder bld(ctx->program, ctx->block); - bld.copy(Definition(dst), vec); return true; } @@ -3670,11 +4328,6 @@ void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr) { Builder bld(ctx->program, ctx->block); - std::pair offs = get_intrinsic_io_basic_offset(ctx, instr, 4u); - Temp src = get_ssa_temp(ctx, instr->src[0].ssa); - unsigned write_mask = nir_intrinsic_write_mask(instr); - unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8u; - if (ctx->tcs_in_out_eq && store_output_to_temps(ctx, instr)) { /* When the TCS only reads this output directly and for the same vertices as its invocation id, it is unnecessary to store the VS output to LDS. */ bool indirect_write; @@ -3683,6 +4336,11 @@ void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr) return; } + std::pair offs = get_intrinsic_io_basic_offset(ctx, instr, 4u); + Temp src = get_ssa_temp(ctx, instr->src[0].ssa); + unsigned write_mask = nir_intrinsic_write_mask(instr); + unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8u; + if (ctx->stage == vertex_es || ctx->stage == tess_eval_es) { /* GFX6-8: ES stage is not merged into GS, data is passed from ES to GS in VMEM. 
*/ Temp esgs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_VS * 16u)); @@ -3707,7 +4365,7 @@ void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr) */ unsigned num_tcs_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written); Temp vertex_idx = get_arg(ctx, ctx->args->rel_auto_id); - lds_base = bld.v_mul_imm(bld.def(v1), vertex_idx, num_tcs_inputs * 16u); + lds_base = bld.v_mul24_imm(bld.def(v1), vertex_idx, num_tcs_inputs * 16u); } else { unreachable("Invalid LS or ES stage"); } @@ -3718,23 +4376,35 @@ void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr) } } -bool should_write_tcs_patch_output_to_vmem(isel_context *ctx, nir_intrinsic_instr *instr) +bool tcs_output_is_tess_factor(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex) { + if (per_vertex) + return false; + unsigned off = nir_intrinsic_base(instr) * 4u; - return off != ctx->tcs_tess_lvl_out_loc && - off != ctx->tcs_tess_lvl_in_loc; + return off == ctx->tcs_tess_lvl_out_loc || + off == ctx->tcs_tess_lvl_in_loc; + } -bool should_write_tcs_output_to_lds(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex) +bool tcs_output_is_read_by_tes(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex) { - /* When none of the appropriate outputs are read, we are OK to never write to LDS */ - if (per_vertex ? ctx->shader->info.outputs_read == 0U : ctx->shader->info.patch_outputs_read == 0u) - return false; + uint64_t mask = per_vertex + ? ctx->program->info->tcs.tes_inputs_read + : ctx->program->info->tcs.tes_patch_inputs_read; + + bool indirect_write = false; + bool output_read_by_tes = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write); + return indirect_write || output_read_by_tes; +} +bool tcs_output_is_read_by_tcs(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex) +{ uint64_t mask = per_vertex ? 
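/* The three predicates introduced in this hunk split the old write-to-VMEM/LDS
 * heuristic: tcs_output_is_tess_factor() matches the tess level outputs, which are
 * never stored to VMEM; tcs_output_is_read_by_tes() consults the
 * tes_inputs_read/tes_patch_inputs_read masks to decide whether a VMEM store is
 * needed at all; and this function checks the TCS's own outputs_read/
 * patch_outputs_read masks to decide whether the value must also be kept in LDS
 * for re-reading by the control shader itself. */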
ctx->shader->info.outputs_read : ctx->shader->info.patch_outputs_read; - bool indirect_write; + + bool indirect_write = false; bool output_read = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write); return indirect_write || output_read; } @@ -3750,10 +4420,9 @@ void visit_store_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8; unsigned write_mask = nir_intrinsic_write_mask(instr); - /* Only write to VMEM if the output is per-vertex or it's per-patch non tess factor */ - bool write_to_vmem = per_vertex || should_write_tcs_patch_output_to_vmem(ctx, instr); - /* Only write to LDS if the output is read by the shader, or it's per-patch tess factor */ - bool write_to_lds = !write_to_vmem || should_write_tcs_output_to_lds(ctx, instr, per_vertex); + bool is_tess_factor = tcs_output_is_tess_factor(ctx, instr, per_vertex); + bool write_to_vmem = !is_tess_factor && tcs_output_is_read_by_tes(ctx, instr, per_vertex); + bool write_to_lds = is_tess_factor || tcs_output_is_read_by_tcs(ctx, instr, per_vertex); if (write_to_vmem) { std::pair vmem_offs = per_vertex @@ -3792,6 +4461,8 @@ void visit_store_output(isel_context *ctx, nir_intrinsic_instr *instr) if (ctx->stage == vertex_vs || ctx->stage == tess_eval_vs || ctx->stage == fragment_fs || + ctx->stage == ngg_vertex_gs || + ctx->stage == ngg_tess_eval_gs || ctx->shader->info.stage == MESA_SHADER_GEOMETRY) { bool stored_to_temps = store_output_to_temps(ctx, instr); if (!stored_to_temps) { @@ -4473,236 +5144,25 @@ void visit_load_resource(isel_context *ctx, nir_intrinsic_instr *instr) } void load_buffer(isel_context *ctx, unsigned num_components, unsigned component_size, - Temp dst, Temp rsrc, Temp offset, int byte_align, + Temp dst, Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset, bool glc=false, bool readonly=true) { Builder bld(ctx->program, ctx->block); - bool dlc = glc && ctx->options->chip_class >= GFX10; - unsigned num_bytes = num_components * component_size; - - aco_opcode op; - if (dst.type() == RegType::vgpr || (ctx->options->chip_class < GFX8 && !readonly)) { - Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1); - Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0); - unsigned const_offset = 0; - - /* for small bit sizes add buffer for unaligned loads */ - if (byte_align) { - if (num_bytes > 2) - num_bytes += byte_align == -1 ? 4 - component_size : byte_align; - else - byte_align = 0; - } - - Temp lower = Temp(); - if (num_bytes > 16) { - assert(num_components == 3 || num_components == 4); - op = aco_opcode::buffer_load_dwordx4; - lower = bld.tmp(v4); - aco_ptr mubuf{create_instruction(op, Format::MUBUF, 3, 1)}; - mubuf->definitions[0] = Definition(lower); - mubuf->operands[0] = Operand(rsrc); - mubuf->operands[1] = vaddr; - mubuf->operands[2] = soffset; - mubuf->offen = (offset.type() == RegType::vgpr); - mubuf->glc = glc; - mubuf->dlc = dlc; - mubuf->barrier = readonly ? barrier_none : barrier_buffer; - mubuf->can_reorder = readonly; - bld.insert(std::move(mubuf)); - emit_split_vector(ctx, lower, 2); - num_bytes -= 16; - const_offset = 16; - } else if (num_bytes == 12 && ctx->options->chip_class == GFX6) { - /* GFX6 doesn't support loading vec3, expand to vec4. 
*/ - num_bytes = 16; - } - - switch (num_bytes) { - case 1: - op = aco_opcode::buffer_load_ubyte; - break; - case 2: - op = aco_opcode::buffer_load_ushort; - break; - case 3: - case 4: - op = aco_opcode::buffer_load_dword; - break; - case 5: - case 6: - case 7: - case 8: - op = aco_opcode::buffer_load_dwordx2; - break; - case 10: - case 12: - assert(ctx->options->chip_class > GFX6); - op = aco_opcode::buffer_load_dwordx3; - break; - case 16: - op = aco_opcode::buffer_load_dwordx4; - break; - default: - unreachable("Load SSBO not implemented for this size."); - } - aco_ptr mubuf{create_instruction(op, Format::MUBUF, 3, 1)}; - mubuf->operands[0] = Operand(rsrc); - mubuf->operands[1] = vaddr; - mubuf->operands[2] = soffset; - mubuf->offen = (offset.type() == RegType::vgpr); - mubuf->glc = glc; - mubuf->dlc = dlc; - mubuf->barrier = readonly ? barrier_none : barrier_buffer; - mubuf->can_reorder = readonly; - mubuf->offset = const_offset; - aco_ptr instr = std::move(mubuf); - - if (dst.regClass().is_subdword()) { - Temp vec = num_bytes <= 4 ? bld.tmp(v1) : num_bytes <= 8 ? bld.tmp(v2) : bld.tmp(v3); - instr->definitions[0] = Definition(vec); - bld.insert(std::move(instr)); - - if (byte_align == -1 || (byte_align && dst.type() == RegType::sgpr)) { - Operand align = byte_align == -1 ? Operand(offset) : Operand((uint32_t)byte_align); - Temp tmp[3] = {vec, vec, vec}; - - if (vec.size() == 3) { - tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1); - bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec); - } else if (vec.size() == 2) { - tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1]; - bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec); - } - for (unsigned i = 0; i < dst.size(); i++) - tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], align); - - vec = tmp[0]; - if (dst.size() == 2) - vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]); - - byte_align = 0; - } - - if (dst.type() == RegType::vgpr && num_components == 1) { - bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), vec, Operand(byte_align / component_size)); - } else { - trim_subdword_vector(ctx, vec, dst, 4 * vec.size() / component_size, ((1 << num_components) - 1) << byte_align / component_size); - } - - return; - - } else if (dst.size() > 4) { - assert(lower != Temp()); - Temp upper = bld.tmp(RegType::vgpr, dst.size() - lower.size()); - instr->definitions[0] = Definition(upper); - bld.insert(std::move(instr)); - if (dst.size() == 8) - emit_split_vector(ctx, upper, 2); - instr.reset(create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, dst.size() / 2, 1)); - instr->operands[0] = Operand(emit_extract_vector(ctx, lower, 0, v2)); - instr->operands[1] = Operand(emit_extract_vector(ctx, lower, 1, v2)); - instr->operands[2] = Operand(emit_extract_vector(ctx, upper, 0, v2)); - if (dst.size() == 8) - instr->operands[3] = Operand(emit_extract_vector(ctx, upper, 1, v2)); - } else if (dst.size() == 3 && ctx->options->chip_class == GFX6) { - Temp vec = bld.tmp(v4); - instr->definitions[0] = Definition(vec); - bld.insert(std::move(instr)); - emit_split_vector(ctx, vec, 4); - - instr.reset(create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1)); - instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1)); - instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1)); - instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1)); - } - - if 
(dst.type() == RegType::sgpr) { - Temp vec = bld.tmp(RegType::vgpr, dst.size()); - instr->definitions[0] = Definition(vec); - bld.insert(std::move(instr)); - expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1); - } else { - instr->definitions[0] = Definition(dst); - bld.insert(std::move(instr)); - emit_split_vector(ctx, dst, num_components); - } - } else { - /* for small bit sizes add buffer for unaligned loads */ - if (byte_align) - num_bytes += byte_align == -1 ? 4 - component_size : byte_align; - switch (num_bytes) { - case 1: - case 2: - case 3: - case 4: - op = aco_opcode::s_buffer_load_dword; - break; - case 5: - case 6: - case 7: - case 8: - op = aco_opcode::s_buffer_load_dwordx2; - break; - case 10: - case 12: - case 16: - op = aco_opcode::s_buffer_load_dwordx4; - break; - case 24: - case 32: - op = aco_opcode::s_buffer_load_dwordx8; - break; - default: - unreachable("Load SSBO not implemented for this size."); - } + bool use_smem = dst.type() != RegType::vgpr && ((ctx->options->chip_class >= GFX8 && component_size >= 4) || readonly); + if (use_smem) offset = bld.as_uniform(offset); - aco_ptr load{create_instruction(op, Format::SMEM, 2, 1)}; - load->operands[0] = Operand(rsrc); - load->operands[1] = Operand(offset); - assert(load->operands[1].getTemp().type() == RegType::sgpr); - load->definitions[0] = Definition(dst); - load->glc = glc; - load->dlc = dlc; - load->barrier = readonly ? barrier_none : barrier_buffer; - load->can_reorder = false; // FIXME: currently, it doesn't seem beneficial due to how our scheduler works - assert(ctx->options->chip_class >= GFX8 || !glc); - - /* adjust misaligned small bit size loads */ - if (byte_align) { - Temp vec = num_bytes <= 4 ? bld.tmp(s1) : num_bytes <= 8 ? bld.tmp(s2) : bld.tmp(s4); - load->definitions[0] = Definition(vec); - bld.insert(std::move(load)); - Operand byte_offset = byte_align > 0 ? Operand(uint32_t(byte_align)) : Operand(offset); - byte_align_scalar(ctx, vec, byte_offset, dst); - - /* trim vector */ - } else if (dst.size() == 3) { - Temp vec = bld.tmp(s4); - load->definitions[0] = Definition(vec); - bld.insert(std::move(load)); - emit_split_vector(ctx, vec, 4); - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - emit_extract_vector(ctx, vec, 0, s1), - emit_extract_vector(ctx, vec, 1, s1), - emit_extract_vector(ctx, vec, 2, s1)); - } else if (dst.size() == 6) { - Temp vec = bld.tmp(s8); - load->definitions[0] = Definition(vec); - bld.insert(std::move(load)); - emit_split_vector(ctx, vec, 4); - - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - emit_extract_vector(ctx, vec, 0, s2), - emit_extract_vector(ctx, vec, 1, s2), - emit_extract_vector(ctx, vec, 2, s2)); - } else { - bld.insert(std::move(load)); - } - emit_split_vector(ctx, dst, num_components); - } + LoadEmitInfo info = {Operand(offset), dst, num_components, component_size, rsrc}; + info.glc = glc; + info.barrier = readonly ? 
barrier_none : barrier_buffer; + info.can_reorder = readonly; + info.align_mul = align_mul; + info.align_offset = align_offset; + if (use_smem) + emit_smem_load(ctx, bld, &info); + else + emit_mubuf_load(ctx, bld, &info); } void visit_load_ubo(isel_context *ctx, nir_intrinsic_instr *instr) @@ -4741,13 +5201,8 @@ void visit_load_ubo(isel_context *ctx, nir_intrinsic_instr *instr) rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u)); } unsigned size = instr->dest.ssa.bit_size / 8; - int byte_align = 0; - if (size < 4) { - unsigned align_mul = nir_intrinsic_align_mul(instr); - unsigned align_offset = nir_intrinsic_align_offset(instr); - byte_align = align_mul % 4 == 0 ? align_offset : -1; - } - load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), byte_align); + load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), + nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr)); } void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr) @@ -4873,8 +5328,7 @@ void visit_load_constant(isel_context *ctx, nir_intrinsic_instr *instr) Operand(desc_type)); unsigned size = instr->dest.ssa.bit_size / 8; // TODO: get alignment information for subdword constants - unsigned byte_align = size < 4 ? -1 : 0; - load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, byte_align); + load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0); } void visit_discard_if(isel_context *ctx, nir_intrinsic_instr *instr) @@ -5688,13 +6142,8 @@ void visit_load_ssbo(isel_context *ctx, nir_intrinsic_instr *instr) bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT); unsigned size = instr->dest.ssa.bit_size / 8; - int byte_align = 0; - if (size < 4) { - unsigned align_mul = nir_intrinsic_align_mul(instr); - unsigned align_offset = nir_intrinsic_align_offset(instr); - byte_align = align_mul % 4 == 0 ? 
align_offset : -1; - } - load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), byte_align, glc, false); + load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), + nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, false); } void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr) @@ -5926,180 +6375,43 @@ void visit_atomic_ssbo(isel_context *ctx, nir_intrinsic_instr *instr) mubuf->offset = 0; mubuf->offen = (offset.type() == RegType::vgpr); mubuf->glc = return_previous; - mubuf->dlc = false; /* Not needed for atomics */ - mubuf->disable_wqm = true; - mubuf->barrier = barrier_buffer; - ctx->program->needs_exact = true; - ctx->block->instructions.emplace_back(std::move(mubuf)); -} - -void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr) { - - Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa)); - Builder bld(ctx->program, ctx->block); - Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u)); - get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false); -} - -Temp get_gfx6_global_rsrc(Builder& bld, Temp addr) -{ - uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | - S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); - - if (addr.type() == RegType::vgpr) - return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf)); - return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf)); -} - -void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr) -{ - Builder bld(ctx->program, ctx->block); - unsigned num_components = instr->num_components; - unsigned num_bytes = num_components * instr->dest.ssa.bit_size / 8; - - Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - Temp addr = get_ssa_temp(ctx, instr->src[0].ssa); - - bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT); - bool dlc = glc && ctx->options->chip_class >= GFX10; - aco_opcode op; - if (dst.type() == RegType::vgpr || (glc && ctx->options->chip_class < GFX8)) { - bool global = ctx->options->chip_class >= GFX9; - - if (ctx->options->chip_class >= GFX7) { - aco_opcode op; - switch (num_bytes) { - case 4: - op = global ? aco_opcode::global_load_dword : aco_opcode::flat_load_dword; - break; - case 8: - op = global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2; - break; - case 12: - op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3; - break; - case 16: - op = global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4; - break; - default: - unreachable("load_global not implemented for this size."); - } - - aco_ptr flat{create_instruction(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)}; - flat->operands[0] = Operand(addr); - flat->operands[1] = Operand(s1); - flat->glc = glc; - flat->dlc = dlc; - flat->barrier = barrier_buffer; - - if (dst.type() == RegType::sgpr) { - Temp vec = bld.tmp(RegType::vgpr, dst.size()); - flat->definitions[0] = Definition(vec); - ctx->block->instructions.emplace_back(std::move(flat)); - bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec); - } else { - flat->definitions[0] = Definition(dst); - ctx->block->instructions.emplace_back(std::move(flat)); - } - emit_split_vector(ctx, dst, num_components); - } else { - assert(ctx->options->chip_class == GFX6); - - /* GFX6 doesn't support loading vec3, expand to vec4. 
*/ - num_bytes = num_bytes == 12 ? 16 : num_bytes; - - aco_opcode op; - switch (num_bytes) { - case 4: - op = aco_opcode::buffer_load_dword; - break; - case 8: - op = aco_opcode::buffer_load_dwordx2; - break; - case 16: - op = aco_opcode::buffer_load_dwordx4; - break; - default: - unreachable("load_global not implemented for this size."); - } + mubuf->dlc = false; /* Not needed for atomics */ + mubuf->disable_wqm = true; + mubuf->barrier = barrier_buffer; + ctx->program->needs_exact = true; + ctx->block->instructions.emplace_back(std::move(mubuf)); +} - Temp rsrc = get_gfx6_global_rsrc(bld, addr); +void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr) { - aco_ptr mubuf{create_instruction(op, Format::MUBUF, 3, 1)}; - mubuf->operands[0] = Operand(rsrc); - mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1); - mubuf->operands[2] = Operand(0u); - mubuf->glc = glc; - mubuf->dlc = false; - mubuf->offset = 0; - mubuf->addr64 = addr.type() == RegType::vgpr; - mubuf->disable_wqm = false; - mubuf->barrier = barrier_buffer; - aco_ptr instr = std::move(mubuf); - - /* expand vector */ - if (dst.size() == 3) { - Temp vec = bld.tmp(v4); - instr->definitions[0] = Definition(vec); - bld.insert(std::move(instr)); - emit_split_vector(ctx, vec, 4); - - instr.reset(create_instruction(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1)); - instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1)); - instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1)); - instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1)); - } + Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa)); + Builder bld(ctx->program, ctx->block); + Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u)); + get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false); +} - if (dst.type() == RegType::sgpr) { - Temp vec = bld.tmp(RegType::vgpr, dst.size()); - instr->definitions[0] = Definition(vec); - bld.insert(std::move(instr)); - expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1); - bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec); - } else { - instr->definitions[0] = Definition(dst); - bld.insert(std::move(instr)); - emit_split_vector(ctx, dst, num_components); - } - } +void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr) +{ + Builder bld(ctx->program, ctx->block); + unsigned num_components = instr->num_components; + unsigned component_size = instr->dest.ssa.bit_size / 8; + + LoadEmitInfo info = {Operand(get_ssa_temp(ctx, instr->src[0].ssa)), + get_ssa_temp(ctx, &instr->dest.ssa), + num_components, component_size}; + info.glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT); + info.align_mul = nir_intrinsic_align_mul(instr); + info.align_offset = nir_intrinsic_align_offset(instr); + info.barrier = barrier_buffer; + info.can_reorder = false; + /* VMEM stores don't update the SMEM cache and it's difficult to prove that + * it's safe to use SMEM */ + bool can_use_smem = nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE; + if (info.dst.type() == RegType::vgpr || (info.glc && ctx->options->chip_class < GFX8) || !can_use_smem) { + emit_global_load(ctx, bld, &info); } else { - switch (num_bytes) { - case 4: - op = aco_opcode::s_load_dword; - break; - case 8: - op = aco_opcode::s_load_dwordx2; - break; - case 12: - case 16: - op = aco_opcode::s_load_dwordx4; - break; - default: - unreachable("load_global not implemented for this size."); - } - 
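/* In the rewritten visit_load_global() in this hunk, the SMEM path is taken only
 * for SGPR destinations when the access is marked ACCESS_NON_WRITEABLE (and, on
 * GFX6/7, not coherent), since VMEM stores do not update the scalar cache and it is
 * otherwise hard to prove SMEM is safe; every other case goes through
 * emit_global_load(), i.e. MUBUF on GFX6, FLAT on GFX7-8, GLOBAL on GFX9+. */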
aco_ptr load{create_instruction(op, Format::SMEM, 2, 1)}; - load->operands[0] = Operand(addr); - load->operands[1] = Operand(0u); - load->definitions[0] = Definition(dst); - load->glc = glc; - load->dlc = dlc; - load->barrier = barrier_buffer; - assert(ctx->options->chip_class >= GFX8 || !glc); - - if (dst.size() == 3) { - /* trim vector */ - Temp vec = bld.tmp(s4); - load->definitions[0] = Definition(vec); - ctx->block->instructions.emplace_back(std::move(load)); - emit_split_vector(ctx, vec, 4); - - bld.pseudo(aco_opcode::p_create_vector, Definition(dst), - emit_extract_vector(ctx, vec, 0, s1), - emit_extract_vector(ctx, vec, 1, s1), - emit_extract_vector(ctx, vec, 2, s1)); - } else { - ctx->block->instructions.emplace_back(std::move(load)); - } + info.offset = Operand(bld.as_uniform(info.offset)); + emit_smem_load(ctx, bld, &info); } } @@ -6433,7 +6745,8 @@ void visit_store_shared(isel_context *ctx, nir_intrinsic_instr *instr) void visit_shared_atomic(isel_context *ctx, nir_intrinsic_instr *instr) { unsigned offset = nir_intrinsic_base(instr); - Operand m = load_lds_size_m0(ctx); + Builder bld(ctx->program, ctx->block); + Operand m = load_lds_size_m0(bld); Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa)); Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa)); @@ -6526,7 +6839,6 @@ void visit_shared_atomic(isel_context *ctx, nir_intrinsic_instr *instr) } if (offset > 65535) { - Builder bld(ctx->program, ctx->block); address = bld.vadd32(bld.def(v1), Operand(offset), address); offset = 0; } @@ -6571,62 +6883,19 @@ Temp get_scratch_resource(isel_context *ctx) } void visit_load_scratch(isel_context *ctx, nir_intrinsic_instr *instr) { - assert(instr->dest.ssa.bit_size == 32 || instr->dest.ssa.bit_size == 64); Builder bld(ctx->program, ctx->block); Temp rsrc = get_scratch_resource(ctx); Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa)); Temp dst = get_ssa_temp(ctx, &instr->dest.ssa); - aco_opcode op; - switch (dst.size()) { - case 1: - op = aco_opcode::buffer_load_dword; - break; - case 2: - op = aco_opcode::buffer_load_dwordx2; - break; - case 3: - op = aco_opcode::buffer_load_dwordx3; - break; - case 4: - op = aco_opcode::buffer_load_dwordx4; - break; - case 6: - case 8: { - std::array elems; - Temp lower = bld.mubuf(aco_opcode::buffer_load_dwordx4, - bld.def(v4), rsrc, offset, - ctx->program->scratch_offset, 0, true); - Temp upper = bld.mubuf(dst.size() == 6 ? aco_opcode::buffer_load_dwordx2 : - aco_opcode::buffer_load_dwordx4, - dst.size() == 6 ? 
bld.def(v2) : bld.def(v4), - rsrc, offset, ctx->program->scratch_offset, 16, true); - emit_split_vector(ctx, lower, 2); - elems[0] = emit_extract_vector(ctx, lower, 0, v2); - elems[1] = emit_extract_vector(ctx, lower, 1, v2); - if (dst.size() == 8) { - emit_split_vector(ctx, upper, 2); - elems[2] = emit_extract_vector(ctx, upper, 0, v2); - elems[3] = emit_extract_vector(ctx, upper, 1, v2); - } else { - elems[2] = upper; - } - - aco_ptr vec{create_instruction(aco_opcode::p_create_vector, - Format::PSEUDO, dst.size() / 2, 1)}; - for (unsigned i = 0; i < dst.size() / 2; i++) - vec->operands[i] = Operand(elems[i]); - vec->definitions[0] = Definition(dst); - bld.insert(std::move(vec)); - ctx->allocated_vec.emplace(dst.id(), elems); - return; - } - default: - unreachable("Wrong dst size for nir_intrinsic_load_scratch"); - } - - bld.mubuf(op, Definition(dst), rsrc, offset, ctx->program->scratch_offset, 0, true); - emit_split_vector(ctx, dst, instr->num_components); + LoadEmitInfo info = {Operand(offset), dst, instr->dest.ssa.num_components, + instr->dest.ssa.bit_size / 8u, rsrc}; + info.align_mul = nir_intrinsic_align_mul(instr); + info.align_offset = nir_intrinsic_align_offset(instr); + info.swizzle_component_size = 16; + info.can_reorder = false; + info.soffset = ctx->program->scratch_offset; + emit_mubuf_load(ctx, bld, &info); } void visit_store_scratch(isel_context *ctx, nir_intrinsic_instr *instr) { @@ -8269,7 +8538,7 @@ void visit_tex(isel_context *ctx, nir_tex_instr *instr) if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->chip_class == GFX9) { assert(has_ddx && has_ddy && ddx.size() == 1 && ddy.size() == 1); Temp zero = bld.copy(bld.def(v1), Operand(0u)); - derivs = {ddy, zero, ddy, zero}; + derivs = {ddx, zero, ddy, zero}; } else { for (unsigned i = 0; has_ddx && i < ddx.size(); i++) derivs.emplace_back(emit_extract_vector(ctx, ddx, i, v1)); @@ -9306,11 +9575,98 @@ static void end_divergent_if(isel_context *ctx, if_context *ic) } } +static void begin_uniform_if_then(isel_context *ctx, if_context *ic, Temp cond) +{ + assert(cond.regClass() == s1); + + append_logical_end(ctx->block); + ctx->block->kind |= block_kind_uniform; + + aco_ptr branch; + aco_opcode branch_opcode = aco_opcode::p_cbranch_z; + branch.reset(create_instruction(branch_opcode, Format::PSEUDO_BRANCH, 1, 0)); + branch->operands[0] = Operand(cond); + branch->operands[0].setFixed(scc); + ctx->block->instructions.emplace_back(std::move(branch)); + + ic->BB_if_idx = ctx->block->index; + ic->BB_endif = Block(); + ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth; + ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level; + + ctx->cf_info.has_branch = false; + ctx->cf_info.parent_loop.has_divergent_branch = false; + + /** emit then block */ + Block* BB_then = ctx->program->create_and_insert_block(); + BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth; + add_edge(ic->BB_if_idx, BB_then); + append_logical_start(BB_then); + ctx->block = BB_then; +} + +static void begin_uniform_if_else(isel_context *ctx, if_context *ic) +{ + Block *BB_then = ctx->block; + + ic->uniform_has_then_branch = ctx->cf_info.has_branch; + ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch; + + if (!ic->uniform_has_then_branch) { + append_logical_end(BB_then); + /* branch from then block to endif block */ + aco_ptr branch; + branch.reset(create_instruction(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0)); + BB_then->instructions.emplace_back(std::move(branch)); + add_linear_edge(BB_then->index, 
&ic->BB_endif); + if (!ic->then_branch_divergent) + add_logical_edge(BB_then->index, &ic->BB_endif); + BB_then->kind |= block_kind_uniform; + } + + ctx->cf_info.has_branch = false; + ctx->cf_info.parent_loop.has_divergent_branch = false; + + /** emit else block */ + Block* BB_else = ctx->program->create_and_insert_block(); + BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth; + add_edge(ic->BB_if_idx, BB_else); + append_logical_start(BB_else); + ctx->block = BB_else; +} + +static void end_uniform_if(isel_context *ctx, if_context *ic) +{ + Block *BB_else = ctx->block; + + if (!ctx->cf_info.has_branch) { + append_logical_end(BB_else); + /* branch from then block to endif block */ + aco_ptr branch; + branch.reset(create_instruction(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0)); + BB_else->instructions.emplace_back(std::move(branch)); + add_linear_edge(BB_else->index, &ic->BB_endif); + if (!ctx->cf_info.parent_loop.has_divergent_branch) + add_logical_edge(BB_else->index, &ic->BB_endif); + BB_else->kind |= block_kind_uniform; + } + + ctx->cf_info.has_branch &= ic->uniform_has_then_branch; + ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent; + + /** emit endif merge block */ + if (!ctx->cf_info.has_branch) { + ctx->block = ctx->program->insert_block(std::move(ic->BB_endif)); + append_logical_start(ctx->block); + } +} + static bool visit_if(isel_context *ctx, nir_if *if_stmt) { Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa); Builder bld(ctx->program, ctx->block); aco_ptr branch; + if_context ic; if (!ctx->divergent_vals[if_stmt->condition.ssa->index]) { /* uniform condition */ /** @@ -9328,77 +9684,19 @@ static bool visit_if(isel_context *ctx, nir_if *if_stmt) * to the loop exit/entry block. Otherwise, it branches to the next * merge block. 
**/ - append_logical_end(ctx->block); - ctx->block->kind |= block_kind_uniform; - /* emit branch */ - assert(cond.regClass() == bld.lm); // TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction + assert(cond.regClass() == ctx->program->lane_mask); cond = bool_to_scalar_condition(ctx, cond); - branch.reset(create_instruction(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0)); - branch->operands[0] = Operand(cond); - branch->operands[0].setFixed(scc); - ctx->block->instructions.emplace_back(std::move(branch)); - - unsigned BB_if_idx = ctx->block->index; - Block BB_endif = Block(); - BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth; - BB_endif.kind |= ctx->block->kind & block_kind_top_level; - - /** emit then block */ - Block* BB_then = ctx->program->create_and_insert_block(); - BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth; - add_edge(BB_if_idx, BB_then); - append_logical_start(BB_then); - ctx->block = BB_then; + begin_uniform_if_then(ctx, &ic, cond); visit_cf_list(ctx, &if_stmt->then_list); - BB_then = ctx->block; - bool then_branch = ctx->cf_info.has_branch; - bool then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch; - - if (!then_branch) { - append_logical_end(BB_then); - /* branch from then block to endif block */ - branch.reset(create_instruction(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0)); - BB_then->instructions.emplace_back(std::move(branch)); - add_linear_edge(BB_then->index, &BB_endif); - if (!then_branch_divergent) - add_logical_edge(BB_then->index, &BB_endif); - BB_then->kind |= block_kind_uniform; - } - - ctx->cf_info.has_branch = false; - ctx->cf_info.parent_loop.has_divergent_branch = false; - /** emit else block */ - Block* BB_else = ctx->program->create_and_insert_block(); - BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth; - add_edge(BB_if_idx, BB_else); - append_logical_start(BB_else); - ctx->block = BB_else; + begin_uniform_if_else(ctx, &ic); visit_cf_list(ctx, &if_stmt->else_list); - BB_else = ctx->block; - if (!ctx->cf_info.has_branch) { - append_logical_end(BB_else); - /* branch from then block to endif block */ - branch.reset(create_instruction(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0)); - BB_else->instructions.emplace_back(std::move(branch)); - add_linear_edge(BB_else->index, &BB_endif); - if (!ctx->cf_info.parent_loop.has_divergent_branch) - add_logical_edge(BB_else->index, &BB_endif); - BB_else->kind |= block_kind_uniform; - } - - ctx->cf_info.has_branch &= then_branch; - ctx->cf_info.parent_loop.has_divergent_branch &= then_branch_divergent; + end_uniform_if(ctx, &ic); - /** emit endif merge block */ - if (!ctx->cf_info.has_branch) { - ctx->block = ctx->program->insert_block(std::move(BB_endif)); - append_logical_start(ctx->block); - } return !ctx->cf_info.has_branch; } else { /* non-uniform condition */ /** @@ -9426,8 +9724,6 @@ static bool visit_if(isel_context *ctx, nir_if *if_stmt) * *) Exceptions may be due to break and continue statements within loops **/ - if_context ic; - begin_divergent_if_then(ctx, &ic, cond); visit_cf_list(ctx, &if_stmt->then_list); @@ -9479,9 +9775,11 @@ static bool export_vs_varying(isel_context *ctx, int slot, bool is_pos, int *nex { assert(ctx->stage == vertex_vs || ctx->stage == tess_eval_vs || - ctx->stage == gs_copy_vs); + ctx->stage == gs_copy_vs || + ctx->stage == ngg_vertex_gs || + ctx->stage == ngg_tess_eval_gs); - int offset = ctx->stage == tess_eval_vs + int offset = (ctx->stage & sw_tes) ? 
ctx->program->info->tes.outinfo.vs_output_param_offset[slot] : ctx->program->info->vs.outinfo.vs_output_param_offset[slot]; uint64_t mask = ctx->outputs.mask[slot]; @@ -9549,17 +9847,46 @@ static void export_vs_psiz_layer_viewport(isel_context *ctx, int *next_pos) ctx->block->instructions.emplace_back(std::move(exp)); } +static void create_export_phis(isel_context *ctx) +{ + /* Used when exports are needed, but the output temps are defined in a preceding block. + * This function will set up phis in order to access the outputs in the next block. + */ + + assert(ctx->block->instructions.back()->opcode == aco_opcode::p_logical_start); + aco_ptr logical_start = aco_ptr(ctx->block->instructions.back().release()); + ctx->block->instructions.pop_back(); + + Builder bld(ctx->program, ctx->block); + + for (unsigned slot = 0; slot <= VARYING_SLOT_VAR31; ++slot) { + uint64_t mask = ctx->outputs.mask[slot]; + for (unsigned i = 0; i < 4; ++i) { + if (!(mask & (1 << i))) + continue; + + Temp old = ctx->outputs.temps[slot * 4 + i]; + Temp phi = bld.pseudo(aco_opcode::p_phi, bld.def(v1), old, Operand(v1)); + ctx->outputs.temps[slot * 4 + i] = phi; + } + } + + bld.insert(std::move(logical_start)); +} + static void create_vs_exports(isel_context *ctx) { assert(ctx->stage == vertex_vs || ctx->stage == tess_eval_vs || - ctx->stage == gs_copy_vs); + ctx->stage == gs_copy_vs || + ctx->stage == ngg_vertex_gs || + ctx->stage == ngg_tess_eval_gs); - radv_vs_output_info *outinfo = ctx->stage == tess_eval_vs + radv_vs_output_info *outinfo = (ctx->stage & sw_tes) ? &ctx->program->info->tes.outinfo : &ctx->program->info->vs.outinfo; - if (outinfo->export_prim_id) { + if (outinfo->export_prim_id && !(ctx->stage & hw_ngg_gs)) { ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1; ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = get_arg(ctx, ctx->args->vs_prim_id); } @@ -9589,8 +9916,10 @@ static void create_vs_exports(isel_context *ctx) } for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) { - if (i < VARYING_SLOT_VAR0 && i != VARYING_SLOT_LAYER && - i != VARYING_SLOT_PRIMITIVE_ID) + if (i < VARYING_SLOT_VAR0 && + i != VARYING_SLOT_LAYER && + i != VARYING_SLOT_PRIMITIVE_ID && + i != VARYING_SLOT_VIEWPORT) continue; export_vs_varying(ctx, i, false, NULL); @@ -9887,7 +10216,7 @@ static void write_tcs_tess_factors(isel_context *ctx) Temp rel_patch_id = get_tess_rel_patch_id(ctx); Temp tf_base = get_arg(ctx, ctx->args->tess_factor_offset); - Temp byte_offset = bld.v_mul_imm(bld.def(v1), rel_patch_id, stride * 4u); + Temp byte_offset = bld.v_mul24_imm(bld.def(v1), rel_patch_id, stride * 4u); unsigned tf_const_offset = 0; if (ctx->program->chip_class <= GFX8) { @@ -9949,7 +10278,7 @@ static void emit_stream_output(isel_context *ctx, Temp out[4]; bool all_undef = true; - assert(ctx->stage == vertex_vs || ctx->stage == gs_copy_vs); + assert(ctx->stage & hw_vs); for (unsigned i = 0; i < num_comps; i++) { out[i] = ctx->outputs.temps[loc * 4 + start + i]; all_undef = all_undef && !out[i].id(); @@ -10227,6 +10556,240 @@ void cleanup_cfg(Program *program) } } +Temp merged_wave_info_to_mask(isel_context *ctx, unsigned i) +{ + Builder bld(ctx->program, ctx->block); + + /* The s_bfm only cares about s0.u[5:0] so we don't need either s_bfe nor s_and here */ + Temp count = i == 0 + ? 
get_arg(ctx, ctx->args->merged_wave_info) + : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->merged_wave_info), Operand(i * 8u)); + + Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand(0u)); + Temp cond; + + if (ctx->program->wave_size == 64) { + /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */ + Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count, Operand(6u /* log2(64) */)); + cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), mask, bld.scc(active_64)); + } else { + /* We use s_bfm_b64 (not _b32) which works with 32, but we need to extract the lower half of the register */ + cond = emit_extract_vector(ctx, mask, 0, bld.lm); + } + + return cond; +} + +bool ngg_early_prim_export(isel_context *ctx) +{ + /* TODO: Check edge flags, and if they are written, return false. (Needed for OpenGL, not for Vulkan.) */ + return true; +} + +void ngg_emit_sendmsg_gs_alloc_req(isel_context *ctx) +{ + Builder bld(ctx->program, ctx->block); + + /* It is recommended to do the GS_ALLOC_REQ as soon and as quickly as possible, so we set the maximum priority (3). */ + bld.sopp(aco_opcode::s_setprio, -1u, 0x3u); + + /* Get the id of the current wave within the threadgroup (workgroup) */ + Builder::Result wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16))); + + /* Execute the following code only on the first wave (wave id 0), + * use the SCC def to tell if the wave id is zero or not. + */ + Temp cond = wave_id_in_tg.def(1).getTemp(); + if_context ic; + begin_uniform_if_then(ctx, &ic, cond); + begin_uniform_if_else(ctx, &ic); + bld.reset(ctx->block); + + /* Number of vertices output by VS/TES */ + Temp vtx_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->gs_tg_info), Operand(12u | (9u << 16u))); + /* Number of primitives output by VS/TES */ + Temp prm_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->gs_tg_info), Operand(22u | (9u << 16u))); + + /* Put the number of vertices and primitives into m0 for the GS_ALLOC_REQ */ + Temp tmp = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), prm_cnt, Operand(12u)); + tmp = bld.sop2(aco_opcode::s_or_b32, bld.m0(bld.def(s1)), bld.def(s1, scc), tmp, vtx_cnt); + + /* Request the SPI to allocate space for the primitives and vertices that will be exported by the threadgroup. */ + bld.sopp(aco_opcode::s_sendmsg, bld.m0(tmp), -1, sendmsg_gs_alloc_req); + + end_uniform_if(ctx, &ic); + + /* After the GS_ALLOC_REQ is done, reset priority to default (0). */ + bld.reset(ctx->block); + bld.sopp(aco_opcode::s_setprio, -1u, 0x0u); +} + +Temp ngg_get_prim_exp_arg(isel_context *ctx, unsigned num_vertices, const Temp vtxindex[]) +{ + Builder bld(ctx->program, ctx->block); + + if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) { + return get_arg(ctx, ctx->args->gs_vtx_offset[0]); + } + + Temp gs_invocation_id = get_arg(ctx, ctx->args->ac.gs_invocation_id); + Temp tmp; + + for (unsigned i = 0; i < num_vertices; ++i) { + assert(vtxindex[i].id()); + + if (i) + tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), vtxindex[i], Operand(10u * i), tmp); + else + tmp = vtxindex[i]; + + /* The initial edge flag is always false in tess eval shaders. 
*/ + if (ctx->stage == ngg_vertex_gs) { + Temp edgeflag = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), gs_invocation_id, Operand(8 + i), Operand(1u)); + tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), edgeflag, Operand(10u * i + 9u), tmp); + } + } + + /* TODO: Set isnull field in case of merged NGG VS+GS. */ + + return tmp; +} + +void ngg_emit_prim_export(isel_context *ctx, unsigned num_vertices_per_primitive, const Temp vtxindex[]) +{ + Builder bld(ctx->program, ctx->block); + Temp prim_exp_arg = ngg_get_prim_exp_arg(ctx, num_vertices_per_primitive, vtxindex); + + bld.exp(aco_opcode::exp, prim_exp_arg, Operand(v1), Operand(v1), Operand(v1), + 1 /* enabled mask */, V_008DFC_SQ_EXP_PRIM /* dest */, + false /* compressed */, true/* done */, false /* valid mask */); +} + +void ngg_emit_nogs_gsthreads(isel_context *ctx) +{ + /* Emit the things that NGG GS threads need to do, for shaders that don't have SW GS. + * These must always come before VS exports. + * + * It is recommended to do these as early as possible. They can be at the beginning when + * there is no SW GS and the shader doesn't write edge flags. + */ + + if_context ic; + Temp is_gs_thread = merged_wave_info_to_mask(ctx, 1); + begin_divergent_if_then(ctx, &ic, is_gs_thread); + + Builder bld(ctx->program, ctx->block); + constexpr unsigned max_vertices_per_primitive = 3; + unsigned num_vertices_per_primitive = max_vertices_per_primitive; + + if (ctx->stage == ngg_vertex_gs) { + /* TODO: optimize for points & lines */ + } else if (ctx->stage == ngg_tess_eval_gs) { + if (ctx->shader->info.tess.point_mode) + num_vertices_per_primitive = 1; + else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES) + num_vertices_per_primitive = 2; + } else { + unreachable("Unsupported NGG shader stage"); + } + + Temp vtxindex[max_vertices_per_primitive]; + vtxindex[0] = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu), + get_arg(ctx, ctx->args->gs_vtx_offset[0])); + vtxindex[1] = num_vertices_per_primitive < 2 ? Temp(0, v1) : + bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), + get_arg(ctx, ctx->args->gs_vtx_offset[0]), Operand(16u), Operand(16u)); + vtxindex[2] = num_vertices_per_primitive < 3 ? Temp(0, v1) : + bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu), + get_arg(ctx, ctx->args->gs_vtx_offset[2])); + + /* Export primitive data to the index buffer. */ + ngg_emit_prim_export(ctx, num_vertices_per_primitive, vtxindex); + + /* Export primitive ID. */ + if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) { + /* Copy Primitive IDs from GS threads to the LDS address corresponding to the ES thread of the provoking vertex. */ + Temp prim_id = get_arg(ctx, ctx->args->ac.gs_prim_id); + Temp provoking_vtx_index = vtxindex[0]; + Temp addr = bld.v_mul_imm(bld.def(v1), provoking_vtx_index, 4u); + + store_lds(ctx, 4, prim_id, 0x1u, addr, 0u, 4u); + } + + begin_divergent_if_else(ctx, &ic); + end_divergent_if(ctx, &ic); +} + +void ngg_emit_nogs_output(isel_context *ctx) +{ + /* Emits NGG GS output, for stages that don't have SW GS. */ + + if_context ic; + Builder bld(ctx->program, ctx->block); + bool late_prim_export = !ngg_early_prim_export(ctx); + + /* NGG streamout is currently disabled by default. */ + assert(!ctx->args->shader_info->so.num_outputs); + + if (late_prim_export) { + /* VS exports are output to registers in a predecessor block. Emit phis to get them into this block. */ + create_export_phis(ctx); + /* Do what we need to do in the GS threads. 
*/ + ngg_emit_nogs_gsthreads(ctx); + + /* What comes next should be executed on ES threads. */ + Temp is_es_thread = merged_wave_info_to_mask(ctx, 0); + begin_divergent_if_then(ctx, &ic, is_es_thread); + bld.reset(ctx->block); + } + + /* Export VS outputs */ + ctx->block->kind |= block_kind_export_end; + create_vs_exports(ctx); + + /* Export primitive ID */ + if (ctx->args->options->key.vs_common_out.export_prim_id) { + Temp prim_id; + + if (ctx->stage == ngg_vertex_gs) { + /* Wait for GS threads to store primitive ID in LDS. */ + bld.barrier(aco_opcode::p_memory_barrier_shared); + bld.sopp(aco_opcode::s_barrier); + + /* Calculate LDS address where the GS threads stored the primitive ID. */ + Temp wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), + get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16))); + Temp thread_id_in_wave = emit_mbcnt(ctx, bld.def(v1)); + Temp wave_id_mul = bld.v_mul24_imm(bld.def(v1), as_vgpr(ctx, wave_id_in_tg), ctx->program->wave_size); + Temp thread_id_in_tg = bld.vadd32(bld.def(v1), Operand(wave_id_mul), Operand(thread_id_in_wave)); + Temp addr = bld.v_mul24_imm(bld.def(v1), thread_id_in_tg, 4u); + + /* Load primitive ID from LDS. */ + prim_id = load_lds(ctx, 4, bld.tmp(v1), addr, 0u, 4u); + } else if (ctx->stage == ngg_tess_eval_gs) { + /* TES: Just use the patch ID as the primitive ID. */ + prim_id = get_arg(ctx, ctx->args->ac.tes_patch_id); + } else { + unreachable("unsupported NGG shader stage."); + } + + ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1; + ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = prim_id; + + export_vs_varying(ctx, VARYING_SLOT_PRIMITIVE_ID, false, nullptr); + } + + if (late_prim_export) { + begin_divergent_if_else(ctx, &ic); + end_divergent_if(ctx, &ic); + bld.reset(ctx->block); + } +} + void select_program(Program *program, unsigned shader_count, struct nir_shader *const *shaders, @@ -10235,6 +10798,7 @@ void select_program(Program *program, { isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args, false); if_context ic_merged_wave_info; + bool ngg_no_gs = ctx.stage == ngg_vertex_gs || ctx.stage == ngg_tess_eval_gs; for (unsigned i = 0; i < shader_count; i++) { nir_shader *nir = shaders[i]; @@ -10253,6 +10817,13 @@ void select_program(Program *program, split_arguments(&ctx, startpgm); } + if (ngg_no_gs) { + ngg_emit_sendmsg_gs_alloc_req(&ctx); + + if (ngg_early_prim_export(&ctx)) + ngg_emit_nogs_gsthreads(&ctx); + } + /* In a merged VS+TCS HS, the VS implementation can be completely empty. */ nir_function_impl *func = nir_shader_get_entrypoint(nir); bool empty_shader = nir_cf_list_is_empty_block(&func->body) && @@ -10261,28 +10832,10 @@ void select_program(Program *program, (nir->info.stage == MESA_SHADER_TESS_EVAL && ctx.stage == tess_eval_geometry_gs)); - bool check_merged_wave_info = ctx.tcs_in_out_eq ? i == 0 : (shader_count >= 2 && !empty_shader); + bool check_merged_wave_info = ctx.tcs_in_out_eq ? i == 0 : ((shader_count >= 2 && !empty_shader) || ngg_no_gs); bool endif_merged_wave_info = ctx.tcs_in_out_eq ? i == 1 : check_merged_wave_info; if (check_merged_wave_info) { - Builder bld(ctx.program, ctx.block); - - /* The s_bfm only cares about s0.u[5:0] so we don't need either s_bfe nor s_and here */ - Temp count = i == 0 ? 
get_arg(&ctx, args->merged_wave_info) - : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc), - get_arg(&ctx, args->merged_wave_info), Operand(i * 8u)); - - Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand(0u)); - Temp cond; - - if (ctx.program->wave_size == 64) { - /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */ - Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count, Operand(6u /* log2(64) */)); - cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), mask, bld.scc(active_64)); - } else { - /* We use s_bfm_b64 (not _b32) which works with 32, but we need to extract the lower half of the register */ - cond = emit_extract_vector(&ctx, mask, 0, bld.lm); - } - + Temp cond = merged_wave_info_to_mask(&ctx, i); begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond); } @@ -10303,11 +10856,14 @@ void select_program(Program *program, visit_cf_list(&ctx, &func->body); - if (ctx.program->info->so.num_outputs && (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs)) + if (ctx.program->info->so.num_outputs && (ctx.stage & hw_vs)) emit_streamout(&ctx, 0); - if (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs) { + if (ctx.stage & hw_vs) { create_vs_exports(&ctx); + ctx.block->kind |= block_kind_export_end; + } else if (ngg_no_gs && ngg_early_prim_export(&ctx)) { + ngg_emit_nogs_output(&ctx); } else if (nir->info.stage == MESA_SHADER_GEOMETRY) { Builder bld(ctx.program, ctx.block); bld.barrier(aco_opcode::p_memory_barrier_gs_data); @@ -10316,14 +10872,19 @@ void select_program(Program *program, write_tcs_tess_factors(&ctx); } - if (ctx.stage == fragment_fs) + if (ctx.stage == fragment_fs) { create_fs_exports(&ctx); + ctx.block->kind |= block_kind_export_end; + } if (endif_merged_wave_info) { begin_divergent_if_else(&ctx, &ic_merged_wave_info); end_divergent_if(&ctx, &ic_merged_wave_info); } + if (ngg_no_gs && !ngg_early_prim_export(&ctx)) + ngg_emit_nogs_output(&ctx); + ralloc_free(ctx.divergent_vals); if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) { @@ -10336,7 +10897,7 @@ void select_program(Program *program, program->config->float_mode = program->blocks[0].fp_mode.val; append_logical_end(ctx.block); - ctx.block->kind |= block_kind_uniform | block_kind_export_end; + ctx.block->kind |= block_kind_uniform; Builder bld(ctx.program, ctx.block); if (ctx.program->wb_smem_l1_on_end) bld.smem(aco_opcode::s_dcache_wb, false);
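
For readers following the merged_wave_info handling that this patch factors out into merged_wave_info_to_mask(), the host-side C++ sketch below models what the emitted SALU sequence computes. It is an illustrative assumption reconstructed from the instructions in the hunks above (the s_lshr_b32 by i*8, s_bfm_b64, and the s_bitcmp1 special case for 64 active invocations); the helper name is made up and is not part of the patch.

    #include <cstdint>

    /* Host-side model of merged_wave_info_to_mask(): byte i of merged_wave_info
     * holds the number of active lanes for shader part i; the result is a lane
     * mask with that many low bits set. */
    uint64_t merged_wave_info_to_mask_model(uint32_t merged_wave_info,
                                            unsigned part, unsigned wave_size)
    {
       unsigned count = (merged_wave_info >> (part * 8u)) & 0xffu;
       uint64_t mask = (1ull << (count & 0x3fu)) - 1ull; /* what s_bfm_b64 produces; it only reads bits [5:0] */
       if (wave_size == 64)
          return (count & 0x40u) ? ~0ull : mask;         /* a width of 64 cannot be encoded in s_bfm, hence the special case */
       return mask & 0xffffffffull;                      /* wave32: keep only the lower half of the 64-bit mask */
    }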
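
The NGG additions likewise pack several small fields into single registers. The sketch below is again an assumption for illustration, mirroring the s_bfe/s_lshl/s_or sequence in ngg_emit_sendmsg_gs_alloc_req() and the v_lshl_add/v_bfe sequence in ngg_get_prim_exp_arg(); the function names are made up, and the real GPU code uses v_lshl_add_u32 rather than OR, which is equivalent here because the fields do not overlap (vertex indices are assumed to fit in their 9-bit fields).

    #include <cstdint>

    /* m0 payload for the GS_ALLOC_REQ message: primitive count in bits [21:12],
     * vertex count in bits [11:0]; both counts come from 9-bit fields of
     * gs_tg_info (vertices at bit 12, primitives at bit 22). */
    uint32_t gs_alloc_req_m0_model(uint32_t gs_tg_info)
    {
       uint32_t vtx_cnt = (gs_tg_info >> 12u) & 0x1ffu;
       uint32_t prm_cnt = (gs_tg_info >> 22u) & 0x1ffu;
       return (prm_cnt << 12u) | vtx_cnt;
    }

    /* Primitive export argument: each vertex index occupies a 10-bit field,
     * with the per-vertex edge flag in the top bit of that field. Edge flags
     * are only taken from bits [8+i] of gs_invocation_id in the NGG VS case;
     * they are always false for tess eval. */
    uint32_t prim_exp_arg_model(unsigned num_vertices, const uint32_t vtxindex[],
                                uint32_t gs_invocation_id, bool is_ngg_vertex_stage)
    {
       uint32_t arg = 0;
       for (unsigned i = 0; i < num_vertices; i++) {
          arg |= vtxindex[i] << (10u * i);
          if (is_ngg_vertex_stage)
             arg |= ((gs_invocation_id >> (8u + i)) & 1u) << (10u * i + 9u);
       }
       return arg;
    }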