namespace aco {
namespace {
+#define isel_err(...) _isel_err(ctx, __FILE__, __LINE__, __VA_ARGS__)
+
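+/* Formats "msg: <NIR instruction>" into a heap buffer via open_memstream()
+ * and forwards it to _aco_err(); the isel_err() wrapper above captures the
+ * caller's file and line. */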
+static void _isel_err(isel_context *ctx, const char *file, unsigned line,
+ const nir_instr *instr, const char *msg)
+{
+ char *out;
+ size_t outsize;
+ FILE *memf = open_memstream(&out, &outsize);
+
+ fprintf(memf, "%s: ", msg);
+ nir_print_instr(instr, memf);
+ fclose(memf);
+
+ _aco_err(ctx->program, file, line, out);
+ free(out);
+}
+
class loop_info_RAII {
isel_context* ctx;
unsigned header_idx_old;
unsigned BB_if_idx;
unsigned invert_idx;
+ bool uniform_has_then_branch;
bool then_branch_divergent;
Block BB_invert;
Block BB_endif;
if (ctx->program->wave_size == 32) {
return thread_id_lo;
+ } else if (ctx->program->chip_class <= GFX7) {
+ Temp thread_id_hi = bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ return thread_id_hi;
} else {
- Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, dst, mask_hi, thread_id_lo);
return thread_id_hi;
}
}
if (index.regClass() == s1)
return bld.readlane(bld.def(s1), data, index);
- Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
-
- /* Currently not implemented on GFX6-7 */
- assert(ctx->options->chip_class >= GFX8);
-
- if (ctx->options->chip_class <= GFX9 || ctx->program->wave_size == 32) {
+ if (ctx->options->chip_class <= GFX7) {
+ /* GFX6-7: there is no bpermute instruction */
+ Operand index_op(index);
+ Operand input_data(data);
+ index_op.setLateKill(true);
+ input_data.setLateKill(true);
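+ /* lateKill: the lowered code reads these operands after writing some of
+ * the definitions, so RA must not assign them overlapping registers */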
+
+ return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(bld.lm), bld.def(bld.lm, vcc), index_op, input_data);
+ } else if (ctx->options->chip_class >= GFX10 && ctx->program->wave_size == 64) {
+ /* GFX10 wave64 mode: emulate full-wave bpermute */
+ if (!ctx->has_gfx10_wave64_bpermute) {
+ ctx->has_gfx10_wave64_bpermute = true;
+ ctx->program->config->num_shared_vgprs = 8; /* Shared VGPRs are allocated in groups of 8 */
+ ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
+ }
+
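+ /* Build a mask of all lanes whose source lane lies in the same half of
+ * the wave: v_cmp_ge_u32(31, index) is set when the index is in the low
+ * half, so keep that result for the low 32 lanes and use its inverse
+ * for the high 32 lanes. */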
+ Temp index_is_lo = bld.vopc(aco_opcode::v_cmp_ge_u32, bld.def(bld.lm), Operand(31u), index);
+ Builder::Result index_is_lo_split = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), bld.def(s1), index_is_lo);
+ Temp index_is_lo_n1 = bld.sop1(aco_opcode::s_not_b32, bld.def(s1), bld.def(s1, scc), index_is_lo_split.def(1).getTemp());
+ Operand same_half = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), index_is_lo_split.def(0).getTemp(), index_is_lo_n1);
+ Operand index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
+ Operand input_data(data);
+
+ index_x4.setLateKill(true);
+ input_data.setLateKill(true);
+ same_half.setLateKill(true);
+
+ return bld.pseudo(aco_opcode::p_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc), index_x4, input_data, same_half);
+ } else {
+ /* GFX8-9 or GFX10 wave32: bpermute works normally */
+ Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);
return bld.ds(aco_opcode::ds_bpermute_b32, bld.def(v1), index_x4, data);
}
+}
- /* GFX10, wave64 mode:
- * The bpermute instruction is limited to half-wave operation, which means that it can't
- * properly support subgroup shuffle like older generations (or wave32 mode), so we
- * emulate it here.
- */
- if (!ctx->has_gfx10_wave64_bpermute) {
- ctx->has_gfx10_wave64_bpermute = true;
- ctx->program->config->num_shared_vgprs = 8; /* Shared VGPRs are allocated in groups of 8 */
- ctx->program->vgpr_limit -= 4; /* We allocate 8 shared VGPRs, so we'll have 4 fewer normal VGPRs */
- }
+static Temp emit_masked_swizzle(isel_context *ctx, Builder &bld, Temp src, unsigned mask)
+{
+ if (ctx->options->chip_class >= GFX8) {
+ unsigned and_mask = mask & 0x1f;
+ unsigned or_mask = (mask >> 5) & 0x1f;
+ unsigned xor_mask = (mask >> 10) & 0x1f;
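+ /* ds_swizzle's bit-mask mode computes, within each group of 32 lanes,
+ * new_lane = ((lane & and_mask) | or_mask) ^ xor_mask. When and_mask
+ * keeps all bits (0x1f) and or/xor only touch the low two bits, the
+ * swizzle stays within a quad and maps to DPP, e.g. or_mask=0,
+ * xor_mask=1 swaps even/odd lanes: dpp_quad_perm(1, 0, 3, 2).
+ * dpp_ctrl == 0xffff below means no DPP equivalent was found. */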
+
+ uint16_t dpp_ctrl = 0xffff;
+
+ // TODO: we could use DPP8 for some swizzles
+ if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
+ unsigned res[4] = {0, 1, 2, 3};
+ for (unsigned i = 0; i < 4; i++)
+ res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
+ dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
+ dpp_ctrl = dpp_row_rr(8);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
+ dpp_ctrl = dpp_row_mirror;
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
+ dpp_ctrl = dpp_row_half_mirror;
+ }
- Temp lane_id = emit_mbcnt(ctx, bld.def(v1));
- Temp lane_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), lane_id);
- Temp index_is_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x20u), index);
- Temp cmp = bld.vopc(aco_opcode::v_cmp_eq_u32, bld.def(bld.lm, vcc), lane_is_hi, index_is_hi);
+ if (dpp_ctrl != 0xffff)
+ return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
+ }
- return bld.reduction(aco_opcode::p_wave64_bpermute, bld.def(v1), bld.def(s2), bld.def(s1, scc),
- bld.vcc(cmp), Operand(v2.as_linear()), index_x4, data, gfx10_wave64_bpermute);
+ return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
}
Temp as_vgpr(isel_context *ctx, Temp val)
assert(idx == 0);
return src;
}
- assert(src.size() > idx);
+
+ assert(src.bytes() > (idx * dst_rc.bytes()));
Builder bld(ctx->program, ctx->block);
auto it = ctx->allocated_vec.find(src.id());
- /* the size check needs to be early because elements other than 0 may be garbage */
- if (it != ctx->allocated_vec.end() && it->second[0].size() == dst_rc.size()) {
+ if (it != ctx->allocated_vec.end() && dst_rc.bytes() == it->second[idx].regClass().bytes()) {
if (it->second[idx].regClass() == dst_rc) {
return it->second[idx];
} else {
- assert(dst_rc.size() == it->second[idx].regClass().size());
+ assert(!dst_rc.is_subdword());
assert(dst_rc.type() == RegType::vgpr && it->second[idx].type() == RegType::sgpr);
return bld.copy(bld.def(dst_rc), it->second[idx]);
}
}
- if (src.size() == dst_rc.size()) {
+ if (dst_rc.is_subdword())
+ src = as_vgpr(ctx, src);
+
+ if (src.bytes() == dst_rc.bytes()) {
assert(idx == 0);
return bld.copy(bld.def(dst_rc), src);
} else {
return;
if (ctx->allocated_vec.find(vec_src.id()) != ctx->allocated_vec.end())
return;
+ RegClass rc;
+ if (num_components > vec_src.size()) {
+ if (vec_src.type() == RegType::sgpr) {
+ /* should still help get_alu_src() */
+ emit_split_vector(ctx, vec_src, vec_src.size());
+ return;
+ }
+ /* sub-dword split */
+ rc = RegClass(RegType::vgpr, vec_src.bytes() / num_components).as_subdword();
+ } else {
+ rc = RegClass(vec_src.type(), vec_src.size() / num_components);
+ }
aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_components)};
split->operands[0] = Operand(vec_src);
std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
for (unsigned i = 0; i < num_components; i++) {
- elems[i] = {ctx->program->allocateId(), RegClass(vec_src.type(), vec_src.size() / num_components)};
+ elems[i] = {ctx->program->allocateId(), rc};
split->definitions[i] = Definition(elems[i]);
}
ctx->block->instructions.emplace_back(std::move(split));
ctx->allocated_vec.emplace(dst.id(), elems);
}
+/* adjust misaligned small-bit-size scalar loads: shift the loaded dwords right so the addressed bytes start at bit 0 of dst */
+void byte_align_scalar(isel_context *ctx, Temp vec, Operand offset, Temp dst)
+{
+ Builder bld(ctx->program, ctx->block);
+ Operand shift;
+ Temp select = Temp();
+ if (offset.isConstant()) {
+ assert(offset.constantValue() && offset.constantValue() < 4);
+ shift = Operand(offset.constantValue() * 8);
+ } else {
+ /* bit_offset = 8 * (offset & 0x3) */
+ Temp tmp = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), offset, Operand(3u));
+ select = bld.tmp(s1);
+ shift = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.scc(Definition(select)), tmp, Operand(3u));
+ }
+
+ if (vec.size() == 1) {
+ bld.sop2(aco_opcode::s_lshr_b32, Definition(dst), bld.def(s1, scc), vec, shift);
+ } else if (vec.size() == 2) {
+ Temp tmp = dst.size() == 2 ? dst : bld.tmp(s2);
+ bld.sop2(aco_opcode::s_lshr_b64, Definition(tmp), bld.def(s1, scc), vec, shift);
+ if (tmp == dst)
+ emit_split_vector(ctx, dst, 2);
+ else
+ emit_extract_vector(ctx, tmp, 0, dst);
+ } else if (vec.size() == 4) {
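+ /* 16-byte vector: funnel shift - the low 8 bytes are shifted right and
+ * the missing top bytes are pulled in from the third dword */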
+ Temp lo = bld.tmp(s2), hi = bld.tmp(s2);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
+ hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
+ if (select != Temp())
+ hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), bld.scc(select));
+ lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
+ Temp mid = bld.tmp(s1);
+ lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
+ /* the third dword supplies the top `offset` bytes of dst, so it is
+ * shifted left by the complement of the bit offset */
+ Operand compl_shift;
+ if (shift.isConstant())
+ compl_shift = Operand(32u - shift.constantValue());
+ else
+ compl_shift = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(32u), shift);
+ hi = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), hi, compl_shift);
+ mid = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), hi, mid);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, mid);
+ emit_split_vector(ctx, dst, 2);
+ }
+}
+
+void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
+{
+ Builder bld(ctx->program, ctx->block);
+ if (offset.isTemp()) {
+ Temp tmp[4] = {vec, vec, vec, vec};
+
+ if (vec.size() == 4) {
+ tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), Definition(tmp[3]), vec);
+ } else if (vec.size() == 3) {
+ tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
+ } else if (vec.size() == 2) {
+ tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
+ bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
+ }
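+ /* v_alignbyte_b32 dst, hi, lo, b returns 4 bytes starting at byte b of
+ * the 64-bit pair (hi:lo), so applying it to each adjacent dword pair
+ * shifts the whole vector right by a dynamic byte offset */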
+ for (unsigned i = 0; i < dst.size(); i++)
+ tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);
+
+ vec = tmp[0];
+ if (dst.size() == 2)
+ vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
+
+ offset = Operand(0u);
+ }
+
+ unsigned num_components = vec.bytes() / component_size;
+ if (vec.regClass() == dst.regClass()) {
+ assert(offset.constantValue() == 0);
+ bld.copy(Definition(dst), vec);
+ emit_split_vector(ctx, dst, num_components);
+ return;
+ }
+
+ emit_split_vector(ctx, vec, num_components);
+ std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
+ RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
+
+ assert(offset.constantValue() % component_size == 0);
+ unsigned skip = offset.constantValue() / component_size;
+ for (unsigned i = skip; i < num_components; i++)
+ elems[i - skip] = emit_extract_vector(ctx, vec, i, rc);
+
+ /* if dst is vgpr - build a shrunk vector from the components left after the skip */
+ if (dst.type() == RegType::vgpr) {
+ num_components = dst.bytes() / component_size;
+ aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
+ for (unsigned i = 0; i < num_components; i++)
+ create_vec->operands[i] = Operand(elems[i]);
+ create_vec->definitions[0] = Definition(dst);
+ bld.insert(std::move(create_vec));
+
+ /* if dst is sgpr - move the src to sgpr and, if needed, align it there with byte_align_scalar() */
+ } else if (skip) {
+ vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
+ byte_align_scalar(ctx, vec, offset, dst);
+ } else {
+ assert(dst.size() == vec.size());
+ bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
+ }
+
+ ctx->allocated_vec.emplace(dst.id(), elems);
+}
+
Temp bool_to_vector_condition(isel_context *ctx, Temp val, Temp dst = Temp(0, s2))
{
Builder bld(ctx->program, ctx->block);
return emit_wqm(ctx, tmp, dst);
}
+Temp convert_int(isel_context *ctx, Builder& bld, Temp src, unsigned src_bits, unsigned dst_bits, bool is_signed, Temp dst=Temp())
+{
+ if (!dst.id()) {
+ if (dst_bits % 32 == 0 || src.type() == RegType::sgpr)
+ dst = bld.tmp(src.type(), DIV_ROUND_UP(dst_bits, 32u));
+ else
+ dst = bld.tmp(RegClass(RegType::vgpr, dst_bits / 8u).as_subdword());
+ }
+
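+ /* conversions that only drop high bits need no ALU: a same-size
+ * narrowing is a copy and a smaller dst just takes the low element */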
+ if (dst.bytes() == src.bytes() && dst_bits < src_bits)
+ return bld.copy(Definition(dst), src);
+ else if (dst.bytes() < src.bytes())
+ return bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u));
+
+ Temp tmp = dst;
+ if (dst_bits == 64)
+ tmp = src_bits == 32 ? src : bld.tmp(src.type(), 1);
+
+ if (tmp == src) {
+ } else if (src.regClass() == s1) {
+ if (is_signed)
+ bld.sop1(src_bits == 8 ? aco_opcode::s_sext_i32_i8 : aco_opcode::s_sext_i32_i16, Definition(tmp), src);
+ else
+ bld.sop2(aco_opcode::s_and_b32, Definition(tmp), bld.def(s1, scc), Operand(src_bits == 8 ? 0xFFu : 0xFFFFu), src);
+ } else if (ctx->options->chip_class >= GFX8) {
+ assert(src_bits != 8 || src.regClass() == v1b);
+ assert(src_bits != 16 || src.regClass() == v2b);
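+ /* on GFX8+, a single SDWA v_mov_b32 performs the extension: the source
+ * select picks the low byte/word and sign- or zero-extends it */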
+ aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
+ sdwa->operands[0] = Operand(src);
+ sdwa->definitions[0] = Definition(tmp);
+ if (is_signed)
+ sdwa->sel[0] = src_bits == 8 ? sdwa_sbyte : sdwa_sword;
+ else
+ sdwa->sel[0] = src_bits == 8 ? sdwa_ubyte : sdwa_uword;
+ sdwa->dst_sel = tmp.bytes() == 2 ? sdwa_uword : sdwa_udword;
+ bld.insert(std::move(sdwa));
+ } else {
+ assert(ctx->options->chip_class == GFX6 || ctx->options->chip_class == GFX7);
+ aco_opcode opcode = is_signed ? aco_opcode::v_bfe_i32 : aco_opcode::v_bfe_u32;
+ bld.vop3(opcode, Definition(tmp), src, Operand(0u), Operand(src_bits == 8 ? 8u : 16u));
+ }
+
+ if (dst_bits == 64) {
+ if (is_signed && dst.regClass() == s2) {
+ Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), tmp, Operand(31u));
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
+ } else if (is_signed && dst.regClass() == v2) {
+ Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), tmp);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, high);
+ } else {
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u));
+ }
+ }
+
+ return dst;
+}
+
+enum sgpr_extract_mode {
+ sgpr_extract_sext,
+ sgpr_extract_zext,
+ sgpr_extract_undef,
+};
+
+Temp extract_8_16_bit_sgpr_element(isel_context *ctx, Temp dst, nir_alu_src *src, sgpr_extract_mode mode)
+{
+ Temp vec = get_ssa_temp(ctx, src->src.ssa);
+ unsigned src_size = src->src.ssa->bit_size;
+ unsigned swizzle = src->swizzle[0];
+
+ if (vec.size() > 1) {
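+ /* the 16-bit vector spans more than one SGPR: select the containing dword first */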
+ assert(src_size == 16);
+ vec = emit_extract_vector(ctx, vec, swizzle / 2, s1);
+ swizzle = swizzle & 1;
+ }
+
+ Builder bld(ctx->program, ctx->block);
+ unsigned offset = src_size * swizzle;
+ Temp tmp = dst.regClass() == s2 ? bld.tmp(s1) : dst;
+
+ if (mode == sgpr_extract_undef && swizzle == 0) {
+ bld.copy(Definition(tmp), vec);
+ } else if (mode == sgpr_extract_undef || (offset == 24 && mode == sgpr_extract_zext)) {
+ bld.sop2(aco_opcode::s_lshr_b32, Definition(tmp), bld.def(s1, scc), vec, Operand(offset));
+ } else if (src_size == 8 && swizzle == 0 && mode == sgpr_extract_sext) {
+ bld.sop1(aco_opcode::s_sext_i32_i8, Definition(tmp), vec);
+ } else if (src_size == 16 && swizzle == 0 && mode == sgpr_extract_sext) {
+ bld.sop1(aco_opcode::s_sext_i32_i16, Definition(tmp), vec);
+ } else {
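+ /* s_bfe's second operand packs the field: width in bits [22:16] and
+ * offset in the low bits */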
+ aco_opcode op = mode == sgpr_extract_zext ? aco_opcode::s_bfe_u32 : aco_opcode::s_bfe_i32;
+ bld.sop2(op, Definition(tmp), bld.def(s1, scc), vec, Operand((src_size << 16) | offset));
+ }
+
+ if (dst.regClass() == s2)
+ convert_int(ctx, bld, tmp, 32, 64, mode == sgpr_extract_sext, dst);
+
+ return dst;
+}
+
Temp get_alu_src(struct isel_context *ctx, nir_alu_src src, unsigned size=1)
{
if (src.src.ssa->num_components == 1 && src.swizzle[0] == 0 && size == 1)
}
Temp vec = get_ssa_temp(ctx, src.src.ssa);
- unsigned elem_size = vec.size() / src.src.ssa->num_components;
- assert(elem_size > 0); /* TODO: 8 and 16-bit vectors not supported */
- assert(vec.size() % elem_size == 0);
+ unsigned elem_size = vec.bytes() / src.src.ssa->num_components;
+ assert(elem_size > 0);
+ assert(vec.bytes() % elem_size == 0);
- RegClass elem_rc = RegClass(vec.type(), elem_size);
+ if (elem_size < 4 && vec.type() == RegType::sgpr) {
+ assert(src.src.ssa->bit_size == 8 || src.src.ssa->bit_size == 16);
+ assert(size == 1);
+ return extract_8_16_bit_sgpr_element(
+ ctx, Temp(ctx->program->allocateId(), s1), &src, sgpr_extract_undef);
+ }
+
+ RegClass elem_rc = elem_size < 4 ? RegClass(vec.type(), elem_size).as_subdword() : RegClass(vec.type(), elem_size / 4);
if (size == 1) {
return emit_extract_vector(ctx, vec, src.swizzle[0], elem_rc);
} else {
elems[i] = emit_extract_vector(ctx, vec, src.swizzle[i], elem_rc);
vec_instr->operands[i] = Operand{elems[i]};
}
- Temp dst{ctx->program->allocateId(), RegClass(vec.type(), elem_size * size)};
+ Temp dst{ctx->program->allocateId(), RegClass(vec.type(), elem_size * size / 4)};
vec_instr->definitions[0] = Definition(dst);
ctx->block->instructions.emplace_back(std::move(vec_instr));
ctx->allocated_vec.emplace(dst.id(), elems);
sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
sop2->definitions[0] = Definition(dst);
+ if (instr->no_unsigned_wrap)
+ sop2->definitions[0].setNUW(true);
if (writes_scc)
sop2->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
ctx->block->instructions.emplace_back(std::move(sop2));
bool commutative, bool swap_srcs=false, bool flush_denorms = false)
{
Builder bld(ctx->program, ctx->block);
+ bld.is_precise = instr->exact;
+
Temp src0 = get_alu_src(ctx, instr->src[swap_srcs ? 1 : 0]);
Temp src1 = get_alu_src(ctx, instr->src[swap_srcs ? 0 : 1]);
if (src1.type() == RegType::sgpr) {
Temp t = src0;
src0 = src1;
src1 = t;
- } else if (src0.type() == RegType::vgpr &&
- op != aco_opcode::v_madmk_f32 &&
- op != aco_opcode::v_madak_f32 &&
- op != aco_opcode::v_madmk_f16 &&
- op != aco_opcode::v_madak_f16) {
- /* If the instruction is not commutative, we emit a VOP3A instruction */
- bld.vop2_e64(op, Definition(dst), src0, src1);
- return;
} else {
- src1 = bld.copy(bld.def(RegType::vgpr, src1.size()), src1); //TODO: as_vgpr
+ src1 = as_vgpr(ctx, src1);
}
}
}
}
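+/* There are no 64-bit VALU bitwise opcodes, so 64-bit iand/ior/ixor on
+ * VGPRs apply the 32-bit opcode to the low and high halves separately. */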
+void emit_vop2_instruction_logic64(isel_context *ctx, nir_alu_instr *instr,
+ aco_opcode op, Temp dst)
+{
+ Builder bld(ctx->program, ctx->block);
+ bld.is_precise = instr->exact;
+
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = get_alu_src(ctx, instr->src[1]);
+
+ if (src1.type() == RegType::sgpr) {
+ assert(src0.type() == RegType::vgpr);
+ std::swap(src0, src1);
+ }
+
+ Temp src00 = bld.tmp(src0.type(), 1);
+ Temp src01 = bld.tmp(src0.type(), 1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
+ Temp src10 = bld.tmp(v1);
+ Temp src11 = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
+ Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
+ Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
+}
+
void emit_vop3a_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
bool flush_denorms = false)
{
src2 = as_vgpr(ctx, src2);
Builder bld(ctx->program, ctx->block);
+ bld.is_precise = instr->exact;
if (flush_denorms && ctx->program->chip_class < GFX9) {
assert(dst.size() == 1);
Temp tmp = bld.vop3(op, Definition(dst), src0, src1, src2);
void emit_vop1_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
{
Builder bld(ctx->program, ctx->block);
- bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
+ bld.is_precise = instr->exact;
+ if (dst.type() == RegType::sgpr)
+ bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
+ bld.vop1(op, bld.def(RegType::vgpr, dst.size()), get_alu_src(ctx, instr->src[0])));
+ else
+ bld.vop1(op, Definition(dst), get_alu_src(ctx, instr->src[0]));
}
void emit_vopc_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst)
if (src0.type() == RegType::vgpr) {
/* to swap the operands, we might also have to change the opcode */
switch (op) {
+ case aco_opcode::v_cmp_lt_f16:
+ op = aco_opcode::v_cmp_gt_f16;
+ break;
+ case aco_opcode::v_cmp_ge_f16:
+ op = aco_opcode::v_cmp_le_f16;
+ break;
+ case aco_opcode::v_cmp_lt_i16:
+ op = aco_opcode::v_cmp_gt_i16;
+ break;
+ case aco_opcode::v_cmp_ge_i16:
+ op = aco_opcode::v_cmp_le_i16;
+ break;
+ case aco_opcode::v_cmp_lt_u16:
+ op = aco_opcode::v_cmp_gt_u16;
+ break;
+ case aco_opcode::v_cmp_ge_u16:
+ op = aco_opcode::v_cmp_le_u16;
+ break;
case aco_opcode::v_cmp_lt_f32:
op = aco_opcode::v_cmp_gt_f32;
break;
}
void emit_comparison(isel_context *ctx, nir_alu_instr *instr, Temp dst,
- aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes)
+ aco_opcode v16_op, aco_opcode v32_op, aco_opcode v64_op, aco_opcode s32_op = aco_opcode::num_opcodes, aco_opcode s64_op = aco_opcode::num_opcodes)
{
- aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : s32_op;
- aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : v32_op;
- bool divergent_vals = ctx->divergent_vals[instr->dest.dest.ssa.index];
+ aco_opcode s_op = instr->src[0].src.ssa->bit_size == 64 ? s64_op : instr->src[0].src.ssa->bit_size == 32 ? s32_op : aco_opcode::num_opcodes;
+ aco_opcode v_op = instr->src[0].src.ssa->bit_size == 64 ? v64_op : instr->src[0].src.ssa->bit_size == 32 ? v32_op : v16_op;
bool use_valu = s_op == aco_opcode::num_opcodes ||
- divergent_vals ||
+ nir_dest_is_divergent(instr->dest.dest) ||
ctx->allocated[instr->src[0].src.ssa->index].type() == RegType::vgpr ||
ctx->allocated[instr->src[1].src.ssa->index].type() == RegType::vgpr;
aco_opcode op = use_valu ? v_op : s_op;
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
return;
}
assert(els.regClass() == bld.lm);
}
- if (!ctx->divergent_vals[instr->src[0].src.ssa->index]) { /* uniform condition and values in sgpr */
+ if (!nir_src_is_divergent(instr->src[0].src)) { /* uniform condition and values in sgpr */
if (dst.regClass() == s1 || dst.regClass() == s2) {
assert((then.regClass() == s1 || then.regClass() == s2) && els.regClass() == then.regClass());
assert(dst.size() == then.size());
aco_opcode op = dst.regClass() == s1 ? aco_opcode::s_cselect_b32 : aco_opcode::s_cselect_b64;
bld.sop2(op, Definition(dst), then, els, bld.scc(bool_to_scalar_condition(ctx, cond)));
} else {
- fprintf(stderr, "Unimplemented uniform bcsel bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented uniform bcsel bit size");
}
return;
}
bld.pseudo(aco_opcode::p_split_vector, Definition(val_lo), Definition(val_hi), val);
/* Extract the exponent and compute the unbiased value. */
- Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f64, bld.def(v1), val);
+ Temp exponent = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), val_hi, Operand(20u), Operand(11u));
+ exponent = bld.vsub32(bld.def(v1), exponent, Operand(1023u));
/* Extract the fractional part. */
Temp fract_mask = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), Operand(-1u), Operand(0x000fffffu));
fract_hi = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), val_hi, tmp);
/* Get the sign bit. */
- Temp sign = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), val_hi);
+ Temp sign = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x80000000u), val_hi);
/* Decide the operation to apply depending on the unbiased exponent. */
Temp exp_lt0 = bld.vopc_e64(aco_opcode::v_cmp_lt_i32, bld.hint_vcc(bld.def(bld.lm)), exponent, Operand(0u));
if (ctx->options->chip_class >= GFX7)
return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
- /* GFX6 doesn't support V_FLOOR_F64, lower it. */
+ /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
+ * lowered at NIR level for precision reasons). */
Temp src0 = as_vgpr(ctx, val);
Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
{
if (!instr->dest.dest.is_ssa) {
- fprintf(stderr, "nir alu dst not in ssa: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "nir alu dst not in ssa");
abort();
}
Builder bld(ctx->program, ctx->block);
+ bld.is_precise = instr->exact;
Temp dst = get_ssa_temp(ctx, &instr->dest.dest.ssa);
switch(instr->op) {
case nir_op_vec2:
case nir_op_vec3:
case nir_op_vec4: {
std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
- for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i) {
+ unsigned num = instr->dest.dest.ssa.num_components;
+ for (unsigned i = 0; i < num; ++i)
elems[i] = get_alu_src(ctx, instr->src[i]);
- vec->operands[i] = Operand{elems[i]};
+
+ if (instr->dest.dest.ssa.bit_size >= 32 || dst.type() == RegType::vgpr) {
+ aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, instr->dest.dest.ssa.num_components, 1)};
+ RegClass elem_rc = RegClass::get(RegType::vgpr, instr->dest.dest.ssa.bit_size / 8u);
+ for (unsigned i = 0; i < num; ++i) {
+ if (elems[i].type() == RegType::sgpr && elem_rc.is_subdword())
+ vec->operands[i] = Operand(emit_extract_vector(ctx, elems[i], 0, elem_rc));
+ else
+ vec->operands[i] = Operand{elems[i]};
+ }
+ vec->definitions[0] = Definition(dst);
+ ctx->block->instructions.emplace_back(std::move(vec));
+ ctx->allocated_vec.emplace(dst.id(), elems);
+ } else {
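+ /* sub-dword sgpr destination: pack the elements on the SALU by masking
+ * each one to its bit size, shifting it into place and ORing it into
+ * the containing dword */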
+ // TODO: this is a bit suboptimal...
+ Temp mask = bld.copy(bld.def(s1), Operand((1u << instr->dest.dest.ssa.bit_size) - 1));
+ for (unsigned i = 0; i < num - 1; ++i)
+ if (((i+1) * instr->dest.dest.ssa.bit_size) % 32)
+ elems[i] = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), elems[i], mask);
+ for (unsigned i = 0; i < num; ++i) {
+ unsigned bit = i * instr->dest.dest.ssa.bit_size;
+ if (bit % 32 == 0) {
+ elems[bit / 32] = elems[i];
+ } else {
+ elems[i] = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc),
+ elems[i], Operand((i * instr->dest.dest.ssa.bit_size) % 32));
+ elems[bit / 32] = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), elems[bit / 32], elems[i]);
+ }
+ }
+ if (dst.size() == 1)
+ bld.copy(Definition(dst), elems[0]);
+ else
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), elems[0], elems[1]);
}
- vec->definitions[0] = Definition(dst);
- ctx->block->instructions.emplace_back(std::move(vec));
- ctx->allocated_vec.emplace(dst.id(), elems);
break;
}
case nir_op_mov: {
bld.sop1(aco_opcode::s_mov_b64, Definition(dst), src);
else
unreachable("wrong src register class for nir_op_imov");
- } else if (dst.regClass() == v1) {
- bld.vop1(aco_opcode::v_mov_b32, Definition(dst), src);
- } else if (dst.regClass() == v2) {
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
} else {
- nir_print_instr(&instr->instr, stderr);
- unreachable("Should have been lowered to scalar.");
+ if (dst.regClass() == v1)
+ bld.vop1(aco_opcode::v_mov_b32, Definition(dst), src);
+ else if (dst.regClass() == v1b ||
+ dst.regClass() == v2b ||
+ dst.regClass() == v2)
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
+ else
+ unreachable("wrong src register class for nir_op_imov");
}
break;
}
bld.sop2(Builder::s_and, Definition(dst), bld.def(s1, scc), tmp, Operand(exec, bld.lm));
} else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
+ } else if (dst.regClass() == v2) {
+ Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
+ lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
+ hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
} else if (dst.type() == RegType::sgpr) {
aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
Temp src = get_alu_src(ctx, instr->src[0]);
bld.vop2(aco_opcode::v_max_i32, Definition(dst), src, bld.vsub32(bld.def(v1), Operand(0u), src));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_isign: {
Temp src = get_alu_src(ctx, instr->src[0]);
if (dst.regClass() == s1) {
- Temp tmp = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
- Temp gtz = bld.sopc(aco_opcode::s_cmp_gt_i32, bld.def(s1, scc), src, Operand(0u));
- bld.sop2(aco_opcode::s_add_i32, Definition(dst), bld.def(s1, scc), gtz, tmp);
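+ /* clamping to [-1, 1] yields exactly isign for integers */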
+ Temp tmp = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), src, Operand((uint32_t)-1));
+ bld.sop2(aco_opcode::s_min_i32, Definition(dst), bld.def(s1, scc), tmp, Operand(1u));
} else if (dst.regClass() == s2) {
Temp neg = bld.sop2(aco_opcode::s_ashr_i64, bld.def(s2), bld.def(s1, scc), src, Operand(63u));
Temp neqz;
/* SCC gets zero-extended to 64 bit */
bld.sop2(aco_opcode::s_or_b64, Definition(dst), bld.def(s1, scc), neg, bld.scc(neqz));
} else if (dst.regClass() == v1) {
- Temp tmp = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
- Temp gtz = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
- bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(1u), tmp, gtz);
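+ /* v_med3_i32(-1, src, 1) is the same clamp to [-1, 1] */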
+ bld.vop3(aco_opcode::v_med3_i32, Definition(dst), Operand((uint32_t)-1), src, Operand(1u));
} else if (dst.regClass() == v2) {
Temp upper = emit_extract_vector(ctx, src, 1, v1);
Temp neg = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), upper);
upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), neg, gtz);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_max_i32, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_max_u32, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_min_i32, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_min_u32, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
emit_boolean_logic(ctx, instr, Builder::s_or, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
} else if (dst.regClass() == s2) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b64, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
emit_boolean_logic(ctx, instr, Builder::s_and, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
} else if (dst.regClass() == s2) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b64, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
} else if (dst.regClass() == s2) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b64, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_lshr_b32, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s2) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_lshl_b64, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s2) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_ashr_i64, dst, true);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (src.regClass() == s2) {
bld.sop1(aco_opcode::s_ff1_i32_b64, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
Temp carry = bld.vsub32(Definition(msb), Operand(31u), Operand(msb_rev), true).def(1).getTemp();
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), msb, Operand((uint32_t)-1), carry);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == v1) {
bld.vop1(aco_opcode::v_bfrev_b32, Definition(dst), get_alu_src(ctx, instr->src[0]));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
Temp dst1 = bld.vadd32(bld.def(v1), src01, src11, false, carry);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), tmp, Operand((uint32_t) -1), carry);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
carry = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), carry);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), carry, Operand(0u));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
Temp upper = bld.vsub32(bld.def(v1), src01, src11, false, borrow);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
borrow = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand(1u), borrow);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), borrow, Operand(0u));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_mul_i32, dst, false);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fmul: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
+ if (dst.regClass() == v2b) {
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f16, dst, true);
+ } else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_mul_f32, dst, true);
- } else if (dst.size() == 2) {
- bld.vop3(aco_opcode::v_mul_f64, Definition(dst), get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ } else if (dst.regClass() == v2) {
+ bld.vop3(aco_opcode::v_mul_f64, Definition(dst), src0, src1);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fadd: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
+ if (dst.regClass() == v2b) {
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f16, dst, true);
+ } else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_add_f32, dst, true);
- } else if (dst.size() == 2) {
- bld.vop3(aco_opcode::v_add_f64, Definition(dst), get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ } else if (dst.regClass() == v2) {
+ bld.vop3(aco_opcode::v_add_f64, Definition(dst), src0, src1);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fsub: {
Temp src0 = get_alu_src(ctx, instr->src[0]);
Temp src1 = get_alu_src(ctx, instr->src[1]);
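+ /* VOP2 allows a constant/SGPR only in src0; v_subrev computes
+ * src1 - src0, so it is used when the operands would need swapping */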
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f16, dst, false);
+ else
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f16, dst, true);
+ } else if (dst.regClass() == v1) {
if (src1.type() == RegType::vgpr || src0.type() != RegType::vgpr)
emit_vop2_instruction(ctx, instr, aco_opcode::v_sub_f32, dst, false);
else
emit_vop2_instruction(ctx, instr, aco_opcode::v_subrev_f32, dst, true);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst),
- get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ as_vgpr(ctx, src0), as_vgpr(ctx, src1));
VOP3A_instruction* sub = static_cast<VOP3A_instruction*>(add);
sub->neg[1] = true;
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fmax: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
+ if (dst.regClass() == v2b) {
+ // TODO: check fp_mode.must_flush_denorms16_64
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f16, dst, true);
+ } else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_max_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
- Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2),
- get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ Temp tmp = bld.vop3(aco_opcode::v_max_f64, bld.def(v2), src0, src1);
bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
} else {
- bld.vop3(aco_opcode::v_max_f64, Definition(dst),
- get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ bld.vop3(aco_opcode::v_max_f64, Definition(dst), src0, src1);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fmin: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = as_vgpr(ctx, get_alu_src(ctx, instr->src[1]));
+ if (dst.regClass() == v2b) {
+ // TODO: check fp_mode.must_flush_denorms16_64
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f16, dst, true);
+ } else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_min_f32, dst, true, false, ctx->block->fp_mode.must_flush_denorms32);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->block->fp_mode.must_flush_denorms16_64 && ctx->program->chip_class < GFX9) {
- Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2),
- get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ Temp tmp = bld.vop3(aco_opcode::v_min_f64, bld.def(v2), src0, src1);
bld.vop3(aco_opcode::v_mul_f64, Definition(dst), Operand(0x3FF0000000000000lu), tmp);
} else {
- bld.vop3(aco_opcode::v_min_f64, Definition(dst),
- get_alu_src(ctx, instr->src[0]),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[1])));
+ bld.vop3(aco_opcode::v_min_f64, Definition(dst), src0, src1);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_fmax3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_fmin3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_fmed3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_f32, dst, ctx->block->fp_mode.must_flush_denorms32);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_umax3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_u32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_umin3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_u32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_umed3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_u32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_imax3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_max3_i32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_imin3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_min3_i32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_imed3: {
- if (dst.size() == 1) {
- emit_vop3a_instruction(ctx, instr, aco_opcode::v_med3_i32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
ma = bld.vop1(aco_opcode::v_rcp_f32, bld.def(v1), ma);
Temp sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), src[0], src[1], src[2]);
Temp tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), src[0], src[1], src[2]);
- sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, ma, Operand(0x3f000000u/*0.5*/));
- tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, ma, Operand(0x3f000000u/*0.5*/));
+ sc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1),
+ bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), sc, ma), Operand(0x3f000000u/*0.5*/));
+ tc = bld.vop2(aco_opcode::v_add_f32, bld.def(v1),
+ bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), tc, ma), Operand(0x3f000000u/*0.5*/));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), sc, tc);
break;
}
break;
}
case nir_op_frsq: {
- if (dst.size() == 1) {
- emit_rsq(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
- } else if (dst.size() == 2) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f16, dst);
+ } else if (dst.regClass() == v1) {
+ emit_rsq(ctx, bld, Definition(dst), src);
+ } else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fneg: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ if (ctx->block->fp_mode.must_flush_denorms16_64)
+ src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
+ bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x8000u), as_vgpr(ctx, src));
+ } else if (dst.regClass() == v1) {
if (ctx->block->fp_mode.must_flush_denorms32)
src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x80000000u), as_vgpr(ctx, src));
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->block->fp_mode.must_flush_denorms16_64)
src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
upper = bld.vop2(aco_opcode::v_xor_b32, bld.def(v1), Operand(0x80000000u), upper);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fabs: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ if (ctx->block->fp_mode.must_flush_denorms16_64)
+ src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
+ bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFu), as_vgpr(ctx, src));
+ } else if (dst.regClass() == v1) {
if (ctx->block->fp_mode.must_flush_denorms32)
src = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), Operand(0x3f800000u), as_vgpr(ctx, src));
bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFFFFFu), as_vgpr(ctx, src));
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->block->fp_mode.must_flush_denorms16_64)
src = bld.vop3(aco_opcode::v_mul_f64, bld.def(v2), Operand(0x3FF0000000000000lu), as_vgpr(ctx, src));
Temp upper = bld.tmp(v1), lower = bld.tmp(v1);
upper = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7FFFFFFFu), upper);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fsat: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ bld.vop3(aco_opcode::v_med3_f16, Definition(dst), Operand((uint16_t)0u), Operand((uint16_t)0x3c00), src);
+ } else if (dst.regClass() == v1) {
bld.vop3(aco_opcode::v_med3_f32, Definition(dst), Operand(0u), Operand(0x3f800000u), src);
/* apparently, it is not necessary to flush denorms if this instruction is used with these operands */
// TODO: confirm that this holds under any circumstances
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
Instruction* add = bld.vop3(aco_opcode::v_add_f64, Definition(dst), src, Operand(0u));
VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(add);
vop3->clamp = true;
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_flog2: {
- if (dst.size() == 1) {
- emit_log2(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_log_f16, dst);
+ } else if (dst.regClass() == v1) {
+ emit_log2(ctx, bld, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_frcp: {
- if (dst.size() == 1) {
- emit_rcp(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
- } else if (dst.size() == 2) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f16, dst);
+ } else if (dst.regClass() == v1) {
+ emit_rcp(ctx, bld, Definition(dst), src);
+ } else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fexp2: {
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_exp_f32, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fsqrt: {
- if (dst.size() == 1) {
- emit_sqrt(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
- } else if (dst.size() == 2) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f16, dst);
+ } else if (dst.regClass() == v1) {
+ emit_sqrt(ctx, bld, Definition(dst), src);
+ } else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_ffract: {
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f32, dst);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_fract_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_ffloor: {
- if (dst.size() == 1) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_floor_f32, dst);
- } else if (dst.size() == 2) {
- emit_floor_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
+ } else if (dst.regClass() == v2) {
+ emit_floor_f64(ctx, bld, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fceil: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f32, dst);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->options->chip_class >= GFX7) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_ceil_f64, dst);
} else {
/* GFX6 doesn't support V_CEIL_F64, lower it. */
- Temp src0 = get_alu_src(ctx, instr->src[0]);
-
/* trunc = trunc(src0)
* if (src0 > 0.0 && src0 != trunc)
* trunc += 1.0
bld.vop3(aco_opcode::v_add_f64, Definition(dst), trunc, add);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_ftrunc: {
- if (dst.size() == 1) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_trunc_f32, dst);
- } else if (dst.size() == 2) {
- emit_trunc_f64(ctx, bld, Definition(dst), get_alu_src(ctx, instr->src[0]));
+ } else if (dst.regClass() == v2) {
+ emit_trunc_f64(ctx, bld, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fround_even: {
- if (dst.size() == 1) {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f16, dst);
+ } else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f32, dst);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
if (ctx->options->chip_class >= GFX7) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_rndne_f64, dst);
} else {
/* GFX6 doesn't support V_RNDNE_F64, lower it. */
- Temp src0 = get_alu_src(ctx, instr->src[0]);
-
Temp src0_lo = bld.tmp(v1), src0_hi = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(src0_lo), Definition(src0_hi), src0);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), dst0, dst1);
}
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fsin:
case nir_op_fcos: {
- Temp src = get_alu_src(ctx, instr->src[0]);
+ Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
aco_ptr<Instruction> norm;
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
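+ /* the hardware sin/cos take the angle in revolutions, so scale by 1/(2*PI); 0x3118 is 1/(2*PI) in fp16 */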
+ Temp half_pi = bld.copy(bld.def(s1), Operand(0x3118u));
+ Temp tmp = bld.vop2(aco_opcode::v_mul_f16, bld.def(v1), half_pi, src);
+ aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f16 : aco_opcode::v_cos_f16;
+ bld.vop1(opcode, Definition(dst), tmp);
+ } else if (dst.regClass() == v1) {
Temp half_pi = bld.copy(bld.def(s1), Operand(0x3e22f983u));
- Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, as_vgpr(ctx, src));
+ Temp tmp = bld.vop2(aco_opcode::v_mul_f32, bld.def(v1), half_pi, src);
/* before GFX9, v_sin_f32 and v_cos_f32 had a valid input domain of [-256, +256] */
if (ctx->options->chip_class < GFX9)
tmp = bld.vop1(aco_opcode::v_fract_f32, bld.def(v1), tmp);
aco_opcode opcode = instr->op == nir_op_fsin ? aco_opcode::v_sin_f32 : aco_opcode::v_cos_f32;
bld.vop1(opcode, Definition(dst), tmp);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_ldexp: {
- if (dst.size() == 1) {
- bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[0])),
- get_alu_src(ctx, instr->src[1]));
- } else if (dst.size() == 2) {
- bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst),
- as_vgpr(ctx, get_alu_src(ctx, instr->src[0])),
- get_alu_src(ctx, instr->src[1]));
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = get_alu_src(ctx, instr->src[1]);
+ if (dst.regClass() == v2b) {
+ emit_vop2_instruction(ctx, instr, aco_opcode::v_ldexp_f16, dst, false);
+ } else if (dst.regClass() == v1) {
+ bld.vop3(aco_opcode::v_ldexp_f32, Definition(dst), as_vgpr(ctx, src0), src1);
+ } else if (dst.regClass() == v2) {
+ bld.vop3(aco_opcode::v_ldexp_f64, Definition(dst), as_vgpr(ctx, src0), src1);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_frexp_sig: {
- if (dst.size() == 1) {
- bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst),
- get_alu_src(ctx, instr->src[0]));
- } else if (dst.size() == 2) {
- bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst),
- get_alu_src(ctx, instr->src[0]));
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (dst.regClass() == v2b) {
+ bld.vop1(aco_opcode::v_frexp_mant_f16, Definition(dst), src);
+ } else if (dst.regClass() == v1) {
+ bld.vop1(aco_opcode::v_frexp_mant_f32, Definition(dst), src);
+ } else if (dst.regClass() == v2) {
+ bld.vop1(aco_opcode::v_frexp_mant_f64, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_frexp_exp: {
- if (instr->src[0].src.ssa->bit_size == 32) {
- bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst),
- get_alu_src(ctx, instr->src[0]));
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 16) {
+ Temp tmp = bld.vop1(aco_opcode::v_frexp_exp_i16_f16, bld.def(v1), src);
+ tmp = bld.pseudo(aco_opcode::p_extract_vector, bld.def(v1b), tmp, Operand(0u));
+ convert_int(ctx, bld, tmp, 8, 32, true, dst);
+ } else if (instr->src[0].src.ssa->bit_size == 32) {
+ bld.vop1(aco_opcode::v_frexp_exp_i32_f32, Definition(dst), src);
} else if (instr->src[0].src.ssa->bit_size == 64) {
- bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst),
- get_alu_src(ctx, instr->src[0]));
+ bld.vop1(aco_opcode::v_frexp_exp_i32_f64, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_fsign: {
Temp src = as_vgpr(ctx, get_alu_src(ctx, instr->src[0]));
- if (dst.size() == 1) {
+ if (dst.regClass() == v2b) {
+ Temp one = bld.copy(bld.def(v1), Operand(0x3c00u));
+ Temp minus_one = bld.copy(bld.def(v1), Operand(0xbc00u));
+ Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
+ src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), one, src, cond);
+ cond = bld.vopc(aco_opcode::v_cmp_le_f16, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
+ bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), minus_one, src, cond);
+ } else if (dst.regClass() == v1) {
Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
src = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0x3f800000u), src, cond);
cond = bld.vopc(aco_opcode::v_cmp_le_f32, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0xbf800000u), src, cond);
- } else if (dst.size() == 2) {
+ } else if (dst.regClass() == v2) {
Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(bld.lm)), Operand(0u), src);
Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), Operand(0u), upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
+ case nir_op_f2f16:
+ case nir_op_f2f16_rtne: {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 64)
+ src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
+ if (instr->op == nir_op_f2f16_rtne && ctx->block->fp_mode.round16_64 != fp_round_ne)
+ /* We emit s_round_mode/s_setreg_imm32 in lower_to_hw_instr to
+ * keep value numbering and the scheduler simpler.
+ */
+ bld.vop1(aco_opcode::p_cvt_f16_f32_rtne, Definition(dst), src);
+ else
+ bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
+ break;
+ }
+ case nir_op_f2f16_rtz: {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 64)
+ src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
+ bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src, Operand(0u));
+ break;
+ }
case nir_op_f2f32: {
- if (instr->src[0].src.ssa->bit_size == 64) {
+ if (instr->src[0].src.ssa->bit_size == 16) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f16, dst);
+ } else if (instr->src[0].src.ssa->bit_size == 64) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_f2f64: {
- if (instr->src[0].src.ssa->bit_size == 32) {
- emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_f32, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 16)
+ src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
+ bld.vop1(aco_opcode::v_cvt_f64_f32, Definition(dst), src);
+ break;
+ }
+ case nir_op_i2f16: {
+ assert(dst.regClass() == v2b);
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 8)
+ src = convert_int(ctx, bld, src, 8, 16, true);
+ else if (instr->src[0].src.ssa->bit_size == 64)
+ src = convert_int(ctx, bld, src, 64, 32, false);
+ bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
break;
}
case nir_op_i2f32: {
assert(dst.size() == 1);
- emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_i32, dst);
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size <= 16)
+ src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
+ bld.vop1(aco_opcode::v_cvt_f32_i32, Definition(dst), src);
break;
}
case nir_op_i2f64: {
- if (instr->src[0].src.ssa->bit_size == 32) {
- emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_i32, dst);
+ if (instr->src[0].src.ssa->bit_size <= 32) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size <= 16)
+ src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, true);
+ bld.vop1(aco_opcode::v_cvt_f64_i32, Definition(dst), src);
} else if (instr->src[0].src.ssa->bit_size == 64) {
Temp src = get_alu_src(ctx, instr->src[0]);
RegClass rc = RegClass(src.type(), 1);
bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
+ case nir_op_u2f16: {
+ assert(dst.regClass() == v2b);
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 8)
+ src = convert_int(ctx, bld, src, 8, 16, false);
+ else if (instr->src[0].src.ssa->bit_size == 64)
+ src = convert_int(ctx, bld, src, 64, 32, false);
+ bld.vop1(aco_opcode::v_cvt_f16_u16, Definition(dst), src);
+ break;
+ }
case nir_op_u2f32: {
assert(dst.size() == 1);
- emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f32_u32, dst);
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size == 8) {
+ bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src);
+ } else {
+ if (instr->src[0].src.ssa->bit_size == 16)
+ src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
+ bld.vop1(aco_opcode::v_cvt_f32_u32, Definition(dst), src);
+ }
break;
}
case nir_op_u2f64: {
- if (instr->src[0].src.ssa->bit_size == 32) {
- emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_f64_u32, dst);
+ if (instr->src[0].src.ssa->bit_size <= 32) {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ if (instr->src[0].src.ssa->bit_size <= 16)
+ src = convert_int(ctx, bld, src, instr->src[0].src.ssa->bit_size, 32, false);
+ bld.vop1(aco_opcode::v_cvt_f64_u32, Definition(dst), src);
} else if (instr->src[0].src.ssa->bit_size == 64) {
Temp src = get_alu_src(ctx, instr->src[0]);
RegClass rc = RegClass(src.type(), 1);
upper = bld.vop3(aco_opcode::v_ldexp_f64, bld.def(v2), upper, Operand(32u));
bld.vop3(aco_opcode::v_add_f64, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
+ case nir_op_f2i8:
+ case nir_op_f2i16: {
+ if (instr->src[0].src.ssa->bit_size == 16)
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i16_f16, dst);
+ else if (instr->src[0].src.ssa->bit_size == 32)
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
+ else
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
+ break;
+ }
+ case nir_op_f2u8:
+ case nir_op_f2u16: {
+ if (instr->src[0].src.ssa->bit_size == 16)
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u16_f16, dst);
+ else if (instr->src[0].src.ssa->bit_size == 32)
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
+ else
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
+ break;
+ }
case nir_op_f2i32: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32) {
- if (dst.type() == RegType::vgpr)
- bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), src);
- else
+ if (instr->src[0].src.ssa->bit_size == 16) {
+ Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
+ if (dst.type() == RegType::vgpr) {
+ bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(dst), tmp);
+ } else {
bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
- bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), src));
-
+ bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), tmp));
+ }
+ } else if (instr->src[0].src.ssa->bit_size == 32) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f32, dst);
} else if (instr->src[0].src.ssa->bit_size == 64) {
- if (dst.type() == RegType::vgpr)
- bld.vop1(aco_opcode::v_cvt_i32_f64, Definition(dst), src);
- else
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
- bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), src));
-
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_i32_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_f2u32: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32) {
- if (dst.type() == RegType::vgpr)
- bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), src);
- else
+ if (instr->src[0].src.ssa->bit_size == 16) {
+ Temp tmp = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
+ if (dst.type() == RegType::vgpr) {
+ bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(dst), tmp);
+ } else {
bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
- bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), src));
-
+ bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), tmp));
+ }
+ } else if (instr->src[0].src.ssa->bit_size == 32) {
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f32, dst);
} else if (instr->src[0].src.ssa->bit_size == 64) {
- if (dst.type() == RegType::vgpr)
- bld.vop1(aco_opcode::v_cvt_u32_f64, Definition(dst), src);
- else
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst),
- bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), src));
-
+ emit_vop1_instruction(ctx, instr, aco_opcode::v_cvt_u32_f64, dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_f2i64: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) {
+ if (instr->src[0].src.ssa->bit_size == 16)
+ src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
+
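+ /* there is no f32->i64 conversion instruction, so split the float into sign, exponent and mantissa and assemble the result manually */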
+ if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
exponent = bld.vop3(aco_opcode::v_med3_i32, bld.def(v1), Operand(0x0u), exponent, Operand(64u));
Temp mantissa = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0x7fffffu), src);
Temp new_upper = bld.vsub32(bld.def(v1), upper, sign, false, borrow);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), new_lower, new_upper);
- } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) {
+ } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
if (src.type() == RegType::vgpr)
src = bld.as_uniform(src);
Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
- exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
- exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
- exponent = bld.sop2(aco_opcode::s_min_u32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent);
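+ /* exponent - 126 can be negative here, so clamp with signed sub/max to avoid wrap-around */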
+ exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
+ exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
+ exponent = bld.sop2(aco_opcode::s_min_i32, bld.def(s1), bld.def(s1, scc), Operand(64u), exponent);
Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
Temp sign = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_f2u64: {
Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::vgpr) {
+ if (instr->src[0].src.ssa->bit_size == 16)
+ src = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src);
+
+ if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::vgpr) {
Temp exponent = bld.vop1(aco_opcode::v_frexp_exp_i32_f32, bld.def(v1), src);
Temp exponent_in_range = bld.vopc(aco_opcode::v_cmp_ge_i32, bld.hint_vcc(bld.def(bld.lm)), Operand(64u), exponent);
exponent = bld.vop2(aco_opcode::v_max_i32, bld.def(v1), Operand(0x0u), exponent);
upper = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0xffffffffu), upper, exponent_in_range);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
- } else if (instr->src[0].src.ssa->bit_size == 32 && dst.type() == RegType::sgpr) {
+ } else if (instr->src[0].src.ssa->bit_size <= 32 && dst.type() == RegType::sgpr) {
if (src.type() == RegType::vgpr)
src = bld.as_uniform(src);
Temp exponent = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), src, Operand(0x80017u));
- exponent = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
- exponent = bld.sop2(aco_opcode::s_max_u32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
+ exponent = bld.sop2(aco_opcode::s_sub_i32, bld.def(s1), bld.def(s1, scc), exponent, Operand(126u));
+ exponent = bld.sop2(aco_opcode::s_max_i32, bld.def(s1), bld.def(s1, scc), Operand(0u), exponent);
Temp mantissa = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0x7fffffu), src);
mantissa = bld.sop2(aco_opcode::s_or_b32, bld.def(s1), bld.def(s1, scc), Operand(0x800000u), mantissa);
Temp exponent_small = bld.sop2(aco_opcode::s_sub_u32, bld.def(s1), bld.def(s1, scc), Operand(24u), exponent);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lower, upper);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
+ }
+ break;
+ }
+ case nir_op_b2f16: {
+ Temp src = get_alu_src(ctx, instr->src[0]);
+ assert(src.regClass() == bld.lm);
+
+ if (dst.regClass() == s1) {
+ src = bool_to_scalar_condition(ctx, src);
+ bld.sop2(aco_opcode::s_mul_i32, Definition(dst), Operand(0x3c00u), src);
+ } else if (dst.regClass() == v2b) {
+ Temp one = bld.copy(bld.def(v1), Operand(0x3c00u));
+ bld.vop2(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), one, src);
+ } else {
+ unreachable("Wrong destination register class for nir_op_b2f16.");
}
break;
}
}
break;
}
- case nir_op_i2i32: {
- Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 64) {
- /* we can actually just say dst = src, as it would map the lower register */
- emit_extract_vector(ctx, src, 0, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
- case nir_op_u2u32: {
- Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 16) {
- if (dst.regClass() == s1) {
- bld.sop2(aco_opcode::s_and_b32, Definition(dst), bld.def(s1, scc), Operand(0xFFFFu), src);
- } else {
- // TODO: do better with SDWA
- bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0xFFFFu), src);
- }
- } else if (instr->src[0].src.ssa->bit_size == 64) {
- /* we can actually just say dst = src, as it would map the lower register */
- emit_extract_vector(ctx, src, 0, dst);
- } else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
- }
- break;
- }
+ case nir_op_i2i8:
+ case nir_op_i2i16:
+ case nir_op_i2i32:
case nir_op_i2i64: {
- Temp src = get_alu_src(ctx, instr->src[0]);
- if (src.regClass() == s1) {
- Temp high = bld.sop2(aco_opcode::s_ashr_i32, bld.def(s1), bld.def(s1, scc), src, Operand(31u));
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
- } else if (src.regClass() == v1) {
- Temp high = bld.vop2(aco_opcode::v_ashrrev_i32, bld.def(v1), Operand(31u), src);
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, high);
+ if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
+ /* no need to do the extract in get_alu_src() */
+ sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size ?
+ sgpr_extract_sext : sgpr_extract_undef;
+ extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]),
+ instr->src[0].src.ssa->bit_size, instr->dest.dest.ssa.bit_size, true, dst);
}
break;
}
+ case nir_op_u2u8:
+ case nir_op_u2u16:
+ case nir_op_u2u32:
case nir_op_u2u64: {
- Temp src = get_alu_src(ctx, instr->src[0]);
- if (instr->src[0].src.ssa->bit_size == 32) {
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src, Operand(0u));
+ if (dst.type() == RegType::sgpr && instr->src[0].src.ssa->bit_size < 32) {
+ /* no need to do the extract in get_alu_src() */
+ sgpr_extract_mode mode = instr->dest.dest.ssa.bit_size > instr->src[0].src.ssa->bit_size ?
+ sgpr_extract_zext : sgpr_extract_undef;
+ extract_8_16_bit_sgpr_element(ctx, dst, &instr->src[0], mode);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ convert_int(ctx, bld, get_alu_src(ctx, instr->src[0]),
+ instr->src[0].src.ssa->bit_size, instr->dest.dest.ssa.bit_size, false, dst);
}
break;
}
- case nir_op_b2i32: {
+ case nir_op_b2b32:
+ case nir_op_b2i8:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
+ case nir_op_b2i64: {
Temp src = get_alu_src(ctx, instr->src[0]);
assert(src.regClass() == bld.lm);
- if (dst.regClass() == s1) {
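+ /* for 64-bit destinations, compute a 32-bit result first and zero-extend it below */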
+ Temp tmp = dst.bytes() == 8 ? bld.tmp(RegClass::get(dst.type(), 4)) : dst;
+ if (tmp.regClass() == s1) {
// TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ
- bool_to_scalar_condition(ctx, src, dst);
- } else if (dst.regClass() == v1) {
- bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), src);
+ bool_to_scalar_condition(ctx, src, tmp);
+ } else if (tmp.type() == RegType::vgpr) {
+ bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(tmp), Operand(0u), Operand(1u), src);
} else {
unreachable("Invalid register class for b2i32");
}
+
+ if (tmp != dst)
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u));
break;
}
+ case nir_op_b2b1:
case nir_op_i2b1: {
Temp src = get_alu_src(ctx, instr->src[0]);
assert(dst.regClass() == bld.lm);
case nir_op_unpack_64_2x32_split_y:
bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
break;
+ case nir_op_unpack_32_2x16_split_x:
+ if (dst.type() == RegType::vgpr) {
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(dst.regClass()), get_alu_src(ctx, instr->src[0]));
+ } else {
+ bld.copy(Definition(dst), get_alu_src(ctx, instr->src[0]));
+ }
+ break;
+ case nir_op_unpack_32_2x16_split_y:
+ if (dst.type() == RegType::vgpr) {
+ bld.pseudo(aco_opcode::p_split_vector, bld.def(dst.regClass()), Definition(dst), get_alu_src(ctx, instr->src[0]));
+ } else {
+ bld.sop2(aco_opcode::s_bfe_u32, Definition(dst), bld.def(s1, scc), get_alu_src(ctx, instr->src[0]), Operand(uint32_t(16 << 16 | 16)));
+ }
+ break;
+ case nir_op_pack_32_2x16_split: {
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = get_alu_src(ctx, instr->src[1]);
+ if (dst.regClass() == v1) {
+ src0 = emit_extract_vector(ctx, src0, 0, v2b);
+ src1 = emit_extract_vector(ctx, src1, 0, v2b);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src0, src1);
+ } else {
+ src0 = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), src0, Operand(0xFFFFu));
+ src1 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), src1, Operand(16u));
+ bld.sop2(aco_opcode::s_or_b32, Definition(dst), bld.def(s1, scc), src0, src1);
+ }
+ break;
+ }
case nir_op_pack_half_2x16: {
Temp src = get_alu_src(ctx, instr->src[0], 2);
Temp src0 = bld.tmp(v1);
Temp src1 = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(src0), Definition(src1), src);
- if (!ctx->block->fp_mode.care_about_round32 || ctx->block->fp_mode.round32 == fp_round_tz)
+ if (!ctx->block->fp_mode.care_about_round32 || ctx->block->fp_mode.round32 == fp_round_tz) {
bld.vop3(aco_opcode::v_cvt_pkrtz_f16_f32, Definition(dst), src0, src1);
- else
- bld.vop3(aco_opcode::v_cvt_pk_u16_u32, Definition(dst),
- bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src0),
- bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), src1));
+ } else {
+ src0 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src0);
+ src1 = bld.vop1(aco_opcode::v_cvt_f16_f32, bld.def(v1), src1);
+ if (ctx->program->chip_class >= GFX10) {
+ /* the high bits of v_cvt_f16_f32 aren't zeroed on GFX10 */
+ bld.vop3(aco_opcode::v_pack_b32_f16, Definition(dst), src0, src1);
+ } else {
+ bld.vop3(aco_opcode::v_cvt_pk_u16_u32, Definition(dst), src0, src1);
+ }
+ }
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_unpack_half_2x16_split_x: {
if (dst.regClass() == v1) {
- Builder bld(ctx->program, ctx->block);
bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst), get_alu_src(ctx, instr->src[0]));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_unpack_half_2x16_split_y: {
if (dst.regClass() == v1) {
- Builder bld(ctx->program, ctx->block);
/* TODO: use SDWA here */
bld.vop1(aco_opcode::v_cvt_f32_f16, Definition(dst),
bld.vop2(aco_opcode::v_lshrrev_b32, bld.def(v1), Operand(16u), as_vgpr(ctx, get_alu_src(ctx, instr->src[0]))));
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (dst.regClass() == v1) {
bld.vop3(aco_opcode::v_bfm_b32, Definition(dst), bits, offset);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
bld.vop3(aco_opcode::v_bfi_b32, Definition(dst), bitmask, insert, base);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
} else if (src.regClass() == s2) {
bld.sop1(aco_opcode::s_bcnt1_i32_b64, Definition(dst), bld.def(s1, scc), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_op_flt: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_f16, aco_opcode::v_cmp_lt_f32, aco_opcode::v_cmp_lt_f64);
break;
}
case nir_op_fge: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_f16, aco_opcode::v_cmp_ge_f32, aco_opcode::v_cmp_ge_f64);
break;
}
case nir_op_feq: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_f16, aco_opcode::v_cmp_eq_f32, aco_opcode::v_cmp_eq_f64);
break;
}
- case nir_op_fne: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64);
+ case nir_op_fneu: {
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_neq_f16, aco_opcode::v_cmp_neq_f32, aco_opcode::v_cmp_neq_f64);
break;
}
case nir_op_ilt: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_i16, aco_opcode::v_cmp_lt_i32, aco_opcode::v_cmp_lt_i64, aco_opcode::s_cmp_lt_i32);
break;
}
case nir_op_ige: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_i16, aco_opcode::v_cmp_ge_i32, aco_opcode::v_cmp_ge_i64, aco_opcode::s_cmp_ge_i32);
break;
}
case nir_op_ieq: {
if (instr->src[0].src.ssa->bit_size == 1)
emit_boolean_logic(ctx, instr, Builder::s_xnor, dst);
else
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_eq_i16, aco_opcode::v_cmp_eq_i32, aco_opcode::v_cmp_eq_i64, aco_opcode::s_cmp_eq_i32,
ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_eq_u64 : aco_opcode::num_opcodes);
break;
}
if (instr->src[0].src.ssa->bit_size == 1)
emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
else
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lg_i16, aco_opcode::v_cmp_lg_i32, aco_opcode::v_cmp_lg_i64, aco_opcode::s_cmp_lg_i32,
ctx->program->chip_class >= GFX8 ? aco_opcode::s_cmp_lg_u64 : aco_opcode::num_opcodes);
break;
}
case nir_op_ult: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_lt_u16, aco_opcode::v_cmp_lt_u32, aco_opcode::v_cmp_lt_u64, aco_opcode::s_cmp_lt_u32);
break;
}
case nir_op_uge: {
- emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
+ emit_comparison(ctx, instr, dst, aco_opcode::v_cmp_ge_u16, aco_opcode::v_cmp_ge_u32, aco_opcode::v_cmp_ge_u64, aco_opcode::s_cmp_ge_u32);
break;
}
case nir_op_fddx:
break;
}
default:
- fprintf(stderr, "Unknown NIR ALU instr: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unknown NIR ALU instr");
}
}
int val = instr->value[0].b ? -1 : 0;
Operand op = bld.lm.size() == 1 ? Operand((uint32_t) val) : Operand((uint64_t) val);
bld.sop1(Builder::s_mov, Definition(dst), op);
+ } else if (instr->def.bit_size == 8) {
+ /* ensure that the value is correctly represented in the low byte of the register */
+ bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u8);
+ } else if (instr->def.bit_size == 16) {
+ /* ensure that the value is correctly represented in the low half of the register */
+ bld.sopk(aco_opcode::s_movk_i32, Definition(dst), instr->value[0].u16);
} else if (dst.size() == 1) {
bld.copy(Definition(dst), Operand(instr->value[0].u32));
} else {
return new_mask;
}
-Operand load_lds_size_m0(isel_context *ctx)
-{
- /* TODO: m0 does not need to be initialized on GFX9+ */
- Builder bld(ctx->program, ctx->block);
- return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff));
-}
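+/* Describes a load for emit_load(): "dst" is filled with "num_components"
+ * elements of "component_size" bytes, read from resource/offset plus
+ * const_offset. */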
+struct LoadEmitInfo {
+ Operand offset;
+ Temp dst;
+ unsigned num_components;
+ unsigned component_size;
+ Temp resource = Temp(0, s1);
+ unsigned component_stride = 0;
+ unsigned const_offset = 0;
+ unsigned align_mul = 0;
+ unsigned align_offset = 0;
+
+ bool glc = false;
+ unsigned swizzle_component_size = 0;
+ memory_sync_info sync;
+ Temp soffset = Temp(0, s1);
+};
-Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst,
- Temp address, unsigned base_offset, unsigned align)
+using LoadCallback = Temp(*)(
+ Builder& bld, const LoadEmitInfo* info, Temp offset, unsigned bytes_needed,
+ unsigned align, unsigned const_offset, Temp dst_hint);
+
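+/* Emits a load split into hardware-sized chunks through "callback".
+ * byte_align_loads: misaligned accesses are handled by over-reading aligned
+ * dwords and shifting the result into place (otherwise the callback handles
+ * alignment itself, e.g. with byte/short loads).
+ * supports_8bit_16bit_loads: the callback can emit sub-dword loads natively.
+ * max_const_offset_plus_one: largest encodable constant offset, plus one. */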
+template <LoadCallback callback, bool byte_align_loads, bool supports_8bit_16bit_loads, unsigned max_const_offset_plus_one>
+void emit_load(isel_context *ctx, Builder& bld, const LoadEmitInfo *info)
{
- assert(util_is_power_of_two_nonzero(align) && align >= 4);
+ unsigned load_size = info->num_components * info->component_size;
+ unsigned component_size = info->component_size;
- Builder bld(ctx->program, ctx->block);
+ unsigned num_vals = 0;
+ Temp vals[info->dst.bytes()];
- Operand m = load_lds_size_m0(ctx);
+ unsigned const_offset = info->const_offset;
+
+ unsigned align_mul = info->align_mul ? info->align_mul : component_size;
+ unsigned align_offset = (info->align_offset + const_offset) % align_mul;
- unsigned num_components = dst.size() * 4u / elem_size_bytes;
unsigned bytes_read = 0;
- unsigned result_size = 0;
- unsigned total_bytes = num_components * elem_size_bytes;
- std::array<Temp, NIR_MAX_VEC_COMPONENTS> result;
- bool large_ds_read = ctx->options->chip_class >= GFX7;
- bool usable_read2 = ctx->options->chip_class >= GFX7;
-
- while (bytes_read < total_bytes) {
- unsigned todo = total_bytes - bytes_read;
- bool aligned8 = bytes_read % 8 == 0 && align % 8 == 0;
- bool aligned16 = bytes_read % 16 == 0 && align % 16 == 0;
-
- aco_opcode op = aco_opcode::last_opcode;
- bool read2 = false;
- if (todo >= 16 && aligned16 && large_ds_read) {
- op = aco_opcode::ds_read_b128;
- todo = 16;
- } else if (todo >= 16 && aligned8 && usable_read2) {
- op = aco_opcode::ds_read2_b64;
- read2 = true;
- todo = 16;
- } else if (todo >= 12 && aligned16 && large_ds_read) {
- op = aco_opcode::ds_read_b96;
- todo = 12;
- } else if (todo >= 8 && aligned8) {
- op = aco_opcode::ds_read_b64;
- todo = 8;
- } else if (todo >= 8 && usable_read2) {
- op = aco_opcode::ds_read2_b32;
- read2 = true;
- todo = 8;
- } else if (todo >= 4) {
- op = aco_opcode::ds_read_b32;
- todo = 4;
- } else {
- assert(false);
+ while (bytes_read < load_size) {
+ unsigned bytes_needed = load_size - bytes_read;
+
+ /* add buffer for unaligned loads */
+ int byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
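+ /* byte_align == -1 means the alignment is only known at runtime; the result is shifted right by offset % 4 after the load */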
+
+ if (byte_align) {
+ if ((bytes_needed > 2 ||
+ (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||
+ !supports_8bit_16bit_loads) && byte_align_loads) {
+ if (info->component_stride) {
+ assert(supports_8bit_16bit_loads && "unimplemented");
+ bytes_needed = 2;
+ byte_align = 0;
+ } else {
+ bytes_needed += byte_align == -1 ? 4 - info->align_mul : byte_align;
+ bytes_needed = align(bytes_needed, 4);
+ }
+ } else {
+ byte_align = 0;
+ }
}
- assert(todo % elem_size_bytes == 0);
- unsigned num_elements = todo / elem_size_bytes;
- unsigned offset = base_offset + bytes_read;
- unsigned max_offset = read2 ? 1019 : 65535;
- Temp address_offset = address;
- if (offset > max_offset) {
- address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset);
- offset = bytes_read;
+ if (info->swizzle_component_size)
+ bytes_needed = MIN2(bytes_needed, info->swizzle_component_size);
+ if (info->component_stride)
+ bytes_needed = MIN2(bytes_needed, info->component_size);
+
+ bool need_to_align_offset = byte_align && (align_mul % 4 || align_offset % 4);
+
+ /* reduce constant offset */
+ Operand offset = info->offset;
+ unsigned reduced_const_offset = const_offset;
+ bool remove_const_offset_completely = need_to_align_offset;
+ if (const_offset && (remove_const_offset_completely || const_offset >= max_const_offset_plus_one)) {
+ unsigned to_add = const_offset;
+ if (remove_const_offset_completely) {
+ reduced_const_offset = 0;
+ } else {
+ to_add = const_offset / max_const_offset_plus_one * max_const_offset_plus_one;
+ reduced_const_offset %= max_const_offset_plus_one;
+ }
+ Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
+ if (offset.isConstant()) {
+ offset = Operand(offset.constantValue() + to_add);
+ } else if (offset_tmp.regClass() == s1) {
+ offset = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
+ offset_tmp, Operand(to_add));
+ } else if (offset_tmp.regClass() == v1) {
+ offset = bld.vadd32(bld.def(v1), offset_tmp, Operand(to_add));
+ } else {
+ Temp lo = bld.tmp(offset_tmp.type(), 1);
+ Temp hi = bld.tmp(offset_tmp.type(), 1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
+
+ if (offset_tmp.regClass() == s2) {
+ Temp carry = bld.tmp(s1);
+ lo = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.scc(Definition(carry)), lo, Operand(to_add));
+ hi = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), hi, carry);
+ offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(s2), lo, hi);
+ } else {
+ Temp new_lo = bld.tmp(v1);
+ Temp carry = bld.vadd32(Definition(new_lo), lo, Operand(to_add), true).def(1).getTemp();
+ hi = bld.vadd32(bld.def(v1), hi, Operand(0u), false, carry);
+ offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_lo, hi);
+ }
+ }
}
- assert(offset <= max_offset); /* bytes_read shouldn't be large enough for this to happen */
- Temp res;
- if (num_components == 1 && dst.type() == RegType::vgpr)
- res = dst;
- else
- res = bld.tmp(RegClass(RegType::vgpr, todo / 4));
+ /* align offset down if needed */
+ Operand aligned_offset = offset;
+ unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+ if (need_to_align_offset) {
+ align = 4;
+ Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
+ if (offset.isConstant()) {
+ aligned_offset = Operand(offset.constantValue() & 0xfffffffcu);
+ } else if (offset_tmp.regClass() == s1) {
+ aligned_offset = bld.sop2(aco_opcode::s_and_b32, bld.def(s1), bld.def(s1, scc), Operand(0xfffffffcu), offset_tmp);
+ } else if (offset_tmp.regClass() == s2) {
+ aligned_offset = bld.sop2(aco_opcode::s_and_b64, bld.def(s2), bld.def(s1, scc), Operand((uint64_t)0xfffffffffffffffcllu), offset_tmp);
+ } else if (offset_tmp.regClass() == v1) {
+ aligned_offset = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), offset_tmp);
+ } else if (offset_tmp.regClass() == v2) {
+ Temp hi = bld.tmp(v1), lo = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), offset_tmp);
+ lo = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xfffffffcu), lo);
+ aligned_offset = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), lo, hi);
+ }
+ }
+ Temp aligned_offset_tmp = aligned_offset.isTemp() ? aligned_offset.getTemp() :
+ bld.copy(bld.def(s1), aligned_offset);
- if (read2)
- res = bld.ds(op, Definition(res), address_offset, m, offset / (todo / 2), (offset / (todo / 2)) + 1);
- else
- res = bld.ds(op, Definition(res), address_offset, m, offset);
+ Temp val = callback(bld, info, aligned_offset_tmp, bytes_needed, align,
+ reduced_const_offset, byte_align ? Temp() : info->dst);
- if (num_components == 1) {
- assert(todo == total_bytes);
- if (dst.type() == RegType::sgpr)
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), res);
- return dst;
+ /* the callback wrote directly to dst */
+ if (val == info->dst) {
+ assert(num_vals == 0);
+ emit_split_vector(ctx, info->dst, info->num_components);
+ return;
}
- if (dst.type() == RegType::sgpr) {
- Temp new_res = bld.tmp(RegType::sgpr, res.size());
- expand_vector(ctx, res, new_res, res.size(), (1 << res.size()) - 1);
- res = new_res;
+ /* shift result right if needed */
+ if (info->component_size < 4 && byte_align_loads) {
+ Operand align((uint32_t)byte_align);
+ if (byte_align == -1) {
+ if (offset.isConstant())
+ align = Operand(offset.constantValue() % 4u);
+ else if (offset.size() == 2)
+ align = Operand(emit_extract_vector(ctx, offset.getTemp(), 0, RegClass(offset.getTemp().type(), 1)));
+ else
+ align = offset;
+ }
+
+ assert(val.bytes() >= load_size && "unimplemented");
+ if (val.type() == RegType::sgpr)
+ byte_align_scalar(ctx, val, align, info->dst);
+ else
+ byte_align_vector(ctx, val, align, info->dst, component_size);
+ return;
}
- if (num_elements == 1) {
- result[result_size++] = res;
+ /* add result to list and advance */
+ if (info->component_stride) {
+ assert(val.bytes() == info->component_size && "unimplemented");
+ const_offset += info->component_stride;
+ align_offset = (align_offset + info->component_stride) % align_mul;
} else {
- assert(res != dst && res.size() % num_elements == 0);
- aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, num_elements)};
- split->operands[0] = Operand(res);
- for (unsigned i = 0; i < num_elements; i++)
- split->definitions[i] = Definition(result[result_size++] = bld.tmp(res.type(), elem_size_bytes / 4));
- ctx->block->instructions.emplace_back(std::move(split));
+ const_offset += val.bytes();
+ align_offset = (align_offset + val.bytes()) % align_mul;
+ }
+ bytes_read += val.bytes();
+ vals[num_vals++] = val;
+ }
+
+ /* create array of components */
+ unsigned components_split = 0;
+ std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
+ bool has_vgprs = false;
+ for (unsigned i = 0; i < num_vals;) {
+ Temp tmp[num_vals];
+ unsigned num_tmps = 0;
+ unsigned tmp_size = 0;
+ RegType reg_type = RegType::sgpr;
+ while ((!tmp_size || (tmp_size % component_size)) && i < num_vals) {
+ if (vals[i].type() == RegType::vgpr)
+ reg_type = RegType::vgpr;
+ tmp_size += vals[i].bytes();
+ tmp[num_tmps++] = vals[i++];
+ }
+ if (num_tmps > 1) {
+ aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
+ aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)};
+ for (unsigned i = 0; i < num_tmps; i++)
+ vec->operands[i] = Operand(tmp[i]);
+ tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size));
+ vec->definitions[0] = Definition(tmp[0]);
+ bld.insert(std::move(vec));
+ }
+
+ if (tmp[0].bytes() % component_size) {
+ /* trim tmp[0] */
+ assert(i == num_vals);
+ RegClass new_rc = RegClass::get(reg_type, tmp[0].bytes() / component_size * component_size);
+ tmp[0] = bld.pseudo(aco_opcode::p_extract_vector, bld.def(new_rc), tmp[0], Operand(0u));
+ }
+
+ RegClass elem_rc = RegClass::get(reg_type, component_size);
+
+ unsigned start = components_split;
+
+ if (tmp_size == elem_rc.bytes()) {
+ allocated_vec[components_split++] = tmp[0];
+ } else {
+ assert(tmp_size % elem_rc.bytes() == 0);
+ aco_ptr<Pseudo_instruction> split{create_instruction<Pseudo_instruction>(
+ aco_opcode::p_split_vector, Format::PSEUDO, 1, tmp_size / elem_rc.bytes())};
+ for (unsigned i = 0; i < split->definitions.size(); i++) {
+ Temp component = bld.tmp(elem_rc);
+ allocated_vec[components_split++] = component;
+ split->definitions[i] = Definition(component);
+ }
+ split->operands[0] = Operand(tmp[0]);
+ bld.insert(std::move(split));
}
- bytes_read += todo;
+ /* try to p_as_uniform early so we can create more optimizable code and
+ * also update allocated_vec */
+ for (unsigned j = start; j < components_split; j++) {
+ if (allocated_vec[j].bytes() % 4 == 0 && info->dst.type() == RegType::sgpr)
+ allocated_vec[j] = bld.as_uniform(allocated_vec[j]);
+ has_vgprs |= allocated_vec[j].type() == RegType::vgpr;
+ }
}
- assert(result_size == num_components && result_size > 1);
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, result_size, 1)};
- for (unsigned i = 0; i < result_size; i++)
- vec->operands[i] = Operand(result[i]);
- vec->definitions[0] = Definition(dst);
- ctx->block->instructions.emplace_back(std::move(vec));
- ctx->allocated_vec.emplace(dst.id(), result);
+ /* concatenate components and p_as_uniform() result if needed */
+ if (info->dst.type() == RegType::vgpr || !has_vgprs)
+ ctx->allocated_vec.emplace(info->dst.id(), allocated_vec);
+
+ int padding_bytes = MAX2((int)info->dst.bytes() - int(allocated_vec[0].bytes() * info->num_components), 0);
+
+ aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
+ aco_opcode::p_create_vector, Format::PSEUDO, info->num_components + !!padding_bytes, 1)};
+ for (unsigned i = 0; i < info->num_components; i++)
+ vec->operands[i] = Operand(allocated_vec[i]);
+ if (padding_bytes)
+ vec->operands[info->num_components] = Operand(RegClass::get(RegType::vgpr, padding_bytes));
+ if (info->dst.type() == RegType::sgpr && has_vgprs) {
+ Temp tmp = bld.tmp(RegType::vgpr, info->dst.size());
+ vec->definitions[0] = Definition(tmp);
+ bld.insert(std::move(vec));
+ bld.pseudo(aco_opcode::p_as_uniform, Definition(info->dst), tmp);
+ } else {
+ vec->definitions[0] = Definition(info->dst);
+ bld.insert(std::move(vec));
+ }
+}
+
+Operand load_lds_size_m0(Builder& bld)
+{
+ /* TODO: m0 does not need to be initialized on GFX9+ */
+ return bld.m0((Temp)bld.sopk(aco_opcode::s_movk_i32, bld.def(s1, m0), 0xffff));
+}
+
+Temp lds_load_callback(Builder& bld, const LoadEmitInfo *info,
+ Temp offset, unsigned bytes_needed,
+ unsigned align, unsigned const_offset,
+ Temp dst_hint)
+{
+ offset = offset.regClass() == s1 ? bld.copy(bld.def(v1), offset) : offset;
+
+ Operand m = load_lds_size_m0(bld);
+
+ bool large_ds_read = bld.program->chip_class >= GFX7;
+ bool usable_read2 = bld.program->chip_class >= GFX7;
+
+ bool read2 = false;
+ unsigned size = 0;
+ aco_opcode op;
+ //TODO: use ds_read_u8_d16_hi/ds_read_u16_d16_hi if beneficial
+ if (bytes_needed >= 16 && align % 16 == 0 && large_ds_read) {
+ size = 16;
+ op = aco_opcode::ds_read_b128;
+ } else if (bytes_needed >= 16 && align % 8 == 0 && const_offset % 8 == 0 && usable_read2) {
+ size = 16;
+ read2 = true;
+ op = aco_opcode::ds_read2_b64;
+ } else if (bytes_needed >= 12 && align % 16 == 0 && large_ds_read) {
+ size = 12;
+ op = aco_opcode::ds_read_b96;
+ } else if (bytes_needed >= 8 && align % 8 == 0) {
+ size = 8;
+ op = aco_opcode::ds_read_b64;
+ } else if (bytes_needed >= 8 && align % 4 == 0 && const_offset % 4 == 0) {
+ size = 8;
+ read2 = true;
+ op = aco_opcode::ds_read2_b32;
+ } else if (bytes_needed >= 4 && align % 4 == 0) {
+ size = 4;
+ op = aco_opcode::ds_read_b32;
+ } else if (bytes_needed >= 2 && align % 2 == 0) {
+ size = 2;
+ op = aco_opcode::ds_read_u16;
+ } else {
+ size = 1;
+ op = aco_opcode::ds_read_u8;
+ }
+
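+ /* read2 encodes two 8-bit offsets in units of size/2 bytes; offset1 = offset0 + 1 must also fit in 8 bits, so offset0 is capped at 254 */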
+ unsigned max_offset_plus_one = read2 ? 254 * (size / 2u) + 1 : 65536;
+ if (const_offset >= max_offset_plus_one) {
+ offset = bld.vadd32(bld.def(v1), offset, Operand(const_offset / max_offset_plus_one * max_offset_plus_one));
+ const_offset %= max_offset_plus_one;
+ }
+
+ if (read2)
+ const_offset /= (size / 2u);
+
+ RegClass rc = RegClass(RegType::vgpr, DIV_ROUND_UP(size, 4));
+ Temp val = rc == info->dst.regClass() && dst_hint.id() ? dst_hint : bld.tmp(rc);
+ Instruction *instr;
+ if (read2)
+ instr = bld.ds(op, Definition(val), offset, m, const_offset, const_offset + 1);
+ else
+ instr = bld.ds(op, Definition(val), offset, m, const_offset);
+ static_cast<DS_instruction *>(instr)->sync = info->sync;
+
+ if (size < 4)
+ val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, size)), val, Operand(0u));
+
+ return val;
+}
+
+static auto emit_lds_load = emit_load<lds_load_callback, false, true, UINT32_MAX>;
+
+Temp smem_load_callback(Builder& bld, const LoadEmitInfo *info,
+ Temp offset, unsigned bytes_needed,
+ unsigned align, unsigned const_offset,
+ Temp dst_hint)
+{
+ unsigned size = 0;
+ aco_opcode op;
+ if (bytes_needed <= 4) {
+ size = 1;
+ op = info->resource.id() ? aco_opcode::s_buffer_load_dword : aco_opcode::s_load_dword;
+ } else if (bytes_needed <= 8) {
+ size = 2;
+ op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx2 : aco_opcode::s_load_dwordx2;
+ } else if (bytes_needed <= 16) {
+ size = 4;
+ op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx4 : aco_opcode::s_load_dwordx4;
+ } else if (bytes_needed <= 32) {
+ size = 8;
+ op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx8 : aco_opcode::s_load_dwordx8;
+ } else {
+ size = 16;
+ op = info->resource.id() ? aco_opcode::s_buffer_load_dwordx16 : aco_opcode::s_load_dwordx16;
+ }
+ aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
+ if (info->resource.id()) {
+ load->operands[0] = Operand(info->resource);
+ load->operands[1] = Operand(offset);
+ } else {
+ load->operands[0] = Operand(offset);
+ load->operands[1] = Operand(0u);
+ }
+ RegClass rc(RegType::sgpr, size);
+ Temp val = dst_hint.id() && dst_hint.regClass() == rc ? dst_hint : bld.tmp(rc);
+ load->definitions[0] = Definition(val);
+ load->glc = info->glc;
+ load->dlc = info->glc && bld.program->chip_class >= GFX10;
+ load->sync = info->sync;
+ bld.insert(std::move(load));
+ return val;
+}
+
+static auto emit_smem_load = emit_load<smem_load_callback, true, false, 1024>;
+
+Temp mubuf_load_callback(Builder& bld, const LoadEmitInfo *info,
+ Temp offset, unsigned bytes_needed,
+ unsigned align_, unsigned const_offset,
+ Temp dst_hint)
+{
+ Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
+ Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
+
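+ /* when a separate soffset is used (e.g. for scratch), a computed SGPR offset has to move into vaddr instead */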
+ if (info->soffset.id()) {
+ if (soffset.isTemp())
+ vaddr = bld.copy(bld.def(v1), soffset);
+ soffset = Operand(info->soffset);
+ }
+
+ unsigned bytes_size = 0;
+ aco_opcode op;
+ if (bytes_needed == 1 || align_ % 2) {
+ bytes_size = 1;
+ op = aco_opcode::buffer_load_ubyte;
+ } else if (bytes_needed == 2 || align_ % 4) {
+ bytes_size = 2;
+ op = aco_opcode::buffer_load_ushort;
+ } else if (bytes_needed <= 4) {
+ bytes_size = 4;
+ op = aco_opcode::buffer_load_dword;
+ } else if (bytes_needed <= 8) {
+ bytes_size = 8;
+ op = aco_opcode::buffer_load_dwordx2;
+ } else if (bytes_needed <= 12 && bld.program->chip_class > GFX6) {
+ bytes_size = 12;
+ op = aco_opcode::buffer_load_dwordx3;
+ } else {
+ bytes_size = 16;
+ op = aco_opcode::buffer_load_dwordx4;
+ }
+ aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
+ mubuf->operands[0] = Operand(info->resource);
+ mubuf->operands[1] = vaddr;
+ mubuf->operands[2] = soffset;
+ mubuf->offen = (offset.type() == RegType::vgpr);
+ mubuf->glc = info->glc;
+ mubuf->dlc = info->glc && bld.program->chip_class >= GFX10;
+ mubuf->sync = info->sync;
+ mubuf->offset = const_offset;
+ mubuf->swizzled = info->swizzle_component_size != 0;
+ RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
+ Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
+ mubuf->definitions[0] = Definition(val);
+ bld.insert(std::move(mubuf));
+
+ return val;
+}
+
+static auto emit_mubuf_load = emit_load<mubuf_load_callback, true, true, 4096>;
+static auto emit_scratch_load = emit_load<mubuf_load_callback, false, true, 4096>;
+
+Temp get_gfx6_global_rsrc(Builder& bld, Temp addr)
+{
+ uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+
+ if (addr.type() == RegType::vgpr)
+ return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf));
+ return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf));
+}
+
+Temp global_load_callback(Builder& bld, const LoadEmitInfo *info,
+ Temp offset, unsigned bytes_needed,
+ unsigned align_, unsigned const_offset,
+ Temp dst_hint)
+{
+ unsigned bytes_size = 0;
+ bool mubuf = bld.program->chip_class == GFX6;
+ bool global = bld.program->chip_class >= GFX9;
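+ /* GFX6 lacks FLAT instructions, so fall back to MUBUF with addr64; GFX9+ use GLOBAL instead of FLAT */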
+ aco_opcode op;
+ if (bytes_needed == 1) {
+ bytes_size = 1;
+ op = mubuf ? aco_opcode::buffer_load_ubyte : global ? aco_opcode::global_load_ubyte : aco_opcode::flat_load_ubyte;
+ } else if (bytes_needed == 2) {
+ bytes_size = 2;
+ op = mubuf ? aco_opcode::buffer_load_ushort : global ? aco_opcode::global_load_ushort : aco_opcode::flat_load_ushort;
+ } else if (bytes_needed <= 4) {
+ bytes_size = 4;
+ op = mubuf ? aco_opcode::buffer_load_dword : global ? aco_opcode::global_load_dword : aco_opcode::flat_load_dword;
+ } else if (bytes_needed <= 8) {
+ bytes_size = 8;
+ op = mubuf ? aco_opcode::buffer_load_dwordx2 : global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2;
+ } else if (bytes_needed <= 12 && !mubuf) {
+ bytes_size = 12;
+ op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
+ } else {
+ bytes_size = 16;
+ op = mubuf ? aco_opcode::buffer_load_dwordx4 : global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4;
+ }
+ RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4));
+ Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
+ if (mubuf) {
+ aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
+ mubuf->operands[0] = Operand(get_gfx6_global_rsrc(bld, offset));
+ mubuf->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
+ mubuf->operands[2] = Operand(0u);
+ mubuf->glc = info->glc;
+ mubuf->dlc = false;
+ mubuf->offset = 0;
+ mubuf->addr64 = offset.type() == RegType::vgpr;
+ mubuf->disable_wqm = false;
+ mubuf->sync = info->sync;
+ mubuf->definitions[0] = Definition(val);
+ bld.insert(std::move(mubuf));
+ } else {
+ offset = offset.regClass() == s2 ? bld.copy(bld.def(v2), offset) : offset;
+
+ aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
+ flat->operands[0] = Operand(offset);
+ flat->operands[1] = Operand(s1);
+ flat->glc = info->glc;
+ flat->dlc = info->glc && bld.program->chip_class >= GFX10;
+ flat->sync = info->sync;
+ flat->offset = 0u;
+ flat->definitions[0] = Definition(val);
+ bld.insert(std::move(flat));
+ }
+
+ return val;
+}
+
+static auto emit_global_load = emit_load<global_load_callback, true, true, 1>;
+
+Temp load_lds(isel_context *ctx, unsigned elem_size_bytes, Temp dst,
+ Temp address, unsigned base_offset, unsigned align)
+{
+ assert(util_is_power_of_two_nonzero(align));
+
+ Builder bld(ctx->program, ctx->block);
+
+ unsigned num_components = dst.bytes() / elem_size_bytes;
+ LoadEmitInfo info = {Operand(as_vgpr(ctx, address)), dst, num_components, elem_size_bytes};
+ info.align_mul = align;
+ info.align_offset = 0;
+ info.sync = memory_sync_info(storage_shared);
+ info.const_offset = base_offset;
+ emit_lds_load(ctx, bld, &info);
return dst;
}
-Temp extract_subvector(isel_context *ctx, Temp data, unsigned start, unsigned size, RegType type)
+void split_store_data(isel_context *ctx, RegType dst_type, unsigned count, Temp *dst, unsigned *offsets, Temp src)
{
- if (start == 0 && size == data.size())
- return type == RegType::vgpr ? as_vgpr(ctx, data) : data;
+ if (!count)
+ return;
- unsigned size_hint = 1;
- auto it = ctx->allocated_vec.find(data.id());
- if (it != ctx->allocated_vec.end())
- size_hint = it->second[0].size();
- if (size % size_hint || start % size_hint)
- size_hint = 1;
+ Builder bld(ctx->program, ctx->block);
- start /= size_hint;
- size /= size_hint;
+ ASSERTED bool is_subdword = false;
+ for (unsigned i = 0; i < count; i++)
+ is_subdword |= offsets[i] % 4;
+ is_subdword |= (src.bytes() - offsets[count - 1]) % 4;
+ assert(!is_subdword || dst_type == RegType::vgpr);
- Temp elems[size];
- for (unsigned i = 0; i < size; i++)
- elems[i] = emit_extract_vector(ctx, data, start + i, RegClass(type, size_hint));
+ /* count == 1 fast path */
+ if (count == 1) {
+ if (dst_type == RegType::sgpr)
+ dst[0] = bld.as_uniform(src);
+ else
+ dst[0] = as_vgpr(ctx, src);
+ return;
+ }
- if (size == 1)
- return type == RegType::vgpr ? as_vgpr(ctx, elems[0]) : elems[0];
+ for (unsigned i = 0; i < count - 1; i++)
+ dst[i] = bld.tmp(RegClass::get(dst_type, offsets[i + 1] - offsets[i]));
+ dst[count - 1] = bld.tmp(RegClass::get(dst_type, src.bytes() - offsets[count - 1]));
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, size, 1)};
- for (unsigned i = 0; i < size; i++)
- vec->operands[i] = Operand(elems[i]);
- Temp res = {ctx->program->allocateId(), RegClass(type, size * size_hint)};
- vec->definitions[0] = Definition(res);
- ctx->block->instructions.emplace_back(std::move(vec));
- return res;
+ if (is_subdword && src.type() == RegType::sgpr) {
+ src = as_vgpr(ctx, src);
+ } else {
+ /* use allocated_vec if possible */
+ auto it = ctx->allocated_vec.find(src.id());
+ if (it != ctx->allocated_vec.end()) {
+ if (!it->second[0].id())
+ goto split;
+ unsigned elem_size = it->second[0].bytes();
+ assert(src.bytes() % elem_size == 0);
+
+ for (unsigned i = 0; i < src.bytes() / elem_size; i++) {
+ if (!it->second[i].id())
+ goto split;
+ }
+
+ for (unsigned i = 0; i < count; i++) {
+ if (offsets[i] % elem_size || dst[i].bytes() % elem_size)
+ goto split;
+ }
+
+ for (unsigned i = 0; i < count; i++) {
+ unsigned start_idx = offsets[i] / elem_size;
+ unsigned op_count = dst[i].bytes() / elem_size;
+ if (op_count == 1) {
+ if (dst_type == RegType::sgpr)
+ dst[i] = bld.as_uniform(it->second[start_idx]);
+ else
+ dst[i] = as_vgpr(ctx, it->second[start_idx]);
+ continue;
+ }
+
+ aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, op_count, 1)};
+ for (unsigned j = 0; j < op_count; j++) {
+ Temp tmp = it->second[start_idx + j];
+ if (dst_type == RegType::sgpr)
+ tmp = bld.as_uniform(tmp);
+ vec->operands[j] = Operand(tmp);
+ }
+ vec->definitions[0] = Definition(dst[i]);
+ bld.insert(std::move(vec));
+ }
+ return;
+ }
+ }
+
+ split:
+
+ if (dst_type == RegType::sgpr)
+ src = bld.as_uniform(src);
+
+ /* just split it */
+ aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, count)};
+ split->operands[0] = Operand(src);
+ for (unsigned i = 0; i < count; i++)
+ split->definitions[i] = Definition(dst[i]);
+ bld.insert(std::move(split));
+}
+
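+/* Picks the next consecutive byte range to process from todo_mask. Returns true
+ * and the range's extent in *start/*count when the lowest pending bit is set in
+ * mask (i.e. actually written); returns false and the extent of the unwritten
+ * gap otherwise. */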
+bool scan_write_mask(uint32_t mask, uint32_t todo_mask,
+ int *start, int *count)
+{
+ unsigned start_elem = ffs(todo_mask) - 1;
+ bool skip = !(mask & (1 << start_elem));
+ if (skip)
+ mask = ~mask & todo_mask;
+
+ mask &= todo_mask;
+
+ u_bit_scan_consecutive_range(&mask, start, count);
+
+ return !skip;
}
-void ds_write_helper(isel_context *ctx, Operand m, Temp address, Temp data, unsigned data_start, unsigned total_size, unsigned offset0, unsigned offset1, unsigned align)
+void advance_write_mask(uint32_t *todo_mask, int start, int count)
{
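+ /* Clears bits [0, start + count): shifting ~u_bit_consecutive(0, count) left
+  * also zeroes everything below start, which is safe because todo_mask is
+  * consumed from the LSB upwards. */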
+ *todo_mask &= ~u_bit_consecutive(0, count) << start;
+}
+
+void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask,
+ Temp address, unsigned base_offset, unsigned align)
+{
+ assert(util_is_power_of_two_nonzero(align));
+ assert(util_is_power_of_two_nonzero(elem_size_bytes) && elem_size_bytes <= 8);
+
Builder bld(ctx->program, ctx->block);
- unsigned bytes_written = 0;
bool large_ds_write = ctx->options->chip_class >= GFX7;
bool usable_write2 = ctx->options->chip_class >= GFX7;
- while (bytes_written < total_size * 4) {
- unsigned todo = total_size * 4 - bytes_written;
- bool aligned8 = bytes_written % 8 == 0 && align % 8 == 0;
- bool aligned16 = bytes_written % 16 == 0 && align % 16 == 0;
+ unsigned write_count = 0;
+ Temp write_datas[32];
+ unsigned offsets[32];
+ aco_opcode opcodes[32];
+
+ wrmask = widen_mask(wrmask, elem_size_bytes);
+
+ uint32_t todo = u_bit_consecutive(0, data.bytes());
+ while (todo) {
+ int offset, bytes;
+ if (!scan_write_mask(wrmask, todo, &offset, &bytes)) {
+ offsets[write_count] = offset;
+ opcodes[write_count] = aco_opcode::num_opcodes;
+ write_count++;
+ advance_write_mask(&todo, offset, bytes);
+ continue;
+ }
+
+ bool aligned2 = offset % 2 == 0 && align % 2 == 0;
+ bool aligned4 = offset % 4 == 0 && align % 4 == 0;
+ bool aligned8 = offset % 8 == 0 && align % 8 == 0;
+ bool aligned16 = offset % 16 == 0 && align % 16 == 0;
- aco_opcode op = aco_opcode::last_opcode;
- bool write2 = false;
- unsigned size = 0;
- if (todo >= 16 && aligned16 && large_ds_write) {
+ // TODO: use ds_write_b8_d16_hi/ds_write_b16_d16_hi if beneficial
+ aco_opcode op = aco_opcode::num_opcodes;
+ if (bytes >= 16 && aligned16 && large_ds_write) {
op = aco_opcode::ds_write_b128;
- size = 4;
- } else if (todo >= 16 && aligned8 && usable_write2) {
- op = aco_opcode::ds_write2_b64;
- write2 = true;
- size = 4;
- } else if (todo >= 12 && aligned16 && large_ds_write) {
+ bytes = 16;
+ } else if (bytes >= 12 && aligned16 && large_ds_write) {
op = aco_opcode::ds_write_b96;
- size = 3;
- } else if (todo >= 8 && aligned8) {
+ bytes = 12;
+ } else if (bytes >= 8 && aligned8) {
op = aco_opcode::ds_write_b64;
- size = 2;
- } else if (todo >= 8 && usable_write2) {
- op = aco_opcode::ds_write2_b32;
- write2 = true;
- size = 2;
- } else if (todo >= 4) {
+ bytes = 8;
+ } else if (bytes >= 4 && aligned4) {
op = aco_opcode::ds_write_b32;
- size = 1;
+ bytes = 4;
+ } else if (bytes >= 2 && aligned2) {
+ op = aco_opcode::ds_write_b16;
+ bytes = 2;
+ } else if (bytes >= 1) {
+ op = aco_opcode::ds_write_b8;
+ bytes = 1;
} else {
assert(false);
}
- unsigned offset = offset0 + offset1 + bytes_written;
- unsigned max_offset = write2 ? 1020 : 65535;
- Temp address_offset = address;
- if (offset > max_offset) {
- address_offset = bld.vadd32(bld.def(v1), Operand(offset0), address_offset);
- offset = offset1 + bytes_written;
- }
- assert(offset <= max_offset); /* offset1 shouldn't be large enough for this to happen */
+ offsets[write_count] = offset;
+ opcodes[write_count] = op;
+ write_count++;
+ advance_write_mask(&todo, offset, bytes);
+ }
- if (write2) {
- Temp val0 = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size / 2, RegType::vgpr);
- Temp val1 = extract_subvector(ctx, data, data_start + (bytes_written >> 2) + 1, size / 2, RegType::vgpr);
- bld.ds(op, address_offset, val0, val1, m, offset / size / 2, (offset / size / 2) + 1);
- } else {
- Temp val = extract_subvector(ctx, data, data_start + (bytes_written >> 2), size, RegType::vgpr);
- bld.ds(op, address_offset, val, m, offset);
- }
+ Operand m = load_lds_size_m0(bld);
- bytes_written += size * 4;
- }
-}
+ split_store_data(ctx, RegType::vgpr, write_count, write_datas, offsets, data);
-void store_lds(isel_context *ctx, unsigned elem_size_bytes, Temp data, uint32_t wrmask,
- Temp address, unsigned base_offset, unsigned align)
-{
- assert(util_is_power_of_two_nonzero(align) && align >= 4);
- assert(elem_size_bytes == 4 || elem_size_bytes == 8);
+ for (unsigned i = 0; i < write_count; i++) {
+ aco_opcode op = opcodes[i];
+ if (op == aco_opcode::num_opcodes)
+ continue;
- Operand m = load_lds_size_m0(ctx);
+ Temp data = write_datas[i];
- /* we need at most two stores, assuming that the writemask is at most 4 bits wide */
- assert(wrmask <= 0x0f);
- int start[2], count[2];
- u_bit_scan_consecutive_range(&wrmask, &start[0], &count[0]);
- u_bit_scan_consecutive_range(&wrmask, &start[1], &count[1]);
- assert(wrmask == 0);
+ unsigned second = write_count;
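+ /* Try to merge this write with a later write of the same size into a single
+  * ds_write2; its two offsets are element-granular, so the byte distance
+  * between the writes must be a multiple of the write size. */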
+ if (usable_write2 && (op == aco_opcode::ds_write_b32 || op == aco_opcode::ds_write_b64)) {
+ for (second = i + 1; second < write_count; second++) {
+ if (opcodes[second] == op && (offsets[second] - offsets[i]) % data.bytes() == 0) {
+ op = data.bytes() == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
+ opcodes[second] = aco_opcode::num_opcodes;
+ break;
+ }
+ }
+ }
- /* one combined store is sufficient */
- if (count[0] == count[1] && (align % elem_size_bytes) == 0 && (base_offset % elem_size_bytes) == 0) {
- Builder bld(ctx->program, ctx->block);
+ bool write2 = op == aco_opcode::ds_write2_b32 || op == aco_opcode::ds_write2_b64;
+ unsigned write2_off = (offsets[second] - offsets[i]) / data.bytes();
+ unsigned inline_offset = base_offset + offsets[i];
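+ /* ds_write2 offsets are 8-bit element counts (the second offset is
+  * inline_offset / bytes + write2_off and must fit in 8 bits), while plain DS
+  * writes take a 16-bit byte offset. */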
+ unsigned max_offset = write2 ? (255 - write2_off) * data.bytes() : 65535;
Temp address_offset = address;
- if ((base_offset / elem_size_bytes) + start[1] > 255) {
+ if (inline_offset > max_offset) {
address_offset = bld.vadd32(bld.def(v1), Operand(base_offset), address_offset);
- base_offset = 0;
+ inline_offset = offsets[i];
}
+ assert(inline_offset <= max_offset); /* offsets[i] shouldn't be large enough for this to happen */
- assert(count[0] == 1);
- RegClass xtract_rc(RegType::vgpr, elem_size_bytes / 4);
-
- Temp val0 = emit_extract_vector(ctx, data, start[0], xtract_rc);
- Temp val1 = emit_extract_vector(ctx, data, start[1], xtract_rc);
- aco_opcode op = elem_size_bytes == 4 ? aco_opcode::ds_write2_b32 : aco_opcode::ds_write2_b64;
- base_offset = base_offset / elem_size_bytes;
- bld.ds(op, address_offset, val0, val1, m,
- base_offset + start[0], base_offset + start[1]);
- return;
- }
-
- for (unsigned i = 0; i < 2; i++) {
- if (count[i] == 0)
- continue;
-
- unsigned elem_size_words = elem_size_bytes / 4;
- ds_write_helper(ctx, m, address, data, start[i] * elem_size_words, count[i] * elem_size_words,
- base_offset, start[i] * elem_size_bytes, align);
+ Instruction *instr;
+ if (write2) {
+ Temp second_data = write_datas[second];
+ inline_offset /= data.bytes();
+ instr = bld.ds(op, address_offset, data, second_data, m, inline_offset, inline_offset + write2_off);
+ } else {
+ instr = bld.ds(op, address_offset, data, m, inline_offset);
+ }
+ static_cast<DS_instruction *>(instr)->sync =
+ memory_sync_info(storage_shared);
}
- return;
}
unsigned calculate_lds_alignment(isel_context *ctx, unsigned const_offset)
}
-Temp create_vec_from_array(isel_context *ctx, Temp arr[], unsigned cnt, RegType reg_type, unsigned split_cnt = 0u, Temp dst = Temp())
+aco_opcode get_buffer_store_op(bool smem, unsigned bytes)
+{
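+ /* SMEM only has dword-granular stores: the byte, short and dwordx3 variants are VMEM-only. */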
+ switch (bytes) {
+ case 1:
+ assert(!smem);
+ return aco_opcode::buffer_store_byte;
+ case 2:
+ assert(!smem);
+ return aco_opcode::buffer_store_short;
+ case 4:
+ return smem ? aco_opcode::s_buffer_store_dword : aco_opcode::buffer_store_dword;
+ case 8:
+ return smem ? aco_opcode::s_buffer_store_dwordx2 : aco_opcode::buffer_store_dwordx2;
+ case 12:
+ assert(!smem);
+ return aco_opcode::buffer_store_dwordx3;
+ case 16:
+ return smem ? aco_opcode::s_buffer_store_dwordx4 : aco_opcode::buffer_store_dwordx4;
+ }
+ unreachable("Unexpected store size");
+ return aco_opcode::num_opcodes;
+}
+
+void split_buffer_store(isel_context *ctx, nir_intrinsic_instr *instr, bool smem, RegType dst_type,
+ Temp data, unsigned writemask, int swizzle_element_size,
+ unsigned *write_count, Temp *write_datas, unsigned *offsets)
+{
+ unsigned write_count_with_skips = 0;
+ bool skips[16];
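+ /* Gaps in the writemask still get entries here so that split_store_data()
+  * splits the source at the right byte offsets; the skipped pieces are
+  * dropped again at the end. */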
+
+ /* determine how to split the data */
+ unsigned todo = u_bit_consecutive(0, data.bytes());
+ while (todo) {
+ int offset, bytes;
+ skips[write_count_with_skips] = !scan_write_mask(writemask, todo, &offset, &bytes);
+ offsets[write_count_with_skips] = offset;
+ if (skips[write_count_with_skips]) {
+ advance_write_mask(&todo, offset, bytes);
+ write_count_with_skips++;
+ continue;
+ }
+
+ /* the only supported store sizes are 1, 2, 4, 8, 12 and 16 bytes, and a
+ * single store can't be larger than swizzle_element_size */
+ bytes = MIN2(bytes, swizzle_element_size);
+ if (bytes % 4)
+ bytes = bytes > 4 ? bytes & ~0x3 : MIN2(bytes, 2);
+
+ /* SMEM and GFX6 VMEM can't emit 12-byte stores */
+ if ((ctx->program->chip_class == GFX6 || smem) && bytes == 12)
+ bytes = 8;
+
+ /* dword or larger stores have to be dword-aligned */
+ unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
+ unsigned align_offset = (instr ? nir_intrinsic_align_offset(instr) : 0) + offset;
+ bool dword_aligned = align_offset % 4 == 0 && align_mul % 4 == 0;
+ if (!dword_aligned)
+ bytes = MIN2(bytes, (align_offset % 2 == 0 && align_mul % 2 == 0) ? 2 : 1);
+
+ advance_write_mask(&todo, offset, bytes);
+ write_count_with_skips++;
+ }
+
+ /* actually split data */
+ split_store_data(ctx, dst_type, write_count_with_skips, write_datas, offsets, data);
+
+ /* remove skips */
+ for (unsigned i = 0; i < write_count_with_skips; i++) {
+ if (skips[i])
+ continue;
+ write_datas[*write_count] = write_datas[i];
+ offsets[*write_count] = offsets[i];
+ (*write_count)++;
+ }
+}
+
+Temp create_vec_from_array(isel_context *ctx, Temp arr[], unsigned cnt, RegType reg_type, unsigned elem_size_bytes,
+ unsigned split_cnt = 0u, Temp dst = Temp())
{
Builder bld(ctx->program, ctx->block);
+ unsigned dword_size = elem_size_bytes / 4;
if (!dst.id())
- dst = bld.tmp(RegClass(reg_type, cnt * arr[0].size()));
+ dst = bld.tmp(RegClass(reg_type, cnt * dword_size));
std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
aco_ptr<Pseudo_instruction> instr {create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, cnt, 1)};
instr->definitions[0] = Definition(dst);
for (unsigned i = 0; i < cnt; ++i) {
- assert(arr[i].size() == arr[0].size());
- allocated_vec[i] = arr[i];
- instr->operands[i] = Operand(arr[i]);
+ if (arr[i].id()) {
+ assert(arr[i].size() == dword_size);
+ allocated_vec[i] = arr[i];
+ instr->operands[i] = Operand(arr[i]);
+ } else {
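+ /* Fill holes in the array with zero; the second Operand argument makes the
+  * immediate 64-bit for 2-dword elements. */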
+ Temp zero = bld.copy(bld.def(RegClass(reg_type, dword_size)), Operand(0u, dword_size == 2));
+ allocated_vec[i] = zero;
+ instr->operands[i] = Operand(zero);
+ }
}
bld.insert(std::move(instr));
}
void emit_single_mubuf_store(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
- unsigned const_offset = 0u, bool allow_reorder = true, bool slc = false)
+ unsigned const_offset = 0u, memory_sync_info sync=memory_sync_info(),
+ bool slc = false, bool swizzled = false)
{
assert(vdata.id());
assert(vdata.size() != 3 || ctx->program->chip_class != GFX6);
assert(vdata.size() >= 1 && vdata.size() <= 4);
Builder bld(ctx->program, ctx->block);
- aco_opcode op = (aco_opcode) ((unsigned) aco_opcode::buffer_store_dword + vdata.size() - 1);
+ aco_opcode op = get_buffer_store_op(false, vdata.bytes());
const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
Builder::Result r = bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
- /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
- /* disable_wqm */ false, /* glc */ true, /* dlc*/ false, /* slc */ slc);
+ /* offen */ !voffset_op.isUndefined(), /* swizzled */ swizzled,
+ /* idxen*/ false, /* addr64 */ false, /* disable_wqm */ false, /* glc */ true,
+ /* dlc*/ false, /* slc */ slc);
- static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
+ static_cast<MUBUF_instruction *>(r.instr)->sync = sync;
}
void store_vmem_mubuf(isel_context *ctx, Temp src, Temp descriptor, Temp voffset, Temp soffset,
unsigned base_const_offset, unsigned elem_size_bytes, unsigned write_mask,
- bool allow_combining = true, bool reorder = true, bool slc = false)
+ bool allow_combining = true, memory_sync_info sync=memory_sync_info(), bool slc = false)
{
Builder bld(ctx->program, ctx->block);
- assert(elem_size_bytes == 4 || elem_size_bytes == 8);
+ assert(elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
assert(write_mask);
+ write_mask = widen_mask(write_mask, elem_size_bytes);
- if (elem_size_bytes == 8) {
- elem_size_bytes = 4;
- write_mask = widen_mask(write_mask, 2);
- }
-
- while (write_mask) {
- int start = 0;
- int count = 0;
- u_bit_scan_consecutive_range(&write_mask, &start, &count);
- assert(count > 0);
- assert(start >= 0);
-
- while (count > 0) {
- unsigned sub_count = allow_combining ? MIN2(count, 4) : 1;
- unsigned const_offset = (unsigned) start * elem_size_bytes + base_const_offset;
+ unsigned write_count = 0;
+ Temp write_datas[32];
+ unsigned offsets[32];
+ split_buffer_store(ctx, NULL, false, RegType::vgpr, src, write_mask,
+ allow_combining ? 16 : 4, &write_count, write_datas, offsets);
- /* GFX6 doesn't have buffer_store_dwordx3, so make sure not to emit that here either. */
- if (unlikely(ctx->program->chip_class == GFX6 && sub_count == 3))
- sub_count = 2;
-
- Temp elem = extract_subvector(ctx, src, start, sub_count, RegType::vgpr);
- emit_single_mubuf_store(ctx, descriptor, voffset, soffset, elem, const_offset, reorder, slc);
-
- count -= sub_count;
- start += sub_count;
- }
-
- assert(count == 0);
+ for (unsigned i = 0; i < write_count; i++) {
+ unsigned const_offset = offsets[i] + base_const_offset;
+ emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, sync, slc, !allow_combining);
}
}
-Temp emit_single_mubuf_load(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset,
- unsigned const_offset, unsigned size_dwords, bool allow_reorder = true)
-{
- assert(size_dwords != 3 || ctx->program->chip_class != GFX6);
- assert(size_dwords >= 1 && size_dwords <= 4);
-
- Builder bld(ctx->program, ctx->block);
- Temp vdata = bld.tmp(RegClass(RegType::vgpr, size_dwords));
- aco_opcode op = (aco_opcode) ((unsigned) aco_opcode::buffer_load_dword + size_dwords - 1);
- const_offset = resolve_excess_vmem_const_offset(bld, voffset, const_offset);
-
- Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
- Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
- Builder::Result r = bld.mubuf(op, Definition(vdata), Operand(descriptor), voffset_op, soffset_op, const_offset,
- /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
- /* disable_wqm */ false, /* glc */ true,
- /* dlc*/ ctx->program->chip_class >= GFX10, /* slc */ false);
-
- static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
-
- return vdata;
-}
-
void load_vmem_mubuf(isel_context *ctx, Temp dst, Temp descriptor, Temp voffset, Temp soffset,
unsigned base_const_offset, unsigned elem_size_bytes, unsigned num_components,
unsigned stride = 0u, bool allow_combining = true, bool allow_reorder = true)
{
- assert(elem_size_bytes == 4 || elem_size_bytes == 8);
- assert((num_components * elem_size_bytes / 4) == dst.size());
+ assert(elem_size_bytes == 2 || elem_size_bytes == 4 || elem_size_bytes == 8);
+ assert((num_components * elem_size_bytes) == dst.bytes());
assert(!!stride != allow_combining);
Builder bld(ctx->program, ctx->block);
- unsigned split_cnt = num_components;
-
- if (elem_size_bytes == 8) {
- elem_size_bytes = 4;
- num_components *= 2;
- }
-
- if (!stride)
- stride = elem_size_bytes;
-
- unsigned load_size = 1;
- if (allow_combining) {
- if ((num_components % 4) == 0)
- load_size = 4;
- else if ((num_components % 3) == 0 && ctx->program->chip_class != GFX6)
- load_size = 3;
- else if ((num_components % 2) == 0)
- load_size = 2;
- }
-
- unsigned num_loads = num_components / load_size;
- std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
-
- for (unsigned i = 0; i < num_loads; ++i) {
- unsigned const_offset = i * stride * load_size + base_const_offset;
- elems[i] = emit_single_mubuf_load(ctx, descriptor, voffset, soffset, const_offset, load_size, allow_reorder);
- }
- create_vec_from_array(ctx, elems.data(), num_loads, RegType::vgpr, split_cnt, dst);
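+ /* When combining is disallowed, the access is swizzled: each component is
+  * loaded separately with the given stride at a 4-byte swizzle granularity. */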
+ LoadEmitInfo info = {Operand(voffset), dst, num_components, elem_size_bytes, descriptor};
+ info.component_stride = allow_combining ? 0 : stride;
+ info.glc = true;
+ info.swizzle_component_size = allow_combining ? 0 : 4;
+ info.align_mul = MIN2(elem_size_bytes, 4);
+ info.align_offset = 0;
+ info.soffset = soffset;
+ info.const_offset = base_const_offset;
+ emit_mubuf_load(ctx, bld, &info);
}
std::pair<Temp, unsigned> offset_add_from_nir(isel_context *ctx, const std::pair<Temp, unsigned> &base_offset, nir_src *off_src, unsigned stride = 1u)
/* Calculate indirect offset with stride */
if (likely(indirect_offset_arg.regClass() == v1))
- with_stride = bld.v_mul_imm(bld.def(v1), indirect_offset_arg, stride);
+ with_stride = bld.v_mul24_imm(bld.def(v1), indirect_offset_arg, stride);
else if (indirect_offset_arg.regClass() == s1)
with_stride = bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(stride), indirect_offset_arg);
else
Temp offset = unlikely(offs.first.regClass() == s1)
? bld.sop2(aco_opcode::s_mul_i32, bld.def(s1), Operand(multiplier), offs.first)
- : bld.v_mul_imm(bld.def(v1), offs.first, multiplier);
+ : bld.v_mul24_imm(bld.def(v1), offs.first, multiplier);
return std::make_pair(offset, const_offset);
}
Builder bld(ctx->program, ctx->block);
uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * ctx->tcs_num_inputs * 16;
- uint32_t num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
- uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->args->shader_info->tcs.patch_outputs_written);
- uint32_t output_vertex_size = num_tcs_outputs * 16;
+ uint32_t output_vertex_size = ctx->tcs_num_outputs * 16;
uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
- uint32_t output_patch_stride = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
+ uint32_t output_patch_stride = pervertex_output_patch_size + ctx->tcs_num_patch_outputs * 16;
std::pair<Temp, unsigned> offs = instr
? get_intrinsic_io_basic_offset(ctx, instr, 4u)
{
Builder bld(ctx->program, ctx->block);
- unsigned num_tcs_outputs = ctx->shader->info.stage == MESA_SHADER_TESS_CTRL
- ? util_last_bit64(ctx->args->shader_info->tcs.outputs_written)
- : ctx->args->options->key.tes.tcs_num_outputs;
-
- unsigned output_vertex_size = num_tcs_outputs * 16;
+ unsigned output_vertex_size = ctx->tcs_num_outputs * 16;
unsigned per_vertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
unsigned per_patch_data_offset = per_vertex_output_patch_size * ctx->tcs_num_patches;
unsigned attr_stride = ctx->tcs_num_patches;
offs.second += const_base_offset * attr_stride;
Temp rel_patch_id = get_tess_rel_patch_id(ctx);
- Temp patch_off = bld.v_mul_imm(bld.def(v1), rel_patch_id, 16u);
+ Temp patch_off = bld.v_mul24_imm(bld.def(v1), rel_patch_id, 16u);
offs = offset_add(ctx, offs, std::make_pair(patch_off, per_patch_data_offset));
return offs;
}
+bool tcs_driver_location_matches_api_mask(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex, uint64_t mask, bool *indirect)
+{
+ assert(per_vertex || ctx->shader->info.stage == MESA_SHADER_TESS_CTRL);
+
+ if (mask == 0)
+ return false;
+
+ unsigned drv_loc = nir_intrinsic_base(instr);
+ nir_src *off_src = nir_get_io_offset_src(instr);
+
+ if (!nir_src_is_const(*off_src)) {
+ *indirect = true;
+ return false;
+ }
+
+ *indirect = false;
+ uint64_t slot = per_vertex
+ ? ctx->output_drv_loc_to_var_slot[ctx->shader->info.stage][drv_loc / 4]
+ : (ctx->output_tcs_patch_drv_loc_to_var_slot[drv_loc / 4] - VARYING_SLOT_PATCH0);
+ return (((uint64_t) 1) << slot) & mask;
+}
+
+bool store_output_to_temps(isel_context *ctx, nir_intrinsic_instr *instr)
+{
+ unsigned write_mask = nir_intrinsic_write_mask(instr);
+ unsigned component = nir_intrinsic_component(instr);
+ unsigned idx = nir_intrinsic_base(instr) + component;
+
+ nir_instr *off_instr = instr->src[1].ssa->parent_instr;
+ if (off_instr->type != nir_instr_type_load_const)
+ return false;
+
+ Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
+ idx += nir_src_as_uint(instr->src[1]) * 4u;
+
+ if (instr->src[0].ssa->bit_size == 64)
+ write_mask = widen_mask(write_mask, 2);
+
+ RegClass rc = instr->src[0].ssa->bit_size == 16 ? v2b : v1;
+
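+ /* Scatter the source components into ctx->outputs; 64-bit sources were
+  * widened to dwords above, 16-bit components stay as v2b temps. */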
+ for (unsigned i = 0; i < 8; ++i) {
+ if (write_mask & (1 << i)) {
+ ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
+ ctx->outputs.temps[idx] = emit_extract_vector(ctx, src, i, rc);
+ }
+ idx++;
+ }
+
+ return true;
+}
+
+bool load_input_from_temps(isel_context *ctx, nir_intrinsic_instr *instr, Temp dst)
+{
+ /* Only TCS per-vertex inputs are supported by this function.
+ * Per-vertex inputs can only be matched to the VS/TCS invocation id when the number of invocations is the same.
+ */
+ if (ctx->shader->info.stage != MESA_SHADER_TESS_CTRL || !ctx->tcs_in_out_eq)
+ return false;
+
+ nir_src *off_src = nir_get_io_offset_src(instr);
+ nir_src *vertex_index_src = nir_get_io_vertex_index_src(instr);
+ nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
+ bool can_use_temps = nir_src_is_const(*off_src) &&
+ vertex_index_instr->type == nir_instr_type_intrinsic &&
+ nir_instr_as_intrinsic(vertex_index_instr)->intrinsic == nir_intrinsic_load_invocation_id;
+
+ if (!can_use_temps)
+ return false;
+
+ unsigned idx = nir_intrinsic_base(instr) + nir_intrinsic_component(instr) + 4 * nir_src_as_uint(*off_src);
+ Temp *src = &ctx->inputs.temps[idx];
+ create_vec_from_array(ctx, src, dst.size(), dst.regClass().type(), 4u, 0, dst);
+
+ return true;
+}
+
void visit_store_ls_or_es_output(isel_context *ctx, nir_intrinsic_instr *instr)
{
Builder bld(ctx->program, ctx->block);
+ if (ctx->tcs_in_out_eq && store_output_to_temps(ctx, instr)) {
+ /* When the TCS only reads this output directly and for the same vertices as its invocation id, it is unnecessary to store the VS output to LDS. */
+ bool indirect_write;
+ bool temp_only_input = tcs_driver_location_matches_api_mask(ctx, instr, true, ctx->tcs_temp_only_inputs, &indirect_write);
+ if (temp_only_input && !indirect_write)
+ return;
+ }
+
std::pair<Temp, unsigned> offs = get_intrinsic_io_basic_offset(ctx, instr, 4u);
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
unsigned write_mask = nir_intrinsic_write_mask(instr);
/* GFX6-8: ES stage is not merged into GS, data is passed from ES to GS in VMEM. */
Temp esgs_ring = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_ESGS_VS * 16u));
Temp es2gs_offset = get_arg(ctx, ctx->args->es2gs_offset);
- store_vmem_mubuf(ctx, src, esgs_ring, offs.first, es2gs_offset, offs.second, elem_size_bytes, write_mask, false, true, true);
+ store_vmem_mubuf(ctx, src, esgs_ring, offs.first, es2gs_offset, offs.second, elem_size_bytes, write_mask, false, memory_sync_info(), true);
} else {
Temp lds_base;
/* GFX6-8: VS runs on LS stage when tessellation is used, but LS shares LDS space with HS.
* GFX9+: LS is merged into HS, but still uses the same LDS layout.
*/
- unsigned num_tcs_inputs = util_last_bit64(ctx->args->shader_info->vs.ls_outputs_written);
Temp vertex_idx = get_arg(ctx, ctx->args->rel_auto_id);
- lds_base = bld.v_mul_imm(bld.def(v1), vertex_idx, num_tcs_inputs * 16u);
+ lds_base = bld.v_mul24_imm(bld.def(v1), vertex_idx, ctx->tcs_num_inputs * 16u);
} else {
unreachable("Invalid LS or ES stage");
}
}
}
-bool should_write_tcs_patch_output_to_vmem(isel_context *ctx, nir_intrinsic_instr *instr)
+bool tcs_output_is_tess_factor(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
{
- unsigned off = nir_intrinsic_base(instr) * 4u;
- nir_src *off_src = nir_get_io_offset_src(instr);
-
- /* Indirect offset, we can't be sure if this is a tess factor, always write to VMEM */
- if (!nir_src_is_const(*off_src))
- return true;
-
- off += nir_src_as_uint(*off_src) * 16u;
+ if (per_vertex)
+ return false;
- const unsigned tess_index_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
- const unsigned tess_index_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
+ unsigned off = nir_intrinsic_base(instr) * 4u;
+ return off == ctx->tcs_tess_lvl_out_loc ||
+ off == ctx->tcs_tess_lvl_in_loc;
- return (off != (tess_index_inner * 16u)) &&
- (off != (tess_index_outer * 16u));
}
-bool should_write_tcs_patch_output_to_lds(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
+bool tcs_output_is_read_by_tes(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
{
- unsigned off = nir_intrinsic_base(instr) * 4u;
- nir_src *off_src = nir_get_io_offset_src(instr);
-
- /* When none of the appropriate outputs are read, we are OK to never write to LDS */
- if (per_vertex ? ctx->shader->info.outputs_read == 0U : ctx->shader->info.patch_outputs_read == 0u)
- return false;
-
- /* Indirect offset, we can't be sure if this is read or not, always write to LDS */
- if (!nir_src_is_const(*off_src))
- return true;
+ uint64_t mask = per_vertex
+ ? ctx->program->info->tcs.tes_inputs_read
+ : ctx->program->info->tcs.tes_patch_inputs_read;
- off += nir_src_as_uint(*off_src) * 16u;
+ bool indirect_write = false;
+ bool output_read_by_tes = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write);
+ return indirect_write || output_read_by_tes;
+}
- uint64_t out_rd = per_vertex
- ? ctx->shader->info.outputs_read
- : ctx->shader->info.patch_outputs_read;
- while (out_rd) {
- unsigned slot = u_bit_scan64(&out_rd) + (per_vertex ? 0 : VARYING_SLOT_PATCH0);
- if (off == shader_io_get_unique_index((gl_varying_slot) slot) * 16u)
- return true;
- }
+bool tcs_output_is_read_by_tcs(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
+{
+ uint64_t mask = per_vertex
+ ? ctx->shader->info.outputs_read
+ : ctx->shader->info.patch_outputs_read;
- return false;
+ bool indirect_write = false;
+ bool output_read = tcs_driver_location_matches_api_mask(ctx, instr, per_vertex, mask, &indirect_write);
+ return indirect_write || output_read;
}
void visit_store_tcs_output(isel_context *ctx, nir_intrinsic_instr *instr, bool per_vertex)
unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
unsigned write_mask = nir_intrinsic_write_mask(instr);
- /* Only write to VMEM if the output is per-vertex or it's per-patch non tess factor */
- bool write_to_vmem = per_vertex || should_write_tcs_patch_output_to_vmem(ctx, instr);
- /* Only write to LDS if the output is read by the shader, or it's per-patch tess factor */
- bool write_to_lds = !write_to_vmem || should_write_tcs_patch_output_to_lds(ctx, instr, per_vertex);
+ bool is_tess_factor = tcs_output_is_tess_factor(ctx, instr, per_vertex);
+ bool write_to_vmem = !is_tess_factor && tcs_output_is_read_by_tes(ctx, instr, per_vertex);
+ bool write_to_lds = is_tess_factor || tcs_output_is_read_by_tcs(ctx, instr, per_vertex);
if (write_to_vmem) {
std::pair<Temp, unsigned> vmem_offs = per_vertex
Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
- store_vmem_mubuf(ctx, store_val, hs_ring_tess_offchip, vmem_offs.first, oc_lds, vmem_offs.second, elem_size_bytes, write_mask, false, false);
+ store_vmem_mubuf(ctx, store_val, hs_ring_tess_offchip, vmem_offs.first, oc_lds, vmem_offs.second, elem_size_bytes, write_mask, true, memory_sync_info(storage_vmem_output));
}
if (write_to_lds) {
if (ctx->stage == vertex_vs ||
ctx->stage == tess_eval_vs ||
ctx->stage == fragment_fs ||
+ ctx->stage == ngg_vertex_gs ||
+ ctx->stage == ngg_tess_eval_gs ||
ctx->shader->info.stage == MESA_SHADER_GEOMETRY) {
- unsigned write_mask = nir_intrinsic_write_mask(instr);
- unsigned component = nir_intrinsic_component(instr);
- Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- unsigned idx = nir_intrinsic_base(instr) + component;
-
- nir_instr *off_instr = instr->src[1].ssa->parent_instr;
- if (off_instr->type != nir_instr_type_load_const) {
- fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
- nir_print_instr(off_instr, stderr);
- fprintf(stderr, "\n");
- }
- idx += nir_instr_as_load_const(off_instr)->value[0].u32 * 4u;
-
- if (instr->src[0].ssa->bit_size == 64)
- write_mask = widen_mask(write_mask, 2);
-
- for (unsigned i = 0; i < 8; ++i) {
- if (write_mask & (1 << i)) {
- ctx->outputs.mask[idx / 4u] |= 1 << (idx % 4u);
- ctx->outputs.outputs[idx / 4u][idx % 4u] = emit_extract_vector(ctx, src, i, v1);
- }
- idx++;
+ bool stored_to_temps = store_output_to_temps(ctx, instr);
+ if (!stored_to_temps) {
+ isel_err(instr->src[1].ssa->parent_instr, "Unimplemented output offset instruction");
+ abort();
}
} else if (ctx->stage == vertex_es ||
ctx->stage == vertex_ls ||
Temp coord2 = emit_extract_vector(ctx, src, 1, v1);
Builder bld(ctx->program, ctx->block);
- Builder::Result interp_p1 = bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1, bld.m0(prim_mask), idx, component);
- if (ctx->program->has_16bank_lds)
- interp_p1.instr->operands[0].setLateKill(true);
- bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2, bld.m0(prim_mask), interp_p1, idx, component);
+
+ if (dst.regClass() == v2b) {
+ if (ctx->program->has_16bank_lds) {
+ assert(ctx->options->chip_class <= GFX8);
+ Builder::Result interp_p1 =
+ bld.vintrp(aco_opcode::v_interp_mov_f32, bld.def(v1),
+ Operand(2u) /* P0 */, bld.m0(prim_mask), idx, component);
+ interp_p1 = bld.vintrp(aco_opcode::v_interp_p1lv_f16, bld.def(v2b),
+ coord1, bld.m0(prim_mask), interp_p1, idx, component);
+ bld.vintrp(aco_opcode::v_interp_p2_legacy_f16, Definition(dst), coord2,
+ bld.m0(prim_mask), interp_p1, idx, component);
+ } else {
+ aco_opcode interp_p2_op = aco_opcode::v_interp_p2_f16;
+
+ if (ctx->options->chip_class == GFX8)
+ interp_p2_op = aco_opcode::v_interp_p2_legacy_f16;
+
+ Builder::Result interp_p1 =
+ bld.vintrp(aco_opcode::v_interp_p1ll_f16, bld.def(v1),
+ coord1, bld.m0(prim_mask), idx, component);
+ bld.vintrp(interp_p2_op, Definition(dst), coord2, bld.m0(prim_mask),
+ interp_p1, idx, component);
+ }
+ } else {
+ Builder::Result interp_p1 =
+ bld.vintrp(aco_opcode::v_interp_p1_f32, bld.def(v1), coord1,
+ bld.m0(prim_mask), idx, component);
+
+ if (ctx->program->has_16bank_lds)
+ interp_p1.instr->operands[0].setLateKill(true);
+
+ bld.vintrp(aco_opcode::v_interp_p2_f32, Definition(dst), coord2,
+ bld.m0(prim_mask), interp_p1, idx, component);
+ }
}
void emit_load_frag_coord(isel_context *ctx, Temp dst, unsigned num_components)
nir_instr *off_instr = instr->src[0].ssa->parent_instr;
if (off_instr->type != nir_instr_type_load_const) {
- fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
- nir_print_instr(off_instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(off_instr, "Unimplemented nir_intrinsic_load_input offset");
}
uint32_t offset = nir_instr_as_load_const(off_instr)->value[0].u32;
unsigned location = nir_intrinsic_base(instr) / 4 - VERT_ATTRIB_GENERIC0 + offset;
unsigned component = nir_intrinsic_component(instr);
+ unsigned bitsize = instr->dest.ssa.bit_size;
unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[location];
uint32_t attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[location];
uint32_t attrib_stride = ctx->options->key.vs.vertex_attribute_strides[location];
/* load channels */
while (channel_start < num_channels) {
- unsigned fetch_size = num_channels - channel_start;
+ unsigned fetch_component = num_channels - channel_start;
unsigned fetch_offset = attrib_offset + channel_start * vtx_info->chan_byte_size;
bool expanded = false;
vtx_info->chan_byte_size == 4;
unsigned fetch_dfmt = V_008F0C_BUF_DATA_FORMAT_INVALID;
if (!use_mubuf) {
- fetch_dfmt = get_fetch_data_format(ctx, vtx_info, fetch_offset, attrib_stride, &fetch_size);
+ fetch_dfmt = get_fetch_data_format(ctx, vtx_info, fetch_offset, attrib_stride, &fetch_component);
} else {
- if (fetch_size == 3 && ctx->options->chip_class == GFX6) {
+ if (fetch_component == 3 && ctx->options->chip_class == GFX6) {
/* GFX6 only supports loading vec3 with MTBUF, expand to vec4. */
- fetch_size = 4;
+ fetch_component = 4;
expanded = true;
}
}
+ unsigned fetch_bytes = fetch_component * bitsize / 8;
+
Temp fetch_index = index;
if (attrib_stride != 0 && fetch_offset > attrib_stride) {
fetch_index = bld.vadd32(bld.def(v1), Operand(fetch_offset / attrib_stride), fetch_index);
}
aco_opcode opcode;
- switch (fetch_size) {
- case 1:
- opcode = use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
- break;
+ switch (fetch_bytes) {
case 2:
- opcode = use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
+ assert(!use_mubuf && bitsize == 16);
+ opcode = aco_opcode::tbuffer_load_format_d16_x;
break;
- case 3:
+ case 4:
+ if (bitsize == 16) {
+ assert(!use_mubuf);
+ opcode = aco_opcode::tbuffer_load_format_d16_xy;
+ } else {
+ opcode = use_mubuf ? aco_opcode::buffer_load_dword : aco_opcode::tbuffer_load_format_x;
+ }
+ break;
+ case 6:
+ assert(!use_mubuf && bitsize == 16);
+ opcode = aco_opcode::tbuffer_load_format_d16_xyz;
+ break;
+ case 8:
+ if (bitsize == 16) {
+ assert(!use_mubuf);
+ opcode = aco_opcode::tbuffer_load_format_d16_xyzw;
+ } else {
+ opcode = use_mubuf ? aco_opcode::buffer_load_dwordx2 : aco_opcode::tbuffer_load_format_xy;
+ }
+ break;
+ case 12:
assert(ctx->options->chip_class >= GFX7 ||
(!use_mubuf && ctx->options->chip_class == GFX6));
opcode = use_mubuf ? aco_opcode::buffer_load_dwordx3 : aco_opcode::tbuffer_load_format_xyz;
break;
- case 4:
+ case 16:
opcode = use_mubuf ? aco_opcode::buffer_load_dwordx4 : aco_opcode::tbuffer_load_format_xyzw;
break;
default:
}
Temp fetch_dst;
- if (channel_start == 0 && fetch_size == dst.size() && !post_shuffle &&
+ if (channel_start == 0 && fetch_bytes == dst.bytes() && !post_shuffle &&
!expanded && (alpha_adjust == RADV_ALPHA_ADJUST_NONE ||
num_channels <= 3)) {
direct_fetch = true;
fetch_dst = dst;
} else {
- fetch_dst = bld.tmp(RegType::vgpr, fetch_size);
+ fetch_dst = bld.tmp(RegClass::get(RegType::vgpr, fetch_bytes));
}
if (use_mubuf) {
- Instruction *mubuf = bld.mubuf(opcode,
- Definition(fetch_dst), list, fetch_index, soffset,
- fetch_offset, false, true).instr;
- static_cast<MUBUF_instruction*>(mubuf)->can_reorder = true;
+ bld.mubuf(opcode,
+ Definition(fetch_dst), list, fetch_index, soffset,
+ fetch_offset, false, false, true);
} else {
- Instruction *mtbuf = bld.mtbuf(opcode,
- Definition(fetch_dst), list, fetch_index, soffset,
- fetch_dfmt, nfmt, fetch_offset, false, true).instr;
- static_cast<MTBUF_instruction*>(mtbuf)->can_reorder = true;
+ bld.mtbuf(opcode,
+ Definition(fetch_dst), list, fetch_index, soffset,
+ fetch_dfmt, nfmt, fetch_offset, false, true);
}
emit_split_vector(ctx, fetch_dst, fetch_dst.size());
- if (fetch_size == 1) {
+ if (fetch_component == 1) {
channels[channel_start] = fetch_dst;
} else {
- for (unsigned i = 0; i < MIN2(fetch_size, num_channels - channel_start); i++)
- channels[channel_start + i] = emit_extract_vector(ctx, fetch_dst, i, v1);
+ for (unsigned i = 0; i < MIN2(fetch_component, num_channels - channel_start); i++)
+ channels[channel_start + i] = emit_extract_vector(ctx, fetch_dst, i,
+ bitsize == 16 ? v2b : v1);
}
- channel_start += fetch_size;
+ channel_start += fetch_component;
}
if (!direct_fetch) {
nir_instr *off_instr = instr->src[offset_idx].ssa->parent_instr;
if (off_instr->type != nir_instr_type_load_const ||
nir_instr_as_load_const(off_instr)->value[0].u32 != 0) {
- fprintf(stderr, "Unimplemented nir_intrinsic_load_input offset\n");
- nir_print_instr(off_instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(off_instr, "Unimplemented nir_intrinsic_load_input offset");
}
Temp prim_mask = get_arg(ctx, ctx->args->ac.prim_mask);
Builder bld(ctx->program, ctx->block);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+
+ if (load_input_from_temps(ctx, instr, dst))
+ return;
+
std::pair<Temp, unsigned> offs = get_tcs_per_vertex_input_lds_offset(ctx, instr);
unsigned elem_size_bytes = instr->dest.ssa.bit_size / 8;
unsigned lds_align = calculate_lds_alignment(ctx, offs.second);
{
Builder bld(ctx->program, ctx->block);
Temp index = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!ctx->divergent_vals[instr->dest.ssa.index])
+ if (!nir_dest_is_divergent(instr->dest))
index = bld.as_uniform(index);
unsigned desc_set = nir_intrinsic_desc_set(instr);
unsigned binding = nir_intrinsic_binding(instr);
bld.copy(Definition(get_ssa_temp(ctx, &instr->dest.ssa)), index);
}
-void load_buffer(isel_context *ctx, unsigned num_components, Temp dst,
- Temp rsrc, Temp offset, bool glc=false, bool readonly=true)
+void load_buffer(isel_context *ctx, unsigned num_components, unsigned component_size,
+ Temp dst, Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset,
+ bool glc=false, bool allow_smem=true, memory_sync_info sync=memory_sync_info())
{
Builder bld(ctx->program, ctx->block);
- unsigned num_bytes = dst.size() * 4;
- bool dlc = glc && ctx->options->chip_class >= GFX10;
-
- aco_opcode op;
- if (dst.type() == RegType::vgpr || (ctx->options->chip_class < GFX8 && !readonly)) {
- Operand vaddr = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
- Operand soffset = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
- unsigned const_offset = 0;
-
- Temp lower = Temp();
- if (num_bytes > 16) {
- assert(num_components == 3 || num_components == 4);
- op = aco_opcode::buffer_load_dwordx4;
- lower = bld.tmp(v4);
- aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
- mubuf->definitions[0] = Definition(lower);
- mubuf->operands[0] = Operand(rsrc);
- mubuf->operands[1] = vaddr;
- mubuf->operands[2] = soffset;
- mubuf->offen = (offset.type() == RegType::vgpr);
- mubuf->glc = glc;
- mubuf->dlc = dlc;
- mubuf->barrier = readonly ? barrier_none : barrier_buffer;
- mubuf->can_reorder = readonly;
- bld.insert(std::move(mubuf));
- emit_split_vector(ctx, lower, 2);
- num_bytes -= 16;
- const_offset = 16;
- } else if (num_bytes == 12 && ctx->options->chip_class == GFX6) {
- /* GFX6 doesn't support loading vec3, expand to vec4. */
- num_bytes = 16;
- }
-
- switch (num_bytes) {
- case 4:
- op = aco_opcode::buffer_load_dword;
- break;
- case 8:
- op = aco_opcode::buffer_load_dwordx2;
- break;
- case 12:
- assert(ctx->options->chip_class > GFX6);
- op = aco_opcode::buffer_load_dwordx3;
- break;
- case 16:
- op = aco_opcode::buffer_load_dwordx4;
- break;
- default:
- unreachable("Load SSBO not implemented for this size.");
- }
- aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
- mubuf->operands[0] = Operand(rsrc);
- mubuf->operands[1] = vaddr;
- mubuf->operands[2] = soffset;
- mubuf->offen = (offset.type() == RegType::vgpr);
- mubuf->glc = glc;
- mubuf->dlc = dlc;
- mubuf->barrier = readonly ? barrier_none : barrier_buffer;
- mubuf->can_reorder = readonly;
- mubuf->offset = const_offset;
- aco_ptr<Instruction> instr = std::move(mubuf);
-
- if (dst.size() > 4) {
- assert(lower != Temp());
- Temp upper = bld.tmp(RegType::vgpr, dst.size() - lower.size());
- instr->definitions[0] = Definition(upper);
- bld.insert(std::move(instr));
- if (dst.size() == 8)
- emit_split_vector(ctx, upper, 2);
- instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, dst.size() / 2, 1));
- instr->operands[0] = Operand(emit_extract_vector(ctx, lower, 0, v2));
- instr->operands[1] = Operand(emit_extract_vector(ctx, lower, 1, v2));
- instr->operands[2] = Operand(emit_extract_vector(ctx, upper, 0, v2));
- if (dst.size() == 8)
- instr->operands[3] = Operand(emit_extract_vector(ctx, upper, 1, v2));
- } else if (dst.size() == 3 && ctx->options->chip_class == GFX6) {
- Temp vec = bld.tmp(v4);
- instr->definitions[0] = Definition(vec);
- bld.insert(std::move(instr));
- emit_split_vector(ctx, vec, 4);
-
- instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1));
- instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1));
- instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1));
- instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1));
- }
+ bool use_smem = dst.type() != RegType::vgpr && (!glc || ctx->options->chip_class >= GFX8) && allow_smem;
+ if (use_smem)
+ offset = bld.as_uniform(offset);
- if (dst.type() == RegType::sgpr) {
- Temp vec = bld.tmp(RegType::vgpr, dst.size());
- instr->definitions[0] = Definition(vec);
- bld.insert(std::move(instr));
- expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1);
- } else {
- instr->definitions[0] = Definition(dst);
- bld.insert(std::move(instr));
- emit_split_vector(ctx, dst, num_components);
- }
- } else {
- switch (num_bytes) {
- case 4:
- op = aco_opcode::s_buffer_load_dword;
- break;
- case 8:
- op = aco_opcode::s_buffer_load_dwordx2;
- break;
- case 12:
- case 16:
- op = aco_opcode::s_buffer_load_dwordx4;
- break;
- case 24:
- case 32:
- op = aco_opcode::s_buffer_load_dwordx8;
- break;
- default:
- unreachable("Load SSBO not implemented for this size.");
- }
- aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
- load->operands[0] = Operand(rsrc);
- load->operands[1] = Operand(bld.as_uniform(offset));
- assert(load->operands[1].getTemp().type() == RegType::sgpr);
- load->definitions[0] = Definition(dst);
- load->glc = glc;
- load->dlc = dlc;
- load->barrier = readonly ? barrier_none : barrier_buffer;
- load->can_reorder = false; // FIXME: currently, it doesn't seem beneficial due to how our scheduler works
- assert(ctx->options->chip_class >= GFX8 || !glc);
-
- /* trim vector */
- if (dst.size() == 3) {
- Temp vec = bld.tmp(s4);
- load->definitions[0] = Definition(vec);
- bld.insert(std::move(load));
- emit_split_vector(ctx, vec, 4);
-
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- emit_extract_vector(ctx, vec, 0, s1),
- emit_extract_vector(ctx, vec, 1, s1),
- emit_extract_vector(ctx, vec, 2, s1));
- } else if (dst.size() == 6) {
- Temp vec = bld.tmp(s8);
- load->definitions[0] = Definition(vec);
- bld.insert(std::move(load));
- emit_split_vector(ctx, vec, 4);
-
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- emit_extract_vector(ctx, vec, 0, s2),
- emit_extract_vector(ctx, vec, 1, s2),
- emit_extract_vector(ctx, vec, 2, s2));
- } else {
- bld.insert(std::move(load));
- }
- emit_split_vector(ctx, dst, num_components);
- }
+ LoadEmitInfo info = {Operand(offset), dst, num_components, component_size, rsrc};
+ info.glc = glc;
+ info.sync = sync;
+ info.align_mul = align_mul;
+ info.align_offset = align_offset;
+ if (use_smem)
+ emit_smem_load(ctx, bld, &info);
+ else
+ emit_mubuf_load(ctx, bld, &info);
}
void visit_load_ubo(isel_context *ctx, nir_intrinsic_instr *instr)
rsrc = convert_pointer_to_64_bit(ctx, rsrc);
rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
}
-
- load_buffer(ctx, instr->num_components, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa));
+ unsigned size = instr->dest.ssa.bit_size / 8;
+ load_buffer(ctx, instr->num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
+ nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr));
}
void visit_load_push_constant(isel_context *ctx, nir_intrinsic_instr *instr)
{
Builder bld(ctx->program, ctx->block);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
-
unsigned offset = nir_intrinsic_base(instr);
+ unsigned count = instr->dest.ssa.num_components;
nir_const_value *index_cv = nir_src_as_const_value(instr->src[0]);
- if (index_cv && instr->dest.ssa.bit_size == 32) {
- unsigned count = instr->dest.ssa.num_components;
+ if (index_cv && instr->dest.ssa.bit_size == 32) {
unsigned start = (offset + index_cv->u32) / 4u;
start -= ctx->args->ac.base_inline_push_consts;
if (start + count <= ctx->args->ac.num_inline_push_consts) {
Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
if (offset != 0) // TODO check if index != 0 as well
- index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
+ index = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
Temp vec = dst;
bool trim = false;
+ bool aligned = true;
+
+ if (instr->dest.ssa.bit_size == 8) {
+ aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
+ bool fits_in_dword = count == 1 || (index_cv && ((offset + index_cv->u32) % 4 + count) <= 4);
+ if (!aligned)
+ vec = fits_in_dword ? bld.tmp(s1) : bld.tmp(s2);
+ } else if (instr->dest.ssa.bit_size == 16) {
+ aligned = index_cv && (offset + index_cv->u32) % 4 == 0;
+ if (!aligned)
+ vec = count == 4 ? bld.tmp(s4) : count > 1 ? bld.tmp(s2) : bld.tmp(s1);
+ }
+
aco_opcode op;
- switch (dst.size()) {
+ switch (vec.size()) {
case 1:
op = aco_opcode::s_load_dword;
break;
unreachable("unimplemented or forbidden load_push_constant.");
}
- bld.smem(op, Definition(vec), ptr, index);
+ static_cast<SMEM_instruction*>(bld.smem(op, Definition(vec), ptr, index).instr)->prevent_overflow = true;
+
+ if (!aligned) {
+ Operand byte_offset = index_cv ? Operand((offset + index_cv->u32) % 4) : Operand(index);
+ byte_align_scalar(ctx, vec, byte_offset, dst);
+ return;
+ }
if (trim) {
emit_split_vector(ctx, vec, 4);
Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
if (base && offset.type() == RegType::sgpr)
- offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
+ offset = bld.nuw().sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
else if (base && offset.type() == RegType::vgpr)
offset = bld.vadd32(bld.def(v1), Operand(base), offset);
bld.sop1(aco_opcode::p_constaddr, bld.def(s2), bld.def(s1, scc), Operand(ctx->constant_data_offset)),
Operand(MIN2(base + range, ctx->shader->constant_data_size)),
Operand(desc_type));
-
- load_buffer(ctx, instr->num_components, dst, rsrc, offset);
+ unsigned size = instr->dest.ssa.bit_size / 8;
+ // TODO: get alignment information for subdword constants
+ load_buffer(ctx, instr->num_components, size, dst, rsrc, offset, size, 0);
}
void visit_discard_if(isel_context *ctx, nir_intrinsic_instr *instr)
assert(nir_instr_is_last(&instr->instr));
ctx->block->kind |= block_kind_uniform;
ctx->cf_info.has_branch = true;
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_linear_edge(ctx->block->index, linear_target);
return;
}
ctx->cf_info.nir_to_aco[instr->instr.block->index] = idx;
/* remove critical edges from linear CFG */
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
Block* break_block = ctx->program->create_and_insert_block();
break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
break_block->kind |= block_kind_uniform;
add_linear_edge(idx, break_block);
add_linear_edge(break_block->index, linear_target);
bld.reset(break_block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
Block* continue_block = ctx->program->create_and_insert_block();
continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
load->unrm = true;
load->da = da;
load->dim = dim;
- load->can_reorder = true; /* fmask images shouldn't be modified */
ctx->block->instructions.emplace_back(std::move(load));
Operand sample_index4;
- if (sample_index.isConstant() && sample_index.constantValue() < 16) {
- sample_index4 = Operand(sample_index.constantValue() << 2);
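+ /* sample_index4 is the bit offset into the fmask value (4 bits per sample);
+  * constant indices >= 16 can't be represented there, so fall back to 0. */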
+ if (sample_index.isConstant()) {
+ if (sample_index.constantValue() < 16) {
+ sample_index4 = Operand(sample_index.constantValue() << 2);
+ } else {
+ sample_index4 = Operand(0u);
+ }
} else if (sample_index.regClass() == s1) {
sample_index4 = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), sample_index, Operand(2u));
} else {
}
+memory_sync_info get_memory_sync_info(nir_intrinsic_instr *instr, storage_class storage, unsigned semantics)
+{
+ /* atomicrmw might not have NIR_INTRINSIC_ACCESS and there's nothing interesting there anyway */
+ if (semantics & semantic_atomicrmw)
+ return memory_sync_info(storage, semantics);
+
+ unsigned access = nir_intrinsic_access(instr);
+
+ if (access & ACCESS_VOLATILE)
+ semantics |= semantic_volatile;
+ if (access & ACCESS_CAN_REORDER)
+ semantics |= semantic_can_reorder | semantic_private;
+
+ return memory_sync_info(storage, semantics);
+}
+
void visit_image_load(isel_context *ctx, nir_intrinsic_instr *instr)
{
Builder bld(ctx->program, ctx->block);
bool is_array = glsl_sampler_type_is_array(type);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
+ unsigned access = var->data.access | nir_intrinsic_access(instr);
+
if (dim == GLSL_SAMPLER_DIM_BUF) {
unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
unsigned num_channels = util_last_bit(mask);
tmp = {ctx->program->allocateId(), RegClass(RegType::vgpr, num_channels)};
load->definitions[0] = Definition(tmp);
load->idxen = true;
- load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT);
+ load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
load->dlc = load->glc && ctx->options->chip_class >= GFX10;
- load->barrier = barrier_image;
+ load->sync = sync;
ctx->block->instructions.emplace_back(std::move(load));
expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, (1 << num_channels) - 1);
load->operands[1] = Operand(s4); /* no sampler */
load->operands[2] = Operand(coords);
load->definitions[0] = Definition(tmp);
- load->glc = var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
+ load->glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT) ? 1 : 0;
load->dlc = load->glc && ctx->options->chip_class >= GFX10;
load->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
load->dmask = dmask;
load->unrm = true;
load->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
- load->barrier = barrier_image;
+ load->sync = sync;
ctx->block->instructions.emplace_back(std::move(load));
expand_vector(ctx, tmp, dst, instr->dest.ssa.num_components, dmask);
bool is_array = glsl_sampler_type_is_array(type);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[3].ssa));
- bool glc = ctx->options->chip_class == GFX6 || var->data.access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE) ? 1 : 0;
+ memory_sync_info sync = get_memory_sync_info(instr, storage_image, 0);
+ unsigned access = var->data.access | nir_intrinsic_access(instr);
+ bool glc = ctx->options->chip_class == GFX6 || access & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE) ? 1 : 0;
if (dim == GLSL_SAMPLER_DIM_BUF) {
Temp rsrc = get_sampler_desc(ctx, nir_instr_as_deref(instr->src[0].ssa->parent_instr), ACO_DESC_BUFFER, nullptr, true, true);
store->glc = glc;
store->dlc = false;
store->disable_wqm = true;
- store->barrier = barrier_image;
+ store->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(store));
return;
store->unrm = true;
store->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
store->disable_wqm = true;
- store->barrier = barrier_image;
+ store->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(store));
return;
}
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ memory_sync_info sync = get_memory_sync_info(instr, storage_image, semantic_atomicrmw);
if (dim == GLSL_SAMPLER_DIM_BUF) {
Temp vindex = emit_extract_vector(ctx, get_ssa_temp(ctx, instr->src[1].ssa), 0, v1);
mubuf->glc = return_previous;
mubuf->dlc = false; /* Not needed for atomics */
mubuf->disable_wqm = true;
- mubuf->barrier = barrier_image;
+ mubuf->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(mubuf));
return;
mimg->unrm = true;
mimg->da = should_declare_array(ctx, dim, glsl_sampler_type_is_array(type));
mimg->disable_wqm = true;
- mimg->barrier = barrier_image;
+ mimg->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(mimg));
return;
}
/* LOD */
+ assert(nir_src_as_uint(instr->src[1]) == 0);
Temp lod = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0u));
/* Resource */
mimg->dim = ac_get_image_dim(ctx->options->chip_class, dim, is_array);
mimg->dmask = (1 << instr->dest.ssa.num_components) - 1;
mimg->da = glsl_sampler_type_is_array(type);
- mimg->can_reorder = true;
Definition& def = mimg->definitions[0];
ctx->block->instructions.emplace_back(std::move(mimg));
Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
- bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
- load_buffer(ctx, num_components, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa), glc, false);
+ unsigned access = nir_intrinsic_access(instr);
+ bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
+ unsigned size = instr->dest.ssa.bit_size / 8;
+
+ uint32_t flags = get_all_buffer_resource_flags(ctx, instr->src[0].ssa, access);
+ /* GLC bypasses VMEM/SMEM caches, so GLC SMEM loads/stores are coherent with GLC VMEM loads/stores
+ * TODO: this optimization is disabled for now because we still need to ensure correct ordering
+ */
+ bool allow_smem = !(flags & (0 && glc ? has_nonglc_vmem_store : has_vmem_store));
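+ /* read-only (restrict + non-writeable) or reorderable buffers can always use SMEM */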
+ allow_smem |= ((access & ACCESS_RESTRICT) && (access & ACCESS_NON_WRITEABLE)) || (access & ACCESS_CAN_REORDER);
+
+ load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
+ nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, allow_smem,
+ get_memory_sync_info(instr, storage_buffer, 0));
}
void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
Builder bld(ctx->program, ctx->block);
Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
- unsigned writemask = nir_intrinsic_write_mask(instr);
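+ /* widen the per-component writemask to byte granularity for split_buffer_store() */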
+ unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
Temp offset = get_ssa_temp(ctx, instr->src[2].ssa);
Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
- bool smem = !ctx->divergent_vals[instr->src[2].ssa->index] &&
- ctx->options->chip_class >= GFX8;
+ memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
+ bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ uint32_t flags = get_all_buffer_resource_flags(ctx, instr->src[1].ssa, nir_intrinsic_access(instr));
+ /* GLC bypasses VMEM/SMEM caches, so GLC SMEM loads/stores are coherent with GLC VMEM loads/stores
+ * TODO: this optimization is disabled for now because we still need to ensure correct ordering
+ */
+ bool allow_smem = !(flags & (0 && glc ? has_nonglc_vmem_loadstore : has_vmem_loadstore));
+
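+ /* SMEM stores need a uniform offset; they exist from GFX8 up to GFX10 (GFX10.3 removed scalar stores) */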
+ bool smem = !nir_src_is_divergent(instr->src[2]) &&
+ ctx->options->chip_class >= GFX8 &&
+ ctx->options->chip_class < GFX10_3 &&
+ (elem_size_bytes >= 4 || can_subdword_ssbo_store_use_smem(instr)) &&
+ allow_smem;
if (smem)
offset = bld.as_uniform(offset);
bool smem_nonfs = smem && ctx->stage != fragment_fs;
- while (writemask) {
- int start, count;
- u_bit_scan_consecutive_range(&writemask, &start, &count);
- if (count == 3 && (smem || ctx->options->chip_class == GFX6)) {
- /* GFX6 doesn't support storing vec3, split it. */
- writemask |= 1u << (start + 2);
- count = 2;
- }
- int num_bytes = count * elem_size_bytes;
-
- if (num_bytes > 16) {
- assert(elem_size_bytes == 8);
- writemask |= (((count - 2) << 1) - 1) << (start + 2);
- count = 2;
- num_bytes = 16;
- }
-
- // TODO: check alignment of sub-dword stores
- // TODO: split 3 bytes. there is no store instruction for that
-
- Temp write_data;
- if (count != instr->num_components) {
- emit_split_vector(ctx, data, instr->num_components);
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
- for (int i = 0; i < count; i++) {
- Temp elem = emit_extract_vector(ctx, data, start + i, RegClass(data.type(), elem_size_bytes / 4));
- vec->operands[i] = Operand(smem_nonfs ? bld.as_uniform(elem) : elem);
- }
- write_data = bld.tmp(!smem ? RegType::vgpr : smem_nonfs ? RegType::sgpr : data.type(), count * elem_size_bytes / 4);
- vec->definitions[0] = Definition(write_data);
- ctx->block->instructions.emplace_back(std::move(vec));
- } else if (!smem && data.type() != RegType::vgpr) {
- assert(num_bytes % 4 == 0);
- write_data = bld.copy(bld.def(RegType::vgpr, num_bytes / 4), data);
- } else if (smem_nonfs && data.type() == RegType::vgpr) {
- assert(num_bytes % 4 == 0);
- write_data = bld.as_uniform(data);
- } else {
- write_data = data;
- }
+ unsigned write_count = 0;
+ Temp write_datas[32];
+ unsigned offsets[32];
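+ /* split the store along the writemask into chunks of at most 16 bytes */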
+ split_buffer_store(ctx, instr, smem, smem_nonfs ? RegType::sgpr : (smem ? data.type() : RegType::vgpr),
+ data, writemask, 16, &write_count, write_datas, offsets);
- aco_opcode vmem_op, smem_op;
- switch (num_bytes) {
- case 4:
- vmem_op = aco_opcode::buffer_store_dword;
- smem_op = aco_opcode::s_buffer_store_dword;
- break;
- case 8:
- vmem_op = aco_opcode::buffer_store_dwordx2;
- smem_op = aco_opcode::s_buffer_store_dwordx2;
- break;
- case 12:
- vmem_op = aco_opcode::buffer_store_dwordx3;
- smem_op = aco_opcode::last_opcode;
- assert(!smem && ctx->options->chip_class > GFX6);
- break;
- case 16:
- vmem_op = aco_opcode::buffer_store_dwordx4;
- smem_op = aco_opcode::s_buffer_store_dwordx4;
- break;
- default:
- unreachable("Store SSBO not implemented for this size.");
- }
- if (ctx->stage == fragment_fs)
- smem_op = aco_opcode::p_fs_buffer_store_smem;
+ for (unsigned i = 0; i < write_count; i++) {
+ aco_opcode op = get_buffer_store_op(smem, write_datas[i].bytes());
+ if (smem && ctx->stage == fragment_fs)
+ op = aco_opcode::p_fs_buffer_store_smem;
if (smem) {
- aco_ptr<SMEM_instruction> store{create_instruction<SMEM_instruction>(smem_op, Format::SMEM, 3, 0)};
+ aco_ptr<SMEM_instruction> store{create_instruction<SMEM_instruction>(op, Format::SMEM, 3, 0)};
store->operands[0] = Operand(rsrc);
- if (start) {
- Temp off = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
- offset, Operand(start * elem_size_bytes));
+ if (offsets[i]) {
+ Temp off = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
+ offset, Operand(offsets[i]));
store->operands[1] = Operand(off);
} else {
store->operands[1] = Operand(offset);
}
- if (smem_op != aco_opcode::p_fs_buffer_store_smem)
+ if (op != aco_opcode::p_fs_buffer_store_smem)
store->operands[1].setFixed(m0);
- store->operands[2] = Operand(write_data);
- store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ store->operands[2] = Operand(write_datas[i]);
+ store->glc = glc;
store->dlc = false;
store->disable_wqm = true;
- store->barrier = barrier_buffer;
+ store->sync = sync;
ctx->block->instructions.emplace_back(std::move(store));
ctx->program->wb_smem_l1_on_end = true;
- if (smem_op == aco_opcode::p_fs_buffer_store_smem) {
+ if (op == aco_opcode::p_fs_buffer_store_smem) {
ctx->block->kind |= block_kind_needs_lowering;
ctx->program->needs_exact = true;
}
} else {
- aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(vmem_op, Format::MUBUF, 4, 0)};
+ aco_ptr<MUBUF_instruction> store{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 4, 0)};
store->operands[0] = Operand(rsrc);
store->operands[1] = offset.type() == RegType::vgpr ? Operand(offset) : Operand(v1);
store->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
- store->operands[3] = Operand(write_data);
- store->offset = start * elem_size_bytes;
+ store->operands[3] = Operand(write_datas[i]);
+ store->offset = offsets[i];
store->offen = (offset.type() == RegType::vgpr);
- store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ store->glc = glc;
store->dlc = false;
store->disable_wqm = true;
- store->barrier = barrier_buffer;
+ store->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(store));
}
mubuf->operands[2] = offset.type() == RegType::sgpr ? Operand(offset) : Operand((uint32_t) 0);
mubuf->operands[3] = Operand(data);
if (return_previous)
- mubuf->definitions[0] = Definition(dst);
- mubuf->offset = 0;
- mubuf->offen = (offset.type() == RegType::vgpr);
- mubuf->glc = return_previous;
- mubuf->dlc = false; /* Not needed for atomics */
- mubuf->disable_wqm = true;
- mubuf->barrier = barrier_buffer;
- ctx->program->needs_exact = true;
- ctx->block->instructions.emplace_back(std::move(mubuf));
-}
-
-void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr) {
-
- Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
- Builder bld(ctx->program, ctx->block);
- Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u));
- get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false);
-}
-
-Temp get_gfx6_global_rsrc(Builder& bld, Temp addr)
-{
- uint32_t rsrc_conf = S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
- S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
-
- if (addr.type() == RegType::vgpr)
- return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), Operand(0u), Operand(0u), Operand(-1u), Operand(rsrc_conf));
- return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), addr, Operand(-1u), Operand(rsrc_conf));
-}
-
-void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr)
-{
- Builder bld(ctx->program, ctx->block);
- unsigned num_components = instr->num_components;
- unsigned num_bytes = num_components * instr->dest.ssa.bit_size / 8;
-
- Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
-
- bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
- bool dlc = glc && ctx->options->chip_class >= GFX10;
- aco_opcode op;
- if (dst.type() == RegType::vgpr || (glc && ctx->options->chip_class < GFX8)) {
- bool global = ctx->options->chip_class >= GFX9;
-
- if (ctx->options->chip_class >= GFX7) {
- aco_opcode op;
- switch (num_bytes) {
- case 4:
- op = global ? aco_opcode::global_load_dword : aco_opcode::flat_load_dword;
- break;
- case 8:
- op = global ? aco_opcode::global_load_dwordx2 : aco_opcode::flat_load_dwordx2;
- break;
- case 12:
- op = global ? aco_opcode::global_load_dwordx3 : aco_opcode::flat_load_dwordx3;
- break;
- case 16:
- op = global ? aco_opcode::global_load_dwordx4 : aco_opcode::flat_load_dwordx4;
- break;
- default:
- unreachable("load_global not implemented for this size.");
- }
-
- aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 2, 1)};
- flat->operands[0] = Operand(addr);
- flat->operands[1] = Operand(s1);
- flat->glc = glc;
- flat->dlc = dlc;
- flat->barrier = barrier_buffer;
-
- if (dst.type() == RegType::sgpr) {
- Temp vec = bld.tmp(RegType::vgpr, dst.size());
- flat->definitions[0] = Definition(vec);
- ctx->block->instructions.emplace_back(std::move(flat));
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
- } else {
- flat->definitions[0] = Definition(dst);
- ctx->block->instructions.emplace_back(std::move(flat));
- }
- emit_split_vector(ctx, dst, num_components);
- } else {
- assert(ctx->options->chip_class == GFX6);
-
- /* GFX6 doesn't support loading vec3, expand to vec4. */
- num_bytes = num_bytes == 12 ? 16 : num_bytes;
-
- aco_opcode op;
- switch (num_bytes) {
- case 4:
- op = aco_opcode::buffer_load_dword;
- break;
- case 8:
- op = aco_opcode::buffer_load_dwordx2;
- break;
- case 16:
- op = aco_opcode::buffer_load_dwordx4;
- break;
- default:
- unreachable("load_global not implemented for this size.");
- }
+ mubuf->definitions[0] = Definition(dst);
+ mubuf->offset = 0;
+ mubuf->offen = (offset.type() == RegType::vgpr);
+ mubuf->glc = return_previous;
+ mubuf->dlc = false; /* Not needed for atomics */
+ mubuf->disable_wqm = true;
+ mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
+ ctx->program->needs_exact = true;
+ ctx->block->instructions.emplace_back(std::move(mubuf));
+}
- Temp rsrc = get_gfx6_global_rsrc(bld, addr);
+void visit_get_buffer_size(isel_context *ctx, nir_intrinsic_instr *instr) {
- aco_ptr<MUBUF_instruction> mubuf{create_instruction<MUBUF_instruction>(op, Format::MUBUF, 3, 1)};
- mubuf->operands[0] = Operand(rsrc);
- mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
- mubuf->operands[2] = Operand(0u);
- mubuf->glc = glc;
- mubuf->dlc = false;
- mubuf->offset = 0;
- mubuf->addr64 = addr.type() == RegType::vgpr;
- mubuf->disable_wqm = false;
- mubuf->barrier = barrier_buffer;
- aco_ptr<Instruction> instr = std::move(mubuf);
-
- /* expand vector */
- if (dst.size() == 3) {
- Temp vec = bld.tmp(v4);
- instr->definitions[0] = Definition(vec);
- bld.insert(std::move(instr));
- emit_split_vector(ctx, vec, 4);
-
- instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, 3, 1));
- instr->operands[0] = Operand(emit_extract_vector(ctx, vec, 0, v1));
- instr->operands[1] = Operand(emit_extract_vector(ctx, vec, 1, v1));
- instr->operands[2] = Operand(emit_extract_vector(ctx, vec, 2, v1));
- }
+ Temp index = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
+ Builder bld(ctx->program, ctx->block);
+ Temp desc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), index, Operand(0u));
+ get_buffer_size(ctx, desc, get_ssa_temp(ctx, &instr->dest.ssa), false);
+}
- if (dst.type() == RegType::sgpr) {
- Temp vec = bld.tmp(RegType::vgpr, dst.size());
- instr->definitions[0] = Definition(vec);
- bld.insert(std::move(instr));
- expand_vector(ctx, vec, dst, num_components, (1 << num_components) - 1);
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
- } else {
- instr->definitions[0] = Definition(dst);
- bld.insert(std::move(instr));
- emit_split_vector(ctx, dst, num_components);
- }
- }
+void visit_load_global(isel_context *ctx, nir_intrinsic_instr *instr)
+{
+ Builder bld(ctx->program, ctx->block);
+ unsigned num_components = instr->num_components;
+ unsigned component_size = instr->dest.ssa.bit_size / 8;
+
+ LoadEmitInfo info = {Operand(get_ssa_temp(ctx, instr->src[0].ssa)),
+ get_ssa_temp(ctx, &instr->dest.ssa),
+ num_components, component_size};
+ info.glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
+ info.align_mul = nir_intrinsic_align_mul(instr);
+ info.align_offset = nir_intrinsic_align_offset(instr);
+ info.sync = get_memory_sync_info(instr, storage_buffer, 0);
+ /* VMEM stores don't update the SMEM cache and it's difficult to prove that
+ * it's safe to use SMEM */
+ bool can_use_smem = nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE;
+ if (info.dst.type() == RegType::vgpr || (info.glc && ctx->options->chip_class < GFX8) || !can_use_smem) {
+ emit_global_load(ctx, bld, &info);
} else {
- switch (num_bytes) {
- case 4:
- op = aco_opcode::s_load_dword;
- break;
- case 8:
- op = aco_opcode::s_load_dwordx2;
- break;
- case 12:
- case 16:
- op = aco_opcode::s_load_dwordx4;
- break;
- default:
- unreachable("load_global not implemented for this size.");
- }
- aco_ptr<SMEM_instruction> load{create_instruction<SMEM_instruction>(op, Format::SMEM, 2, 1)};
- load->operands[0] = Operand(addr);
- load->operands[1] = Operand(0u);
- load->definitions[0] = Definition(dst);
- load->glc = glc;
- load->dlc = dlc;
- load->barrier = barrier_buffer;
- assert(ctx->options->chip_class >= GFX8 || !glc);
-
- if (dst.size() == 3) {
- /* trim vector */
- Temp vec = bld.tmp(s4);
- load->definitions[0] = Definition(vec);
- ctx->block->instructions.emplace_back(std::move(load));
- emit_split_vector(ctx, vec, 4);
-
- bld.pseudo(aco_opcode::p_create_vector, Definition(dst),
- emit_extract_vector(ctx, vec, 0, s1),
- emit_extract_vector(ctx, vec, 1, s1),
- emit_extract_vector(ctx, vec, 2, s1));
- } else {
- ctx->block->instructions.emplace_back(std::move(load));
- }
+ info.offset = Operand(bld.as_uniform(info.offset));
+ emit_smem_load(ctx, bld, &info);
}
}
{
Builder bld(ctx->program, ctx->block);
unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
+ unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Temp addr = get_ssa_temp(ctx, instr->src[1].ssa);
+ memory_sync_info sync = get_memory_sync_info(instr, storage_buffer, 0);
+ bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
if (ctx->options->chip_class >= GFX7)
addr = as_vgpr(ctx, addr);
- unsigned writemask = nir_intrinsic_write_mask(instr);
- while (writemask) {
- int start, count;
- u_bit_scan_consecutive_range(&writemask, &start, &count);
- if (count == 3 && ctx->options->chip_class == GFX6) {
- /* GFX6 doesn't support storing vec3, split it. */
- writemask |= 1u << (start + 2);
- count = 2;
- }
- unsigned num_bytes = count * elem_size_bytes;
-
- Temp write_data = data;
- if (count != instr->num_components) {
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
- for (int i = 0; i < count; i++)
- vec->operands[i] = Operand(emit_extract_vector(ctx, data, start + i, v1));
- write_data = bld.tmp(RegType::vgpr, count);
- vec->definitions[0] = Definition(write_data);
- ctx->block->instructions.emplace_back(std::move(vec));
- }
-
- bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
- unsigned offset = start * elem_size_bytes;
+ unsigned write_count = 0;
+ Temp write_datas[32];
+ unsigned offsets[32];
+ split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask,
+ 16, &write_count, write_datas, offsets);
+ for (unsigned i = 0; i < write_count; i++) {
if (ctx->options->chip_class >= GFX7) {
+ unsigned offset = offsets[i];
+ Temp store_addr = addr;
if (offset > 0 && ctx->options->chip_class < GFX9) {
Temp addr0 = bld.tmp(v1), addr1 = bld.tmp(v1);
Temp new_addr0 = bld.tmp(v1), new_addr1 = bld.tmp(v1);
Operand(0u), addr1,
carry).def(1).setHint(vcc);
- addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_addr0, new_addr1);
+ store_addr = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), new_addr0, new_addr1);
offset = 0;
}
bool global = ctx->options->chip_class >= GFX9;
aco_opcode op;
- switch (num_bytes) {
+ switch (write_datas[i].bytes()) {
+ case 1:
+ op = global ? aco_opcode::global_store_byte : aco_opcode::flat_store_byte;
+ break;
+ case 2:
+ op = global ? aco_opcode::global_store_short : aco_opcode::flat_store_short;
+ break;
case 4:
op = global ? aco_opcode::global_store_dword : aco_opcode::flat_store_dword;
break;
}
aco_ptr<FLAT_instruction> flat{create_instruction<FLAT_instruction>(op, global ? Format::GLOBAL : Format::FLAT, 3, 0)};
- flat->operands[0] = Operand(addr);
+ flat->operands[0] = Operand(store_addr);
flat->operands[1] = Operand(s1);
- flat->operands[2] = Operand(data);
+ flat->operands[2] = Operand(write_datas[i]);
flat->glc = glc;
flat->dlc = false;
flat->offset = offset;
flat->disable_wqm = true;
- flat->barrier = barrier_buffer;
+ flat->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(flat));
} else {
assert(ctx->options->chip_class == GFX6);
- aco_opcode op;
- switch (num_bytes) {
- case 4:
- op = aco_opcode::buffer_store_dword;
- break;
- case 8:
- op = aco_opcode::buffer_store_dwordx2;
- break;
- case 16:
- op = aco_opcode::buffer_store_dwordx4;
- break;
- default:
- unreachable("store_global not implemented for this size.");
- }
+ aco_opcode op = get_buffer_store_op(false, write_datas[i].bytes());
Temp rsrc = get_gfx6_global_rsrc(bld, addr);
mubuf->operands[0] = Operand(rsrc);
mubuf->operands[1] = addr.type() == RegType::vgpr ? Operand(addr) : Operand(v1);
mubuf->operands[2] = Operand(0u);
- mubuf->operands[3] = Operand(write_data);
+ mubuf->operands[3] = Operand(write_datas[i]);
mubuf->glc = glc;
mubuf->dlc = false;
- mubuf->offset = offset;
+ mubuf->offset = offsets[i];
mubuf->addr64 = addr.type() == RegType::vgpr;
mubuf->disable_wqm = true;
- mubuf->barrier = barrier_buffer;
+ mubuf->sync = sync;
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(mubuf));
}
flat->dlc = false; /* Not needed for atomics */
flat->offset = 0;
flat->disable_wqm = true;
- flat->barrier = barrier_buffer;
+ flat->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(flat));
} else {
mubuf->offset = 0;
mubuf->addr64 = addr.type() == RegType::vgpr;
mubuf->disable_wqm = true;
- mubuf->barrier = barrier_buffer;
+ mubuf->sync = get_memory_sync_info(instr, storage_buffer, semantic_atomicrmw);
ctx->program->needs_exact = true;
ctx->block->instructions.emplace_back(std::move(mubuf));
}
}
-void emit_memory_barrier(isel_context *ctx, nir_intrinsic_instr *instr) {
- Builder bld(ctx->program, ctx->block);
- switch(instr->intrinsic) {
- case nir_intrinsic_group_memory_barrier:
- case nir_intrinsic_memory_barrier:
- bld.barrier(aco_opcode::p_memory_barrier_common);
- break;
- case nir_intrinsic_memory_barrier_buffer:
- bld.barrier(aco_opcode::p_memory_barrier_buffer);
- break;
- case nir_intrinsic_memory_barrier_image:
- bld.barrier(aco_opcode::p_memory_barrier_image);
- break;
- case nir_intrinsic_memory_barrier_tcs_patch:
- case nir_intrinsic_memory_barrier_shared:
- bld.barrier(aco_opcode::p_memory_barrier_shared);
- break;
- default:
- unreachable("Unimplemented memory barrier intrinsic");
- break;
+sync_scope translate_nir_scope(nir_scope scope)
+{
+ switch (scope) {
+ case NIR_SCOPE_NONE:
+ case NIR_SCOPE_INVOCATION:
+ return scope_invocation;
+ case NIR_SCOPE_SUBGROUP:
+ return scope_subgroup;
+ case NIR_SCOPE_WORKGROUP:
+ return scope_workgroup;
+ case NIR_SCOPE_QUEUE_FAMILY:
+ return scope_queuefamily;
+ case NIR_SCOPE_DEVICE:
+ return scope_device;
}
+ unreachable("invalid scope");
+}
+
+void emit_scoped_barrier(isel_context *ctx, nir_intrinsic_instr *instr) {
+ Builder bld(ctx->program, ctx->block);
+
+ unsigned semantics = 0;
+ unsigned storage = 0;
+ sync_scope mem_scope = translate_nir_scope(nir_intrinsic_memory_scope(instr));
+ sync_scope exec_scope = translate_nir_scope(nir_intrinsic_execution_scope(instr));
+
+ unsigned nir_storage = nir_intrinsic_memory_modes(instr);
+ if (nir_storage & (nir_var_mem_ssbo | nir_var_mem_global))
+ storage |= storage_buffer | storage_image; //TODO: split this when NIR gets nir_var_mem_image
+ if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && (nir_storage & nir_var_mem_shared))
+ storage |= storage_shared;
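+ /* TCS outputs live in LDS, so barriers on them are treated as shared-memory barriers */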
+ if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL && (nir_storage & nir_var_shader_out))
+ storage |= storage_shared;
+
+ unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
+ if (nir_semantics & NIR_MEMORY_ACQUIRE)
+ semantics |= semantic_acquire | semantic_release;
+ if (nir_semantics & NIR_MEMORY_RELEASE)
+ semantics |= semantic_acquire | semantic_release;
+
+ assert(!(nir_semantics & (NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_MAKE_VISIBLE)));
+
+ bld.barrier(aco_opcode::p_barrier,
+ memory_sync_info((storage_class)storage, (memory_semantics)semantics, mem_scope),
+ exec_scope);
}
void visit_load_shared(isel_context *ctx, nir_intrinsic_instr *instr)
{
// TODO: implement sparse reads using ds_read2_b32 and nir_ssa_def_components_read()
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- assert(instr->dest.ssa.bit_size >= 32 && "Bitsize not supported in load_shared.");
Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Builder bld(ctx->program, ctx->block);
Temp data = get_ssa_temp(ctx, instr->src[0].ssa);
Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
- assert(elem_size_bytes >= 4 && "Only 32bit & 64bit store_shared currently supported.");
unsigned align = nir_intrinsic_align_mul(instr) ? nir_intrinsic_align(instr) : elem_size_bytes;
store_lds(ctx, elem_size_bytes, data, writemask, address, nir_intrinsic_base(instr), align);
void visit_shared_atomic(isel_context *ctx, nir_intrinsic_instr *instr)
{
unsigned offset = nir_intrinsic_base(instr);
- Operand m = load_lds_size_m0(ctx);
+ Builder bld(ctx->program, ctx->block);
+ Operand m = load_lds_size_m0(bld);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
Temp address = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
op32 = aco_opcode::ds_write_b32;
op64 = aco_opcode::ds_write_b64;
op32_rtn = aco_opcode::ds_wrxchg_rtn_b32;
- op64_rtn = aco_opcode::ds_wrxchg2_rtn_b64;
+ op64_rtn = aco_opcode::ds_wrxchg_rtn_b64;
break;
case nir_intrinsic_shared_atomic_comp_swap:
op32 = aco_opcode::ds_cmpst_b32;
op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
num_operands = 4;
break;
+ case nir_intrinsic_shared_atomic_fadd:
+ op32 = aco_opcode::ds_add_f32;
+ op32_rtn = aco_opcode::ds_add_rtn_f32;
+ op64 = aco_opcode::num_opcodes;
+ op64_rtn = aco_opcode::num_opcodes;
+ break;
default:
unreachable("Unhandled shared atomic intrinsic");
}
}
if (offset > 65535) {
- Builder bld(ctx->program, ctx->block);
address = bld.vadd32(bld.def(v1), Operand(offset), address);
offset = 0;
}
ds->offset0 = offset;
if (return_previous)
ds->definitions[0] = Definition(get_ssa_temp(ctx, &instr->dest.ssa));
+ ds->sync = memory_sync_info(storage_shared, semantic_atomicrmw);
ctx->block->instructions.emplace_back(std::move(ds));
}
scratch_addr = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand(0u));
uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
- S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);;
+ S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
if (ctx->program->chip_class >= GFX10) {
rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
}
- /* older generations need element size = 16 bytes. element size removed in GFX9 */
+ /* older generations need element size = 4 bytes. element size removed in GFX9 */
if (ctx->program->chip_class <= GFX8)
- rsrc_conf |= S_008F0C_ELEMENT_SIZE(3);
+ rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);
return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand(-1u), Operand(rsrc_conf));
}
void visit_load_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
- assert(instr->dest.ssa.bit_size == 32 || instr->dest.ssa.bit_size == 64);
Builder bld(ctx->program, ctx->block);
Temp rsrc = get_scratch_resource(ctx);
Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- aco_opcode op;
- switch (dst.size()) {
- case 1:
- op = aco_opcode::buffer_load_dword;
- break;
- case 2:
- op = aco_opcode::buffer_load_dwordx2;
- break;
- case 3:
- op = aco_opcode::buffer_load_dwordx3;
- break;
- case 4:
- op = aco_opcode::buffer_load_dwordx4;
- break;
- case 6:
- case 8: {
- std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
- Temp lower = bld.mubuf(aco_opcode::buffer_load_dwordx4,
- bld.def(v4), rsrc, offset,
- ctx->program->scratch_offset, 0, true);
- Temp upper = bld.mubuf(dst.size() == 6 ? aco_opcode::buffer_load_dwordx2 :
- aco_opcode::buffer_load_dwordx4,
- dst.size() == 6 ? bld.def(v2) : bld.def(v4),
- rsrc, offset, ctx->program->scratch_offset, 16, true);
- emit_split_vector(ctx, lower, 2);
- elems[0] = emit_extract_vector(ctx, lower, 0, v2);
- elems[1] = emit_extract_vector(ctx, lower, 1, v2);
- if (dst.size() == 8) {
- emit_split_vector(ctx, upper, 2);
- elems[2] = emit_extract_vector(ctx, upper, 0, v2);
- elems[3] = emit_extract_vector(ctx, upper, 1, v2);
- } else {
- elems[2] = upper;
- }
-
- aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
- Format::PSEUDO, dst.size() / 2, 1)};
- for (unsigned i = 0; i < dst.size() / 2; i++)
- vec->operands[i] = Operand(elems[i]);
- vec->definitions[0] = Definition(dst);
- bld.insert(std::move(vec));
- ctx->allocated_vec.emplace(dst.id(), elems);
- return;
- }
- default:
- unreachable("Wrong dst size for nir_intrinsic_load_scratch");
- }
-
- bld.mubuf(op, Definition(dst), rsrc, offset, ctx->program->scratch_offset, 0, true);
- emit_split_vector(ctx, dst, instr->num_components);
+ LoadEmitInfo info = {Operand(offset), dst, instr->dest.ssa.num_components,
+ instr->dest.ssa.bit_size / 8u, rsrc};
+ info.align_mul = nir_intrinsic_align_mul(instr);
+ info.align_offset = nir_intrinsic_align_offset(instr);
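+ /* GFX8 and earlier use a swizzled scratch layout with 4-byte elements, so loads must be split per dword */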
+ info.swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 0;
+ info.sync = memory_sync_info(storage_scratch, semantic_private);
+ info.soffset = ctx->program->scratch_offset;
+ emit_scratch_load(ctx, bld, &info);
}
void visit_store_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
- assert(instr->src[0].ssa->bit_size == 32 || instr->src[0].ssa->bit_size == 64);
Builder bld(ctx->program, ctx->block);
Temp rsrc = get_scratch_resource(ctx);
Temp data = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
Temp offset = as_vgpr(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
unsigned elem_size_bytes = instr->src[0].ssa->bit_size / 8;
- unsigned writemask = nir_intrinsic_write_mask(instr);
-
- while (writemask) {
- int start, count;
- u_bit_scan_consecutive_range(&writemask, &start, &count);
- int num_bytes = count * elem_size_bytes;
-
- if (num_bytes > 16) {
- assert(elem_size_bytes == 8);
- writemask |= (((count - 2) << 1) - 1) << (start + 2);
- count = 2;
- num_bytes = 16;
- }
-
- // TODO: check alignment of sub-dword stores
- // TODO: split 3 bytes. there is no store instruction for that
-
- Temp write_data;
- if (count != instr->num_components) {
- aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, count, 1)};
- for (int i = 0; i < count; i++) {
- Temp elem = emit_extract_vector(ctx, data, start + i, RegClass(RegType::vgpr, elem_size_bytes / 4));
- vec->operands[i] = Operand(elem);
- }
- write_data = bld.tmp(RegClass(RegType::vgpr, count * elem_size_bytes / 4));
- vec->definitions[0] = Definition(write_data);
- ctx->block->instructions.emplace_back(std::move(vec));
- } else {
- write_data = data;
- }
+ unsigned writemask = widen_mask(nir_intrinsic_write_mask(instr), elem_size_bytes);
- aco_opcode op;
- switch (num_bytes) {
- case 4:
- op = aco_opcode::buffer_store_dword;
- break;
- case 8:
- op = aco_opcode::buffer_store_dwordx2;
- break;
- case 12:
- op = aco_opcode::buffer_store_dwordx3;
- break;
- case 16:
- op = aco_opcode::buffer_store_dwordx4;
- break;
- default:
- unreachable("Invalid data size for nir_intrinsic_store_scratch.");
- }
+ unsigned write_count = 0;
+ Temp write_datas[32];
+ unsigned offsets[32];
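+ /* match the scratch swizzle: 4-byte elements on GFX8 and earlier, up to 16-byte chunks on newer chips */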
+ unsigned swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 16;
+ split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask,
+ swizzle_component_size, &write_count, write_datas, offsets);
- bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_data, start * elem_size_bytes, true);
+ for (unsigned i = 0; i < write_count; i++) {
+ aco_opcode op = get_buffer_store_op(false, write_datas[i].bytes());
+ Instruction *instr = bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_datas[i], offsets[i], true, true);
+ static_cast<MUBUF_instruction *>(instr)->sync = memory_sync_info(storage_scratch, semantic_private);
}
}
mtbuf->operands[0] = Operand(gsvs_ring);
mtbuf->operands[1] = vaddr_offset;
mtbuf->operands[2] = Operand(get_arg(ctx, ctx->args->gs2vs_offset));
- mtbuf->operands[3] = Operand(ctx->outputs.outputs[i][j]);
+ mtbuf->operands[3] = Operand(ctx->outputs.temps[i * 4u + j]);
mtbuf->offen = !vaddr_offset.isUndefined();
mtbuf->dfmt = V_008F0C_BUF_DATA_FORMAT_32;
mtbuf->nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
mtbuf->offset = const_offset;
mtbuf->glc = true;
mtbuf->slc = true;
- mtbuf->barrier = barrier_gs_data;
- mtbuf->can_reorder = true;
+ mtbuf->sync = memory_sync_info(storage_vmem_output, semantic_can_reorder);
bld.insert(std::move(mtbuf));
}
} else if (src.regClass() == s2) {
bld.sop1(aco_opcode::s_mov_b64, dst, src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
}
}
/* res_k = p_k + ddx_k * pos1 + ddy_k * pos2 */
- Temp tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_1, pos1, p1);
- Temp tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddx_2, pos1, p2);
- tmp1 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_1, pos2, tmp1);
- tmp2 = bld.vop3(aco_opcode::v_mad_f32, bld.def(v1), ddy_2, pos2, tmp2);
+ aco_opcode mad = ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fma_f32 : aco_opcode::v_mad_f32;
+ Temp tmp1 = bld.vop3(mad, bld.def(v1), ddx_1, pos1, p1);
+ Temp tmp2 = bld.vop3(mad, bld.def(v1), ddx_2, pos1, p2);
+ tmp1 = bld.vop3(mad, bld.def(v1), ddy_1, pos2, tmp1);
+ tmp2 = bld.vop3(mad, bld.def(v1), ddy_2, pos2, tmp2);
Temp wqm1 = bld.tmp(v1);
emit_wqm(ctx, tmp1, wqm1, true);
Temp wqm2 = bld.tmp(v1);
Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
Temp private_segment_buffer = ctx->program->private_segment_buffer;
+ //TODO: bounds checking?
if (addr.type() == RegType::sgpr) {
Operand offset;
if (const_addr) {
load->glc = false;
load->dlc = false;
load->disable_wqm = false;
- load->barrier = barrier_none;
- load->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(load));
}
case nir_intrinsic_shared_atomic_xor:
case nir_intrinsic_shared_atomic_exchange:
case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_fadd:
visit_shared_atomic(ctx, instr);
break;
case nir_intrinsic_image_deref_load:
case nir_intrinsic_get_buffer_size:
visit_get_buffer_size(ctx, instr);
break;
- case nir_intrinsic_control_barrier: {
- if (ctx->program->chip_class == GFX6 && ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
- /* GFX6 only (thanks to a hw bug workaround):
- * The real barrier instruction isn’t needed, because an entire patch
- * always fits into a single wave.
- */
- break;
- }
-
- if (ctx->shader->info.stage == MESA_SHADER_COMPUTE) {
- unsigned* bsize = ctx->program->info->cs.block_size;
- unsigned workgroup_size = bsize[0] * bsize[1] * bsize[2];
- if (workgroup_size > ctx->program->wave_size)
- bld.sopp(aco_opcode::s_barrier);
- } else if (ctx->shader->info.stage == MESA_SHADER_TESS_CTRL) {
- /* For each patch provided during rendering, n TCS shader invocations will be processed,
- * where n is the number of vertices in the output patch.
- */
- unsigned workgroup_size = ctx->tcs_num_patches * ctx->shader->info.tess.tcs_vertices_out;
- if (workgroup_size > ctx->program->wave_size)
- bld.sopp(aco_opcode::s_barrier);
- } else {
- /* We don't know the workgroup size, so always emit the s_barrier. */
- bld.sopp(aco_opcode::s_barrier);
- }
-
- break;
- }
- case nir_intrinsic_memory_barrier_tcs_patch:
- case nir_intrinsic_group_memory_barrier:
- case nir_intrinsic_memory_barrier:
- case nir_intrinsic_memory_barrier_buffer:
- case nir_intrinsic_memory_barrier_image:
- case nir_intrinsic_memory_barrier_shared:
- emit_memory_barrier(ctx, instr);
+ case nir_intrinsic_scoped_barrier:
+ emit_scoped_barrier(ctx, instr);
break;
case nir_intrinsic_load_num_work_groups: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
} else if (instr->src[0].ssa->bit_size == 64 && src.regClass() == v2) {
bld.vopc(aco_opcode::v_cmp_lg_u64, lanemask_tmp, Operand(0u), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
if (dst.size() != bld.lm.size()) {
/* Wave32 with ballot size set to 64 */
case nir_intrinsic_shuffle:
case nir_intrinsic_read_invocation: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!ctx->divergent_vals[instr->src[0].ssa->index]) {
+ if (!nir_src_is_divergent(instr->src[0])) {
emit_uniform_subgroup(ctx, instr, src);
} else {
Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
- if (instr->intrinsic == nir_intrinsic_read_invocation || !ctx->divergent_vals[instr->src[1].ssa->index])
+ if (instr->intrinsic == nir_intrinsic_read_invocation || !nir_src_is_divergent(instr->src[1]))
tid = bld.as_uniform(tid);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- if (src.regClass() == v1) {
+ if (src.regClass() == v1b || src.regClass() == v2b) {
+ Temp tmp = bld.tmp(v1);
+ tmp = emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), tmp);
+ if (dst.type() == RegType::vgpr)
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(src.regClass() == v1b ? v3b : v2b), tmp);
+ else
+ bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
+ } else if (src.regClass() == v1) {
emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), dst);
} else if (src.regClass() == v2) {
Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
tmp = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(1u), tmp);
emit_wqm(ctx, bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), tmp), dst);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
}
break;
case nir_intrinsic_read_first_invocation: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
- if (src.regClass() == v1) {
+ if (src.regClass() == v1b || src.regClass() == v2b || src.regClass() == v1) {
emit_wqm(ctx,
bld.vop1(aco_opcode::v_readfirstlane_b32, bld.def(s1), src),
dst);
} else if (src.regClass() == s2) {
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), src);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
nir_intrinsic_cluster_size(instr) : 0;
cluster_size = util_next_power_of_two(MIN2(cluster_size ? cluster_size : ctx->program->wave_size, ctx->program->wave_size));
- if (!ctx->divergent_vals[instr->src[0].ssa->index] && (op == nir_op_ior || op == nir_op_iand)) {
+ if (!nir_src_is_divergent(instr->src[0]) && (op == nir_op_ior || op == nir_op_iand)) {
emit_uniform_subgroup(ctx, instr, src);
} else if (instr->dest.ssa.bit_size == 1) {
if (op == nir_op_imul || op == nir_op_umin || op == nir_op_imin)
} else if (cluster_size == 1) {
bld.copy(Definition(dst), src);
} else {
- src = as_vgpr(ctx, src);
+ unsigned bit_size = instr->src[0].ssa->bit_size;
+
+ src = emit_extract_vector(ctx, src, 0, RegClass::get(RegType::vgpr, bit_size / 8));
ReduceOp reduce_op;
switch (op) {
- #define CASE(name) case nir_op_##name: reduce_op = (src.regClass() == v1) ? name##32 : name##64; break;
- CASE(iadd)
- CASE(imul)
- CASE(fadd)
- CASE(fmul)
- CASE(imin)
- CASE(umin)
- CASE(fmin)
- CASE(imax)
- CASE(umax)
- CASE(fmax)
- CASE(iand)
- CASE(ior)
- CASE(ixor)
+ #define CASEI(name) case nir_op_##name: reduce_op = (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : (bit_size == 8) ? name##8 : name##64; break;
+ #define CASEF(name) case nir_op_##name: reduce_op = (bit_size == 32) ? name##32 : (bit_size == 16) ? name##16 : name##64; break;
+ CASEI(iadd)
+ CASEI(imul)
+ CASEI(imin)
+ CASEI(umin)
+ CASEI(imax)
+ CASEI(umax)
+ CASEI(iand)
+ CASEI(ior)
+ CASEI(ixor)
+ CASEF(fadd)
+ CASEF(fmul)
+ CASEF(fmin)
+ CASEF(fmax)
default:
unreachable("unknown reduction op");
- #undef CASE
+ #undef CASEI
+ #undef CASEF
}
aco_opcode aco_op;
}
case nir_intrinsic_quad_broadcast: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!ctx->divergent_vals[instr->dest.ssa.index]) {
+ if (!nir_dest_is_divergent(instr->dest)) {
emit_uniform_subgroup(ctx, instr, src);
} else {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), mask_tmp,
bld.sop2(Builder::s_and, bld.def(bld.lm), bld.def(s1, scc), src, Operand(exec, bld.lm))));
emit_wqm(ctx, tmp, dst);
+ } else if (instr->dest.ssa.bit_size == 8) {
+ Temp tmp = bld.tmp(v1);
+ if (ctx->program->chip_class >= GFX8)
+ emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ else
+ emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), tmp);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v3b), tmp);
+ } else if (instr->dest.ssa.bit_size == 16) {
+ Temp tmp = bld.tmp(v1);
+ if (ctx->program->chip_class >= GFX8)
+ emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ else
+ emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, (1 << 15) | dpp_ctrl), tmp);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp);
} else if (instr->dest.ssa.bit_size == 32) {
if (ctx->program->chip_class >= GFX8)
emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), dst);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
}
break;
case nir_intrinsic_quad_swap_diagonal:
case nir_intrinsic_quad_swizzle_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!ctx->divergent_vals[instr->dest.ssa.index]) {
+ if (!nir_dest_is_divergent(instr->dest)) {
emit_uniform_subgroup(ctx, instr, src);
break;
}
src = bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl);
Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
emit_wqm(ctx, tmp, dst);
+ } else if (instr->dest.ssa.bit_size == 8) {
+ Temp tmp = bld.tmp(v1);
+ if (ctx->program->chip_class >= GFX8)
+ emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ else
+ emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v3b), tmp);
+ } else if (instr->dest.ssa.bit_size == 16) {
+ Temp tmp = bld.tmp(v1);
+ if (ctx->program->chip_class >= GFX8)
+ emit_wqm(ctx, bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ else
+ emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, dpp_ctrl), tmp);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(dst), bld.def(v2b), tmp);
} else if (instr->dest.ssa.bit_size == 32) {
Temp tmp;
if (ctx->program->chip_class >= GFX8)
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
case nir_intrinsic_masked_swizzle_amd: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
- if (!ctx->divergent_vals[instr->dest.ssa.index]) {
+ if (!nir_dest_is_divergent(instr->dest)) {
emit_uniform_subgroup(ctx, instr, src);
break;
}
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
uint32_t mask = nir_intrinsic_swizzle_mask(instr);
- if (dst.regClass() == v1) {
- emit_wqm(ctx,
- bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false),
- dst);
+ if (instr->dest.ssa.bit_size == 1) {
+ assert(src.regClass() == bld.lm);
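+ /* expand the boolean lane mask into a per-lane 0/-1 value so it can be swizzled as ordinary data */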
+ src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src);
+ src = emit_masked_swizzle(ctx, bld, src, mask);
+ Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
+ emit_wqm(ctx, tmp, dst);
+ } else if (dst.regClass() == v1b) {
+ Temp tmp = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask));
+ emit_extract_vector(ctx, tmp, 0, dst);
+ } else if (dst.regClass() == v2b) {
+ Temp tmp = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask));
+ emit_extract_vector(ctx, tmp, 0, dst);
+ } else if (dst.regClass() == v1) {
+ emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask), dst);
} else if (dst.regClass() == v2) {
Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
- lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, mask, 0, false));
- hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, mask, 0, false));
+ lo = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, lo, mask));
+ hi = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, hi, mask));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
- fprintf(stderr, "Unimplemented NIR instr bit size: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented NIR instr bit size");
}
break;
}
get_ssa_temp(ctx, &instr->dest.ssa));
break;
}
- case nir_intrinsic_shader_clock:
- bld.smem(aco_opcode::s_memtime, Definition(get_ssa_temp(ctx, &instr->dest.ssa)), false);
- emit_split_vector(ctx, get_ssa_temp(ctx, &instr->dest.ssa), 2);
+ case nir_intrinsic_shader_clock: {
+ Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
+ if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_SUBGROUP && ctx->options->chip_class >= GFX10_3) {
+ /* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
+ Temp clock = bld.sopk(aco_opcode::s_getreg_b32, bld.def(s1), ((20 - 1) << 11) | 29);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), clock, Operand(0u));
+ } else {
+ aco_opcode opcode =
+ nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE ?
+ aco_opcode::s_memrealtime : aco_opcode::s_memtime;
+ bld.smem(opcode, Definition(dst), memory_sync_info(0, semantic_volatile));
+ }
+ emit_split_vector(ctx, dst, 2);
break;
+ }
case nir_intrinsic_load_vertex_id_zero_base: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.vertex_id));
break;
}
default:
- fprintf(stderr, "Unimplemented intrinsic instr: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unimplemented intrinsic instr");
abort();
break;
{
Builder bld(ctx->program, ctx->block);
Temp ma, tc, sc, id;
+ aco_opcode madak = ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fmaak_f32 : aco_opcode::v_madak_f32;
+ aco_opcode madmk = ctx->program->chip_class >= GFX10_3 ? aco_opcode::v_fmamk_f32 : aco_opcode::v_madmk_f32;
if (is_array) {
coords[3] = bld.vop1(aco_opcode::v_rndne_f32, bld.def(v1), coords[3]);
sc = bld.vop3(aco_opcode::v_cubesc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
if (!is_deriv)
- sc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), sc, invma, Operand(0x3fc00000u/*1.5*/));
+ sc = bld.vop2(madak, bld.def(v1), sc, invma, Operand(0x3fc00000u/*1.5*/));
tc = bld.vop3(aco_opcode::v_cubetc_f32, bld.def(v1), coords[0], coords[1], coords[2]);
if (!is_deriv)
- tc = bld.vop2(aco_opcode::v_madak_f32, bld.def(v1), tc, invma, Operand(0x3fc00000u/*1.5*/));
+ tc = bld.vop2(madak, bld.def(v1), tc, invma, Operand(0x3fc00000u/*1.5*/));
id = bld.vop3(aco_opcode::v_cubeid_f32, bld.def(v1), coords[0], coords[1], coords[2]);
}
if (is_array)
- id = bld.vop2(aco_opcode::v_madmk_f32, bld.def(v1), coords[3], id, Operand(0x41000000u/*8.0*/));
+ id = bld.vop2(madmk, bld.def(v1), coords[3], id, Operand(0x41000000u/*8.0*/));
coords.resize(3);
coords[0] = sc;
coords[1] = tc;
{
Builder bld(ctx->program, ctx->block);
bool has_bias = false, has_lod = false, level_zero = false, has_compare = false,
- has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false, has_sample_index = false;
+ has_offset = false, has_ddx = false, has_ddy = false, has_derivs = false, has_sample_index = false,
+ has_clamped_lod = false;
Temp resource, sampler, fmask_ptr, bias = Temp(), compare = Temp(), sample_index = Temp(),
- lod = Temp(), offset = Temp(), ddx = Temp(), ddy = Temp();
+ lod = Temp(), offset = Temp(), ddx = Temp(), ddy = Temp(),
+ clamped_lod = Temp();
std::vector<Temp> coords;
std::vector<Temp> derivs;
nir_const_value *sample_index_cv = NULL;
break;
}
case nir_tex_src_bias:
- if (instr->op == nir_texop_txb) {
- bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
- has_bias = true;
- }
+ bias = get_ssa_temp(ctx, instr->src[i].src.ssa);
+ has_bias = true;
break;
case nir_tex_src_lod: {
nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
}
break;
}
+ case nir_tex_src_min_lod:
+ clamped_lod = get_ssa_temp(ctx, instr->src[i].src.ssa);
+ has_clamped_lod = true;
+ break;
case nir_tex_src_comparator:
if (instr->is_shadow) {
compare = get_ssa_temp(ctx, instr->src[i].src.ssa);
Temp samples_log2 = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(16u | 4u<<16));
Temp samples = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), Operand(1u), samples_log2);
Temp type = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), dword3, Operand(28u | 4u<<16 /* offset=28, width=4 */));
- Temp is_msaa = bld.sopc(aco_opcode::s_cmp_ge_u32, bld.def(s1, scc), type, Operand(14u));
+ Operand default_sample = Operand(1u);
+ if (ctx->options->robust_buffer_access) {
+ /* Extract the second dword of the descriptor; if it's
+ * all zero, then it's a null descriptor.
+ */
+ Temp dword1 = emit_extract_vector(ctx, resource, 1, s1);
+ Temp is_non_null_descriptor = bld.sopc(aco_opcode::s_cmp_gt_u32, bld.def(s1, scc), dword1, Operand(0u));
+ default_sample = Operand(is_non_null_descriptor);
+ }
+
+ Temp is_msaa = bld.sopc(aco_opcode::s_cmp_ge_u32, bld.def(s1, scc), type, Operand(14u));
bld.sop2(aco_opcode::s_cselect_b32, Definition(get_ssa_temp(ctx, &instr->dest.ssa)),
- samples, Operand(1u), bld.scc(is_msaa));
+ samples, default_sample, bld.scc(is_msaa));
return;
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D && ctx->options->chip_class == GFX9) {
assert(has_ddx && has_ddy && ddx.size() == 1 && ddy.size() == 1);
Temp zero = bld.copy(bld.def(v1), Operand(0u));
- derivs = {ddy, zero, ddy, zero};
+ derivs = {ddx, zero, ddy, zero};
} else {
for (unsigned i = 0; has_ddx && i < ddx.size(); i++)
derivs.emplace_back(emit_extract_vector(ctx, ddx, i, v1));
tex->da = da;
tex->definitions[0] = Definition(tmp_dst);
tex->dim = dim;
- tex->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(tex));
if (div_by_6) {
tex->da = da;
Temp size = bld.tmp(v2);
tex->definitions[0] = Definition(size);
- tex->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(tex));
emit_split_vector(ctx, size, size.size());
mubuf->operands[2] = Operand((uint32_t) 0);
mubuf->definitions[0] = Definition(tmp_dst);
mubuf->idxen = true;
- mubuf->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(mubuf));
expand_vector(ctx, tmp_dst, dst, instr->dest.ssa.num_components, (1 << last_bit) - 1);
args.emplace_back(sample_index);
if (has_lod)
args.emplace_back(lod);
+ if (has_clamped_lod)
+ args.emplace_back(clamped_lod);
Temp arg = bld.tmp(RegClass(RegType::vgpr, args.size()));
aco_ptr<Instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, args.size(), 1)};
tex->unrm = true;
tex->da = da;
tex->definitions[0] = Definition(tmp_dst);
- tex->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(tex));
if (instr->op == nir_texop_samples_identical) {
// TODO: would be better to do this by adding offsets, but needs the opcodes ordered.
aco_opcode opcode = aco_opcode::image_sample;
if (has_offset) { /* image_sample_*_o */
- if (has_compare) {
+ if (has_clamped_lod) {
+ if (has_compare) {
+ opcode = aco_opcode::image_sample_c_cl_o;
+ if (has_derivs)
+ opcode = aco_opcode::image_sample_c_d_cl_o;
+ if (has_bias)
+ opcode = aco_opcode::image_sample_c_b_cl_o;
+ } else {
+ opcode = aco_opcode::image_sample_cl_o;
+ if (has_derivs)
+ opcode = aco_opcode::image_sample_d_cl_o;
+ if (has_bias)
+ opcode = aco_opcode::image_sample_b_cl_o;
+ }
+ } else if (has_compare) {
opcode = aco_opcode::image_sample_c_o;
if (has_derivs)
opcode = aco_opcode::image_sample_c_d_o;
if (has_lod)
opcode = aco_opcode::image_sample_l_o;
}
+ } else if (has_clamped_lod) { /* image_sample_*_cl */
+ if (has_compare) {
+ opcode = aco_opcode::image_sample_c_cl;
+ if (has_derivs)
+ opcode = aco_opcode::image_sample_c_d_cl;
+ if (has_bias)
+ opcode = aco_opcode::image_sample_c_b_cl;
+ } else {
+ opcode = aco_opcode::image_sample_cl;
+ if (has_derivs)
+ opcode = aco_opcode::image_sample_d_cl;
+ if (has_bias)
+ opcode = aco_opcode::image_sample_b_cl;
+ }
} else { /* no offset */
if (has_compare) {
opcode = aco_opcode::image_sample_c;
}
if (instr->op == nir_texop_tg4) {
- if (has_offset) {
- opcode = aco_opcode::image_gather4_lz_o;
- if (has_compare)
+ if (has_offset) { /* image_gather4_*_o */
+ if (has_compare) {
opcode = aco_opcode::image_gather4_c_lz_o;
+ if (has_lod)
+ opcode = aco_opcode::image_gather4_c_l_o;
+ if (has_bias)
+ opcode = aco_opcode::image_gather4_c_b_o;
+ } else {
+ opcode = aco_opcode::image_gather4_lz_o;
+ if (has_lod)
+ opcode = aco_opcode::image_gather4_l_o;
+ if (has_bias)
+ opcode = aco_opcode::image_gather4_b_o;
+ }
} else {
- opcode = aco_opcode::image_gather4_lz;
- if (has_compare)
+ if (has_compare) {
opcode = aco_opcode::image_gather4_c_lz;
+ if (has_lod)
+ opcode = aco_opcode::image_gather4_c_l;
+ if (has_bias)
+ opcode = aco_opcode::image_gather4_c_b;
+ } else {
+ opcode = aco_opcode::image_gather4_lz;
+ if (has_lod)
+ opcode = aco_opcode::image_gather4_l;
+ if (has_bias)
+ opcode = aco_opcode::image_gather4_b;
+ }
}
} else if (instr->op == nir_texop_lod) {
opcode = aco_opcode::image_get_lod;
tex->dmask = dmask;
tex->da = da;
tex->definitions[0] = Definition(tmp_dst);
- tex->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(tex));
if (tg4_integer_cube_workaround) {
}
-Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa)
+Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa, RegClass rc, bool logical)
{
Temp tmp = get_ssa_temp(ctx, ssa);
- if (ssa->parent_instr->type == nir_instr_type_ssa_undef)
- return Operand(tmp.regClass());
- else
+ if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
+ return Operand(rc);
+ } else if (logical && ssa->bit_size == 1 && ssa->parent_instr->type == nir_instr_type_load_const) {
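+ /* divergent booleans are lane masks: a constant true/false becomes an all-ones/all-zeros mask */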
+ if (ctx->program->wave_size == 64)
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT64_MAX : 0u);
+ else
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT32_MAX : 0u);
+ } else {
return Operand(tmp);
+ }
}
void visit_phi(isel_context *ctx, nir_phi_instr *instr)
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
assert(instr->dest.ssa.bit_size != 1 || dst.regClass() == ctx->program->lane_mask);
- bool logical = !dst.is_linear() || ctx->divergent_vals[instr->dest.ssa.index];
+ bool logical = !dst.is_linear() || nir_dest_is_divergent(instr->dest);
logical |= ctx->block->kind & block_kind_merge;
aco_opcode opcode = logical ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
continue;
cur_pred_idx++;
- Operand op = get_phi_operand(ctx, src.second);
+ Operand op = get_phi_operand(ctx, src.second, dst.regClass(), logical);
operands[num_operands++] = op;
num_defined += !op.isUndefined();
}
/* uniform break - directly jump out of the loop */
ctx->block->kind |= block_kind_uniform;
ctx->cf_info.has_branch = true;
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_linear_edge(idx, logical_target);
return;
}
/* uniform continue - directly jump to the loop header */
ctx->block->kind |= block_kind_uniform;
ctx->cf_info.has_branch = true;
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_linear_edge(idx, logical_target);
return;
}
break;
default:
- fprintf(stderr, "Unknown NIR jump instr: ");
- nir_print_instr(&instr->instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(&instr->instr, "Unknown NIR jump instr");
abort();
}
}
/* remove critical edges from linear CFG */
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
Block* break_block = ctx->program->create_and_insert_block();
break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
break_block->kind |= block_kind_uniform;
logical_target = &ctx->program->blocks[ctx->cf_info.parent_loop.header_idx];
add_linear_edge(break_block->index, logical_target);
bld.reset(break_block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
Block* continue_block = ctx->program->create_and_insert_block();
continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
visit_jump(ctx, nir_instr_as_jump(instr));
break;
default:
- fprintf(stderr, "Unknown NIR instr type: ");
- nir_print_instr(instr, stderr);
- fprintf(stderr, "\n");
+ isel_err(instr, "Unknown NIR instr type");
//abort();
}
}
append_logical_end(ctx->block);
ctx->block->kind |= block_kind_loop_preheader | block_kind_uniform;
Builder bld(ctx->program, ctx->block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
unsigned loop_preheader_idx = ctx->block->index;
Block loop_exit = Block();
break_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
break_block->kind = block_kind_uniform;
bld.reset(break_block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_linear_edge(block_idx, break_block);
add_linear_edge(break_block->index, &loop_exit);
continue_block->loop_nest_depth = ctx->cf_info.loop_nest_depth;
continue_block->kind = block_kind_uniform;
bld.reset(continue_block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_linear_edge(block_idx, continue_block);
add_linear_edge(continue_block->index, &ctx->program->blocks[loop_header_idx]);
}
bld.reset(ctx->block);
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
}
/* Fixup phis in loop header from unreachable blocks.
/* branch to linear then block */
assert(cond.regClass() == ctx->program->lane_mask);
aco_ptr<Pseudo_branch_instruction> branch;
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0));
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
branch->operands[0] = Operand(cond);
ctx->block->instructions.push_back(std::move(branch));
append_logical_end(BB_then_logical);
/* branch from logical then block to invert block */
aco_ptr<Pseudo_branch_instruction> branch;
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
BB_then_logical->instructions.emplace_back(std::move(branch));
add_linear_edge(BB_then_logical->index, &ic->BB_invert);
if (!ctx->cf_info.parent_loop.has_divergent_branch)
BB_then_linear->kind |= block_kind_uniform;
add_linear_edge(ic->BB_if_idx, BB_then_linear);
/* branch from linear then block to invert block */
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
BB_then_linear->instructions.emplace_back(std::move(branch));
add_linear_edge(BB_then_linear->index, &ic->BB_invert);
ic->invert_idx = ctx->block->index;
/* branch to linear else block (skip else) */
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_nz, Format::PSEUDO_BRANCH, 1, 0));
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_nz, Format::PSEUDO_BRANCH, 1, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
branch->operands[0] = Operand(ic->cond);
ctx->block->instructions.push_back(std::move(branch));
/* branch from logical else block to endif block */
aco_ptr<Pseudo_branch_instruction> branch;
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
BB_else_logical->instructions.emplace_back(std::move(branch));
add_linear_edge(BB_else_logical->index, &ic->BB_endif);
if (!ctx->cf_info.parent_loop.has_divergent_branch)
ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
- /** emit linear else block */
- Block* BB_else_linear = ctx->program->create_and_insert_block();
- BB_else_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
- BB_else_linear->kind |= block_kind_uniform;
- add_linear_edge(ic->invert_idx, BB_else_linear);
+ /** emit linear else block */
+ Block* BB_else_linear = ctx->program->create_and_insert_block();
+ BB_else_linear->loop_nest_depth = ctx->cf_info.loop_nest_depth;
+ BB_else_linear->kind |= block_kind_uniform;
+ add_linear_edge(ic->invert_idx, BB_else_linear);
+
+ /* branch from linear else block to endif block */
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
+ BB_else_linear->instructions.emplace_back(std::move(branch));
+ add_linear_edge(BB_else_linear->index, &ic->BB_endif);
+
+ /** emit endif merge block */
+ ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
+ append_logical_start(ctx->block);
+
+ ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
+ ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
+ ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
+ ctx->cf_info.exec_potentially_empty_break_depth =
+ std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
+ if (ctx->cf_info.loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
+ !ctx->cf_info.parent_if.is_divergent) {
+ ctx->cf_info.exec_potentially_empty_break = false;
+ ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
+ }
+ /* uniform control flow never has an empty exec-mask */
+ if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
+ ctx->cf_info.exec_potentially_empty_discard = false;
+ ctx->cf_info.exec_potentially_empty_break = false;
+ ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
+ }
+}
+
+static void begin_uniform_if_then(isel_context *ctx, if_context *ic, Temp cond)
+{
+ assert(cond.regClass() == s1);
+
+ append_logical_end(ctx->block);
+ ctx->block->kind |= block_kind_uniform;
+
+ aco_ptr<Pseudo_branch_instruction> branch;
+ aco_opcode branch_opcode = aco_opcode::p_cbranch_z;
+ branch.reset(create_instruction<Pseudo_branch_instruction>(branch_opcode, Format::PSEUDO_BRANCH, 1, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
+ branch->operands[0] = Operand(cond);
+ branch->operands[0].setFixed(scc);
+ ctx->block->instructions.emplace_back(std::move(branch));
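+ /* p_cbranch_z branches over the then-block when SCC is zero (condition
+ * false) and falls through into the then-block otherwise. */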
+
+ ic->BB_if_idx = ctx->block->index;
+ ic->BB_endif = Block();
+ ic->BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
+ ic->BB_endif.kind |= ctx->block->kind & block_kind_top_level;
+
+ ctx->cf_info.has_branch = false;
+ ctx->cf_info.parent_loop.has_divergent_branch = false;
+
+ /** emit then block */
+ Block* BB_then = ctx->program->create_and_insert_block();
+ BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth;
+ add_edge(ic->BB_if_idx, BB_then);
+ append_logical_start(BB_then);
+ ctx->block = BB_then;
+}
+
+static void begin_uniform_if_else(isel_context *ctx, if_context *ic)
+{
+ Block *BB_then = ctx->block;
+
+ ic->uniform_has_then_branch = ctx->cf_info.has_branch;
+ ic->then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
- /* branch from linear else block to endif block */
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
- BB_else_linear->instructions.emplace_back(std::move(branch));
- add_linear_edge(BB_else_linear->index, &ic->BB_endif);
+ if (!ic->uniform_has_then_branch) {
+ append_logical_end(BB_then);
+ /* branch from then block to endif block */
+ aco_ptr<Pseudo_branch_instruction> branch;
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
+ BB_then->instructions.emplace_back(std::move(branch));
+ add_linear_edge(BB_then->index, &ic->BB_endif);
+ if (!ic->then_branch_divergent)
+ add_logical_edge(BB_then->index, &ic->BB_endif);
+ BB_then->kind |= block_kind_uniform;
+ }
+ ctx->cf_info.has_branch = false;
+ ctx->cf_info.parent_loop.has_divergent_branch = false;
- /** emit endif merge block */
- ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
- append_logical_start(ctx->block);
+ /** emit else block */
+ Block* BB_else = ctx->program->create_and_insert_block();
+ BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth;
+ add_edge(ic->BB_if_idx, BB_else);
+ append_logical_start(BB_else);
+ ctx->block = BB_else;
+}
+
+static void end_uniform_if(isel_context *ctx, if_context *ic)
+{
+ Block *BB_else = ctx->block;
- ctx->cf_info.parent_if.is_divergent = ic->divergent_old;
- ctx->cf_info.exec_potentially_empty_discard |= ic->exec_potentially_empty_discard_old;
- ctx->cf_info.exec_potentially_empty_break |= ic->exec_potentially_empty_break_old;
- ctx->cf_info.exec_potentially_empty_break_depth =
- std::min(ic->exec_potentially_empty_break_depth_old, ctx->cf_info.exec_potentially_empty_break_depth);
- if (ctx->cf_info.loop_nest_depth == ctx->cf_info.exec_potentially_empty_break_depth &&
- !ctx->cf_info.parent_if.is_divergent) {
- ctx->cf_info.exec_potentially_empty_break = false;
- ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
+ if (!ctx->cf_info.has_branch) {
+ append_logical_end(BB_else);
+ /* branch from else block to endif block */
+ aco_ptr<Pseudo_branch_instruction> branch;
+ branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 1));
+ branch->definitions[0] = {ctx->program->allocateId(), s2};
+ branch->definitions[0].setHint(vcc);
+ BB_else->instructions.emplace_back(std::move(branch));
+ add_linear_edge(BB_else->index, &ic->BB_endif);
+ if (!ctx->cf_info.parent_loop.has_divergent_branch)
+ add_logical_edge(BB_else->index, &ic->BB_endif);
+ BB_else->kind |= block_kind_uniform;
}
- /* uniform control flow never has an empty exec-mask */
- if (!ctx->cf_info.loop_nest_depth && !ctx->cf_info.parent_if.is_divergent) {
- ctx->cf_info.exec_potentially_empty_discard = false;
- ctx->cf_info.exec_potentially_empty_break = false;
- ctx->cf_info.exec_potentially_empty_break_depth = UINT16_MAX;
+
+ ctx->cf_info.has_branch &= ic->uniform_has_then_branch;
+ ctx->cf_info.parent_loop.has_divergent_branch &= ic->then_branch_divergent;
+
+ /** emit endif merge block */
+ if (!ctx->cf_info.has_branch) {
+ ctx->block = ctx->program->insert_block(std::move(ic->BB_endif));
+ append_logical_start(ctx->block);
}
}
Temp cond = get_ssa_temp(ctx, if_stmt->condition.ssa);
Builder bld(ctx->program, ctx->block);
aco_ptr<Pseudo_branch_instruction> branch;
+ if_context ic;
- if (!ctx->divergent_vals[if_stmt->condition.ssa->index]) { /* uniform condition */
+ if (!nir_src_is_divergent(if_stmt->condition)) { /* uniform condition */
/**
* Uniform conditionals are represented in the following way*) :
*
* to the loop exit/entry block. Otherwise, it branches to the next
* merge block.
**/
- append_logical_end(ctx->block);
- ctx->block->kind |= block_kind_uniform;
- /* emit branch */
- assert(cond.regClass() == bld.lm);
// TODO: in a post-RA optimizer, we could check if the condition is in VCC and omit this instruction
+ assert(cond.regClass() == ctx->program->lane_mask);
cond = bool_to_scalar_condition(ctx, cond);
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_cbranch_z, Format::PSEUDO_BRANCH, 1, 0));
- branch->operands[0] = Operand(cond);
- branch->operands[0].setFixed(scc);
- ctx->block->instructions.emplace_back(std::move(branch));
-
- unsigned BB_if_idx = ctx->block->index;
- Block BB_endif = Block();
- BB_endif.loop_nest_depth = ctx->cf_info.loop_nest_depth;
- BB_endif.kind |= ctx->block->kind & block_kind_top_level;
-
- /** emit then block */
- Block* BB_then = ctx->program->create_and_insert_block();
- BB_then->loop_nest_depth = ctx->cf_info.loop_nest_depth;
- add_edge(BB_if_idx, BB_then);
- append_logical_start(BB_then);
- ctx->block = BB_then;
+ begin_uniform_if_then(ctx, &ic, cond);
visit_cf_list(ctx, &if_stmt->then_list);
- BB_then = ctx->block;
- bool then_branch = ctx->cf_info.has_branch;
- bool then_branch_divergent = ctx->cf_info.parent_loop.has_divergent_branch;
-
- if (!then_branch) {
- append_logical_end(BB_then);
- /* branch from then block to endif block */
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
- BB_then->instructions.emplace_back(std::move(branch));
- add_linear_edge(BB_then->index, &BB_endif);
- if (!then_branch_divergent)
- add_logical_edge(BB_then->index, &BB_endif);
- BB_then->kind |= block_kind_uniform;
- }
-
- ctx->cf_info.has_branch = false;
- ctx->cf_info.parent_loop.has_divergent_branch = false;
- /** emit else block */
- Block* BB_else = ctx->program->create_and_insert_block();
- BB_else->loop_nest_depth = ctx->cf_info.loop_nest_depth;
- add_edge(BB_if_idx, BB_else);
- append_logical_start(BB_else);
- ctx->block = BB_else;
+ begin_uniform_if_else(ctx, &ic);
visit_cf_list(ctx, &if_stmt->else_list);
- BB_else = ctx->block;
-
- if (!ctx->cf_info.has_branch) {
- append_logical_end(BB_else);
- /* branch from then block to endif block */
- branch.reset(create_instruction<Pseudo_branch_instruction>(aco_opcode::p_branch, Format::PSEUDO_BRANCH, 0, 0));
- BB_else->instructions.emplace_back(std::move(branch));
- add_linear_edge(BB_else->index, &BB_endif);
- if (!ctx->cf_info.parent_loop.has_divergent_branch)
- add_logical_edge(BB_else->index, &BB_endif);
- BB_else->kind |= block_kind_uniform;
- }
-
- ctx->cf_info.has_branch &= then_branch;
- ctx->cf_info.parent_loop.has_divergent_branch &= then_branch_divergent;
- /** emit endif merge block */
- if (!ctx->cf_info.has_branch) {
- ctx->block = ctx->program->insert_block(std::move(BB_endif));
- append_logical_start(ctx->block);
- }
- return !ctx->cf_info.has_branch;
+ end_uniform_if(ctx, &ic);
} else { /* non-uniform condition */
/**
* To maintain a logical and linear CFG without critical edges,
* *) Exceptions may be due to break and continue statements within loops
**/
- if_context ic;
-
begin_divergent_if_then(ctx, &ic, cond);
visit_cf_list(ctx, &if_stmt->then_list);
visit_cf_list(ctx, &if_stmt->else_list);
end_divergent_if(ctx, &ic);
-
- return true;
}
+
+ return !ctx->cf_info.has_branch && !ctx->block->logical_preds.empty();
}
static bool visit_cf_list(isel_context *ctx,
return false;
}
-static void export_vs_varying(isel_context *ctx, int slot, bool is_pos, int *next_pos)
+static void create_null_export(isel_context *ctx)
+{
+ /* Some shader stages always need to have exports.
+ * So when there are none, we need to add a null export.
+ */
+
+ unsigned dest = (ctx->program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
+ bool vm = (ctx->program->stage & hw_fs) || ctx->program->chip_class >= GFX10;
+ Builder bld(ctx->program, ctx->block);
+ bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
+ /* enabled_mask */ 0, dest, /* compr */ false, /* done */ true, vm);
+}
+
+static bool export_vs_varying(isel_context *ctx, int slot, bool is_pos, int *next_pos)
{
assert(ctx->stage == vertex_vs ||
ctx->stage == tess_eval_vs ||
- ctx->stage == gs_copy_vs);
+ ctx->stage == gs_copy_vs ||
+ ctx->stage == ngg_vertex_gs ||
+ ctx->stage == ngg_tess_eval_gs);
- int offset = ctx->stage == tess_eval_vs
+ int offset = (ctx->stage & sw_tes)
? ctx->program->info->tes.outinfo.vs_output_param_offset[slot]
: ctx->program->info->vs.outinfo.vs_output_param_offset[slot];
uint64_t mask = ctx->outputs.mask[slot];
if (!is_pos && !mask)
- return;
+ return false;
if (!is_pos && offset == AC_EXP_PARAM_UNDEFINED)
- return;
+ return false;
aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
exp->enabled_mask = mask;
for (unsigned i = 0; i < 4; ++i) {
if (mask & (1 << i))
- exp->operands[i] = Operand(ctx->outputs.outputs[slot][i]);
+ exp->operands[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
else
exp->operands[i] = Operand(v1);
}
- /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
+ /* GFX10 (Navi1x) skips POS0 exports if EXEC=0 and DONE=0, causing a hang.
* Setting valid_mask=1 prevents it and has no other effect.
*/
- exp->valid_mask = ctx->options->chip_class >= GFX10 && is_pos && *next_pos == 0;
+ exp->valid_mask = ctx->options->chip_class == GFX10 && is_pos && *next_pos == 0;
exp->done = false;
exp->compressed = false;
if (is_pos)
else
exp->dest = V_008DFC_SQ_EXP_PARAM + offset;
ctx->block->instructions.emplace_back(std::move(exp));
+
+ return true;
}
static void export_vs_psiz_layer_viewport(isel_context *ctx, int *next_pos)
for (unsigned i = 0; i < 4; ++i)
exp->operands[i] = Operand(v1);
if (ctx->outputs.mask[VARYING_SLOT_PSIZ]) {
- exp->operands[0] = Operand(ctx->outputs.outputs[VARYING_SLOT_PSIZ][0]);
+ exp->operands[0] = Operand(ctx->outputs.temps[VARYING_SLOT_PSIZ * 4u]);
exp->enabled_mask |= 0x1;
}
if (ctx->outputs.mask[VARYING_SLOT_LAYER]) {
- exp->operands[2] = Operand(ctx->outputs.outputs[VARYING_SLOT_LAYER][0]);
+ exp->operands[2] = Operand(ctx->outputs.temps[VARYING_SLOT_LAYER * 4u]);
exp->enabled_mask |= 0x4;
}
if (ctx->outputs.mask[VARYING_SLOT_VIEWPORT]) {
if (ctx->options->chip_class < GFX9) {
- exp->operands[3] = Operand(ctx->outputs.outputs[VARYING_SLOT_VIEWPORT][0]);
+ exp->operands[3] = Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]);
exp->enabled_mask |= 0x8;
} else {
Builder bld(ctx->program, ctx->block);
Temp out = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u),
- Operand(ctx->outputs.outputs[VARYING_SLOT_VIEWPORT][0]));
+ Operand(ctx->outputs.temps[VARYING_SLOT_VIEWPORT * 4u]));
if (exp->operands[2].isTemp())
out = bld.vop2(aco_opcode::v_or_b32, bld.def(v1), Operand(out), exp->operands[2]);
exp->enabled_mask |= 0x4;
}
}
- exp->valid_mask = ctx->options->chip_class >= GFX10 && *next_pos == 0;
+ exp->valid_mask = ctx->options->chip_class == GFX10 && *next_pos == 0;
exp->done = false;
exp->compressed = false;
exp->dest = V_008DFC_SQ_EXP_POS + (*next_pos)++;
ctx->block->instructions.emplace_back(std::move(exp));
}
+static void create_export_phis(isel_context *ctx)
+{
+ /* Used when exports are needed, but the output temps are defined in a preceding block.
+ * This function will set up phis in order to access the outputs in the next block.
+ */
+
+ assert(ctx->block->instructions.back()->opcode == aco_opcode::p_logical_start);
+ aco_ptr<Instruction> logical_start = aco_ptr<Instruction>(ctx->block->instructions.back().release());
+ ctx->block->instructions.pop_back();
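+ /* Temporarily pop p_logical_start so the phis land ahead of it at the top
+ * of the block; it is re-inserted after them below. */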
+
+ Builder bld(ctx->program, ctx->block);
+
+ for (unsigned slot = 0; slot <= VARYING_SLOT_VAR31; ++slot) {
+ uint64_t mask = ctx->outputs.mask[slot];
+ for (unsigned i = 0; i < 4; ++i) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ Temp old = ctx->outputs.temps[slot * 4 + i];
+ Temp phi = bld.pseudo(aco_opcode::p_phi, bld.def(v1), old, Operand(v1));
+ ctx->outputs.temps[slot * 4 + i] = phi;
+ }
+ }
+
+ bld.insert(std::move(logical_start));
+}
+
static void create_vs_exports(isel_context *ctx)
{
assert(ctx->stage == vertex_vs ||
ctx->stage == tess_eval_vs ||
- ctx->stage == gs_copy_vs);
+ ctx->stage == gs_copy_vs ||
+ ctx->stage == ngg_vertex_gs ||
+ ctx->stage == ngg_tess_eval_gs);
- radv_vs_output_info *outinfo = ctx->stage == tess_eval_vs
+ radv_vs_output_info *outinfo = (ctx->stage & sw_tes)
? &ctx->program->info->tes.outinfo
: &ctx->program->info->vs.outinfo;
- if (outinfo->export_prim_id) {
+ if (outinfo->export_prim_id && !(ctx->stage & hw_ngg_gs)) {
ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
- ctx->outputs.outputs[VARYING_SLOT_PRIMITIVE_ID][0] = get_arg(ctx, ctx->args->vs_prim_id);
+ ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = get_arg(ctx, ctx->args->vs_prim_id);
}
if (ctx->options->key.has_multiview_view_index) {
ctx->outputs.mask[VARYING_SLOT_LAYER] |= 0x1;
- ctx->outputs.outputs[VARYING_SLOT_LAYER][0] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index));
+ ctx->outputs.temps[VARYING_SLOT_LAYER * 4u] = as_vgpr(ctx, get_arg(ctx, ctx->args->ac.view_index));
}
/* the order these position exports are created is important */
int next_pos = 0;
- export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);
+ bool exported_pos = export_vs_varying(ctx, VARYING_SLOT_POS, true, &next_pos);
if (outinfo->writes_pointsize || outinfo->writes_layer || outinfo->writes_viewport_index) {
export_vs_psiz_layer_viewport(ctx, &next_pos);
+ exported_pos = true;
}
if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
- export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
+ exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST0, true, &next_pos);
if (ctx->num_clip_distances + ctx->num_cull_distances > 4)
- export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);
+ exported_pos |= export_vs_varying(ctx, VARYING_SLOT_CLIP_DIST1, true, &next_pos);
if (ctx->export_clip_dists) {
if (ctx->num_clip_distances + ctx->num_cull_distances > 0)
}
for (unsigned i = 0; i <= VARYING_SLOT_VAR31; ++i) {
- if (i < VARYING_SLOT_VAR0 && i != VARYING_SLOT_LAYER &&
- i != VARYING_SLOT_PRIMITIVE_ID)
+ if (i < VARYING_SLOT_VAR0 &&
+ i != VARYING_SLOT_LAYER &&
+ i != VARYING_SLOT_PRIMITIVE_ID &&
+ i != VARYING_SLOT_VIEWPORT)
continue;
export_vs_varying(ctx, i, false, NULL);
}
+
+ if (!exported_pos)
+ create_null_export(ctx);
}
-static void export_fs_mrt_z(isel_context *ctx)
+static bool export_fs_mrt_z(isel_context *ctx)
{
Builder bld(ctx->program, ctx->block);
unsigned enabled_channels = 0;
if (ctx->program->info->ps.writes_stencil) {
/* Stencil should be in X[23:16]. */
- values[0] = Operand(ctx->outputs.outputs[FRAG_RESULT_STENCIL][0]);
+ values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
values[0] = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(16u), values[0]);
enabled_channels |= 0x3;
}
if (ctx->program->info->ps.writes_sample_mask) {
/* SampleMask should be in Y[15:0]. */
- values[1] = Operand(ctx->outputs.outputs[FRAG_RESULT_SAMPLE_MASK][0]);
+ values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
enabled_channels |= 0xc;
}
} else {
if (ctx->program->info->ps.writes_z) {
- values[0] = Operand(ctx->outputs.outputs[FRAG_RESULT_DEPTH][0]);
+ values[0] = Operand(ctx->outputs.temps[FRAG_RESULT_DEPTH * 4u]);
enabled_channels |= 0x1;
}
if (ctx->program->info->ps.writes_stencil) {
- values[1] = Operand(ctx->outputs.outputs[FRAG_RESULT_STENCIL][0]);
+ values[1] = Operand(ctx->outputs.temps[FRAG_RESULT_STENCIL * 4u]);
enabled_channels |= 0x2;
}
if (ctx->program->info->ps.writes_sample_mask) {
- values[2] = Operand(ctx->outputs.outputs[FRAG_RESULT_SAMPLE_MASK][0]);
+ values[2] = Operand(ctx->outputs.temps[FRAG_RESULT_SAMPLE_MASK * 4u]);
enabled_channels |= 0x4;
}
}
bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
enabled_channels, V_008DFC_SQ_EXP_MRTZ, compr);
+
+ return true;
}
-static void export_fs_mrt_color(isel_context *ctx, int slot)
+static bool export_fs_mrt_color(isel_context *ctx, int slot)
{
Builder bld(ctx->program, ctx->block);
unsigned write_mask = ctx->outputs.mask[slot];
for (unsigned i = 0; i < 4; ++i) {
if (write_mask & (1 << i)) {
- values[i] = Operand(ctx->outputs.outputs[slot][i]);
+ values[i] = Operand(ctx->outputs.temps[slot * 4u + i]);
} else {
values[i] = Operand(v1);
}
bool is_int8 = (ctx->options->key.fs.is_int8 >> slot) & 1;
bool is_int10 = (ctx->options->key.fs.is_int10 >> slot) & 1;
+ bool is_16bit = values[0].regClass() == v2b;
switch (col_format)
{
case V_028714_SPI_SHADER_FP16_ABGR:
enabled_channels = 0x5;
compr_op = aco_opcode::v_cvt_pkrtz_f16_f32;
+ if (is_16bit) {
+ if (ctx->options->chip_class >= GFX9) {
+ /* Pack the FP16 values together instead of converting them to
+ * FP32 and back to FP16.
+ * TODO: use p_create_vector and let the compiler optimize it.
+ */
+ compr_op = aco_opcode::v_pack_b32_f16;
+ } else {
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask >> i) & 1)
+ values[i] = bld.vop1(aco_opcode::v_cvt_f32_f16, bld.def(v1), values[i]);
+ }
+ }
+ }
break;
case V_028714_SPI_SHADER_UNORM16_ABGR:
enabled_channels = 0x5;
- compr_op = aco_opcode::v_cvt_pknorm_u16_f32;
+ if (is_16bit && ctx->options->chip_class >= GFX9) {
+ compr_op = aco_opcode::v_cvt_pknorm_u16_f16;
+ } else {
+ compr_op = aco_opcode::v_cvt_pknorm_u16_f32;
+ }
break;
case V_028714_SPI_SHADER_SNORM16_ABGR:
enabled_channels = 0x5;
- compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
+ if (is_16bit && ctx->options->chip_class >= GFX9) {
+ compr_op = aco_opcode::v_cvt_pknorm_i16_f16;
+ } else {
+ compr_op = aco_opcode::v_cvt_pknorm_i16_f32;
+ }
break;
case V_028714_SPI_SHADER_UINT16_ABGR: {
values[i]);
}
}
+ } else if (is_16bit) {
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask >> i) & 1) {
+ Temp tmp = convert_int(ctx, bld, values[i].getTemp(), 16, 32, false);
+ values[i] = Operand(tmp);
+ }
+ }
}
break;
}
values[i]);
}
}
+ } else if (is_16bit) {
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask >> i) & 1) {
+ Temp tmp = convert_int(ctx, bld, values[i].getTemp(), 16, 32, true);
+ values[i] = Operand(tmp);
+ }
+ }
}
break;
}
if (target == V_008DFC_SQ_EXP_NULL)
- return;
+ return false;
+
+ /* Replace NaN by zero (only 32-bit) to fix game bugs if requested. */
+ if (ctx->options->enable_mrt_output_nan_fixup &&
+ !is_16bit &&
+ (col_format == V_028714_SPI_SHADER_32_R ||
+ col_format == V_028714_SPI_SHADER_32_GR ||
+ col_format == V_028714_SPI_SHADER_32_AR ||
+ col_format == V_028714_SPI_SHADER_32_ABGR ||
+ col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
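+ /* v_cmp_class_f32 with class mask 3 (bits 0-1) matches signaling and quiet
+ * NaNs, so the v_cndmask below replaces any NaN with zero. */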
+ for (int i = 0; i < 4; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
+
+ Temp isnan = bld.vopc(aco_opcode::v_cmp_class_f32,
+ bld.hint_vcc(bld.def(bld.lm)), values[i],
+ bld.copy(bld.def(v1), Operand(3u)));
+ values[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), values[i],
+ bld.copy(bld.def(v1), Operand(0u)), isnan);
+ }
+ }
if ((bool) compr_op) {
for (int i = 0; i < 2; i++) {
bld.exp(aco_opcode::exp, values[0], values[1], values[2], values[3],
enabled_channels, target, (bool) compr_op);
+ return true;
}
static void create_fs_exports(isel_context *ctx)
{
+ bool exported = false;
+
/* Export depth, stencil and sample mask. */
if (ctx->outputs.mask[FRAG_RESULT_DEPTH] ||
ctx->outputs.mask[FRAG_RESULT_STENCIL] ||
- ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK]) {
- export_fs_mrt_z(ctx);
- }
+ ctx->outputs.mask[FRAG_RESULT_SAMPLE_MASK])
+ exported |= export_fs_mrt_z(ctx);
/* Export all color render targets. */
- for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i) {
+ for (unsigned i = FRAG_RESULT_DATA0; i < FRAG_RESULT_DATA7 + 1; ++i)
if (ctx->outputs.mask[i])
- export_fs_mrt_color(ctx, i);
- }
+ exported |= export_fs_mrt_color(ctx, i);
+
+ if (!exported)
+ create_null_export(ctx);
+}
+
+static void create_workgroup_barrier(Builder& bld)
+{
+ bld.barrier(aco_opcode::p_barrier,
+ memory_sync_info(storage_shared, semantic_acqrel, scope_workgroup),
+ scope_workgroup);
}
static void write_tcs_tess_factors(isel_context *ctx)
return;
}
- const unsigned tess_index_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
- const unsigned tess_index_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
-
Builder bld(ctx->program, ctx->block);
- bld.barrier(aco_opcode::p_memory_barrier_shared);
- unsigned workgroup_size = ctx->tcs_num_patches * ctx->shader->info.tess.tcs_vertices_out;
- if (unlikely(ctx->program->chip_class != GFX6 && workgroup_size > ctx->program->wave_size))
- bld.sopp(aco_opcode::s_barrier);
+ create_workgroup_barrier(bld);
Temp tcs_rel_ids = get_arg(ctx, ctx->args->ac.tcs_rel_ids);
Temp invocation_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), tcs_rel_ids, Operand(8u), Operand(5u));
std::pair<Temp, unsigned> lds_base = get_tcs_output_lds_offset(ctx);
unsigned stride = inner_comps + outer_comps;
- Temp inner[4];
- Temp outer[4];
+ unsigned lds_align = calculate_lds_alignment(ctx, lds_base.second);
+ Temp tf_inner_vec;
+ Temp tf_outer_vec;
Temp out[6];
- assert(inner_comps <= (sizeof(inner) / sizeof(Temp)));
- assert(outer_comps <= (sizeof(outer) / sizeof(Temp)));
assert(stride <= (sizeof(out) / sizeof(Temp)));
if (ctx->args->options->key.tcs.primitive_mode == GL_ISOLINES) {
// LINES reversal
- outer[0] = out[1] = load_lds(ctx, 4, bld.tmp(v1), lds_base.first, lds_base.second + tess_index_outer * 16 + 0 * 4, 4);
- outer[1] = out[0] = load_lds(ctx, 4, bld.tmp(v1), lds_base.first, lds_base.second + tess_index_outer * 16 + 1 * 4, 4);
+ tf_outer_vec = load_lds(ctx, 4, bld.tmp(v2), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
+ out[1] = emit_extract_vector(ctx, tf_outer_vec, 0, v1);
+ out[0] = emit_extract_vector(ctx, tf_outer_vec, 1, v1);
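+ /* The HW consumes isoline tess factors in reversed order, so component 0
+ * of the loaded vec2 goes to out[1] and component 1 to out[0]. */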
} else {
- for (unsigned i = 0; i < outer_comps; ++i)
- outer[i] = out[i] = load_lds(ctx, 4, bld.tmp(v1), lds_base.first, lds_base.second + tess_index_outer * 16 + i * 4, 4);
+ tf_outer_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, outer_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_out_loc, lds_align);
+ tf_inner_vec = load_lds(ctx, 4, bld.tmp(RegClass(RegType::vgpr, inner_comps)), lds_base.first, lds_base.second + ctx->tcs_tess_lvl_in_loc, lds_align);
+ for (unsigned i = 0; i < outer_comps; ++i)
+ out[i] = emit_extract_vector(ctx, tf_outer_vec, i, v1);
for (unsigned i = 0; i < inner_comps; ++i)
- inner[i] = out[outer_comps + i] = load_lds(ctx, 4, bld.tmp(v1), lds_base.first, lds_base.second + tess_index_inner * 16 + i * 4, 4);
+ out[outer_comps + i] = emit_extract_vector(ctx, tf_inner_vec, i, v1);
}
Temp rel_patch_id = get_tess_rel_patch_id(ctx);
Temp tf_base = get_arg(ctx, ctx->args->tess_factor_offset);
- Temp byte_offset = bld.v_mul_imm(bld.def(v1), rel_patch_id, stride * 4u);
+ Temp byte_offset = bld.v_mul24_imm(bld.def(v1), rel_patch_id, stride * 4u);
unsigned tf_const_offset = 0;
if (ctx->program->chip_class <= GFX8) {
Temp control_word = bld.copy(bld.def(v1), Operand(0x80000000u));
bld.mubuf(aco_opcode::buffer_store_dword,
/* SRSRC */ hs_ring_tess_factor, /* VADDR */ Operand(v1), /* SOFFSET */ tf_base, /* VDATA */ control_word,
- /* immediate OFFSET */ 0, /* OFFEN */ false, /* idxen*/ false, /* addr64 */ false,
- /* disable_wqm */ false, /* glc */ true);
+ /* immediate OFFSET */ 0, /* OFFEN */ false, /* swizzled */ false, /* idxen*/ false,
+ /* addr64 */ false, /* disable_wqm */ false, /* glc */ true);
tf_const_offset += 4;
begin_divergent_if_else(ctx, &ic_rel_patch_id_is_zero);
}
assert(stride == 2 || stride == 4 || stride == 6);
- Temp tf_vec = create_vec_from_array(ctx, out, stride, RegType::vgpr);
- store_vmem_mubuf(ctx, tf_vec, hs_ring_tess_factor, byte_offset, tf_base, tf_const_offset, 4, (1 << stride) - 1, true, false);
+ Temp tf_vec = create_vec_from_array(ctx, out, stride, RegType::vgpr, 4u);
+ store_vmem_mubuf(ctx, tf_vec, hs_ring_tess_factor, byte_offset, tf_base, tf_const_offset, 4, (1 << stride) - 1, true, memory_sync_info());
/* Store to offchip for TES to read - only if TES reads them */
if (ctx->args->options->key.tcs.tes_reads_tess_factors) {
Temp hs_ring_tess_offchip = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), ctx->program->private_segment_buffer, Operand(RING_HS_TESS_OFFCHIP * 16u));
Temp oc_lds = get_arg(ctx, ctx->args->oc_lds);
- std::pair<Temp, unsigned> vmem_offs_outer = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, tess_index_outer * 16);
- Temp outer_vec = create_vec_from_array(ctx, outer, outer_comps, RegType::vgpr);
- store_vmem_mubuf(ctx, outer_vec, hs_ring_tess_offchip, vmem_offs_outer.first, oc_lds, vmem_offs_outer.second, 4, (1 << outer_comps) - 1, true, false);
+ std::pair<Temp, unsigned> vmem_offs_outer = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_out_loc);
+ store_vmem_mubuf(ctx, tf_outer_vec, hs_ring_tess_offchip, vmem_offs_outer.first, oc_lds, vmem_offs_outer.second, 4, (1 << outer_comps) - 1, true, memory_sync_info(storage_vmem_output));
if (likely(inner_comps)) {
- std::pair<Temp, unsigned> vmem_offs_inner = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, tess_index_inner * 16);
- Temp inner_vec = create_vec_from_array(ctx, inner, inner_comps, RegType::vgpr);
- store_vmem_mubuf(ctx, inner_vec, hs_ring_tess_offchip, vmem_offs_inner.first, oc_lds, vmem_offs_inner.second, 4, (1 << inner_comps) - 1, true, false);
+ std::pair<Temp, unsigned> vmem_offs_inner = get_tcs_per_patch_output_vmem_offset(ctx, nullptr, ctx->tcs_tess_lvl_in_loc);
+ store_vmem_mubuf(ctx, tf_inner_vec, hs_ring_tess_offchip, vmem_offs_inner.first, oc_lds, vmem_offs_inner.second, 4, (1 << inner_comps) - 1, true, memory_sync_info(storage_vmem_output));
}
}
Temp out[4];
bool all_undef = true;
- assert(ctx->stage == vertex_vs || ctx->stage == gs_copy_vs);
+ assert(ctx->stage & hw_vs);
for (unsigned i = 0; i < num_comps; i++) {
- out[i] = ctx->outputs.outputs[loc][start + i];
+ out[i] = ctx->outputs.temps[loc * 4 + start + i];
all_undef = all_undef && !out[i].id();
}
if (all_undef)
store->glc = true;
store->dlc = false;
store->slc = true;
- store->can_reorder = true;
ctx->block->instructions.emplace_back(std::move(store));
}
}
float_controls & (FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 |
FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 | FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
- /* default to preserving fp16 and fp64 denorms, since it's free */
+ /* default to preserving fp16 and fp64 denorms, since it's free for fp64 and
+ * the precision seems needed for Wolfenstein: Youngblood to render correctly */
if (program->next_fp_mode.must_flush_denorms16_64)
program->next_fp_mode.denorm16_64 = 0;
else
}
}
+Temp merged_wave_info_to_mask(isel_context *ctx, unsigned i)
+{
+ Builder bld(ctx->program, ctx->block);
+
+ /* The s_bfm only cares about s0.u[5:0], so we need neither s_bfe nor s_and here */
+ Temp count = i == 0
+ ? get_arg(ctx, ctx->args->merged_wave_info)
+ : bld.sop2(aco_opcode::s_lshr_b32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->merged_wave_info), Operand(i * 8u));
+
+ Temp mask = bld.sop2(aco_opcode::s_bfm_b64, bld.def(s2), count, Operand(0u));
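+ /* s_bfm_b64 yields a mask of `count` consecutive low bits, e.g. count = 12
+ * gives 0xfff, i.e. lanes 0-11 active. */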
+ Temp cond;
+
+ if (ctx->program->wave_size == 64) {
+ /* Special case for 64 active invocations, because 64 doesn't work with s_bfm */
+ Temp active_64 = bld.sopc(aco_opcode::s_bitcmp1_b32, bld.def(s1, scc), count, Operand(6u /* log2(64) */));
+ cond = bld.sop2(Builder::s_cselect, bld.def(bld.lm), Operand(-1u), mask, bld.scc(active_64));
+ } else {
+ /* We use s_bfm_b64 (not _b32) which works with 32, but we need to extract the lower half of the register */
+ cond = emit_extract_vector(ctx, mask, 0, bld.lm);
+ }
+
+ return cond;
+}
+
+bool ngg_early_prim_export(isel_context *ctx)
+{
+ /* TODO: Check edge flags, and if they are written, return false. (Needed for OpenGL, not for Vulkan.) */
+ return true;
+}
+
+void ngg_emit_sendmsg_gs_alloc_req(isel_context *ctx)
+{
+ Builder bld(ctx->program, ctx->block);
+
+ /* It is recommended to do the GS_ALLOC_REQ as soon and as quickly as possible, so we set the maximum priority (3). */
+ bld.sopp(aco_opcode::s_setprio, -1u, 0x3u);
+
+ /* Get the id of the current wave within the threadgroup (workgroup) */
+ Builder::Result wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16)));
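+ /* The s_bfe_u32 immediate encodes offset | (width << 16), so this extracts
+ * the 4-bit wave id from merged_wave_info bits [27:24]. */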
+
+ /* Execute the following code only on the first wave (wave id 0),
+ * use the SCC def to tell if the wave id is zero or not.
+ */
+ Temp cond = wave_id_in_tg.def(1).getTemp();
+ if_context ic;
+ begin_uniform_if_then(ctx, &ic, cond);
+ begin_uniform_if_else(ctx, &ic);
+ bld.reset(ctx->block);
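+ /* SCC is 1 for every wave with a nonzero id: those run the (empty)
+ * then-block, while wave 0 takes the cbranch_z into the else-block where
+ * the allocation request below is emitted. */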
+
+ /* Number of vertices output by VS/TES */
+ Temp vtx_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->gs_tg_info), Operand(12u | (9u << 16u)));
+ /* Number of primitives output by VS/TES */
+ Temp prm_cnt = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->gs_tg_info), Operand(22u | (9u << 16u)));
+
+ /* Put the number of vertices and primitives into m0 for the GS_ALLOC_REQ */
+ Temp tmp = bld.sop2(aco_opcode::s_lshl_b32, bld.def(s1), bld.def(s1, scc), prm_cnt, Operand(12u));
+ tmp = bld.sop2(aco_opcode::s_or_b32, bld.m0(bld.def(s1)), bld.def(s1, scc), tmp, vtx_cnt);
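+ /* m0 now holds (prm_cnt << 12) | vtx_cnt, the payload layout the SPI
+ * expects for GS_ALLOC_REQ. */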
+
+ /* Request the SPI to allocate space for the primitives and vertices that will be exported by the threadgroup. */
+ bld.sopp(aco_opcode::s_sendmsg, bld.m0(tmp), -1, sendmsg_gs_alloc_req);
+
+ end_uniform_if(ctx, &ic);
+
+ /* After the GS_ALLOC_REQ is done, reset priority to default (0). */
+ bld.reset(ctx->block);
+ bld.sopp(aco_opcode::s_setprio, -1u, 0x0u);
+}
+
+Temp ngg_get_prim_exp_arg(isel_context *ctx, unsigned num_vertices, const Temp vtxindex[])
+{
+ Builder bld(ctx->program, ctx->block);
+
+ if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) {
+ return get_arg(ctx, ctx->args->gs_vtx_offset[0]);
+ }
+
+ Temp gs_invocation_id = get_arg(ctx, ctx->args->ac.gs_invocation_id);
+ Temp tmp;
+
+ for (unsigned i = 0; i < num_vertices; ++i) {
+ assert(vtxindex[i].id());
+
+ if (i)
+ tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), vtxindex[i], Operand(10u * i), tmp);
+ else
+ tmp = vtxindex[i];
+
+ /* The initial edge flag is always false in tess eval shaders. */
+ if (ctx->stage == ngg_vertex_gs) {
+ Temp edgeflag = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1), gs_invocation_id, Operand(8 + i), Operand(1u));
+ tmp = bld.vop3(aco_opcode::v_lshl_add_u32, bld.def(v1), edgeflag, Operand(10u * i + 9u), tmp);
+ }
+ }
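+ /* Each vertex index lands at bit 10*i with its edge flag at bit 10*i+9;
+ * bit 31 is the isnull flag (see the TODO below). */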
+
+ /* TODO: Set isnull field in case of merged NGG VS+GS. */
+
+ return tmp;
+}
+
+void ngg_emit_prim_export(isel_context *ctx, unsigned num_vertices_per_primitive, const Temp vtxindex[])
+{
+ Builder bld(ctx->program, ctx->block);
+ Temp prim_exp_arg = ngg_get_prim_exp_arg(ctx, num_vertices_per_primitive, vtxindex);
+
+ bld.exp(aco_opcode::exp, prim_exp_arg, Operand(v1), Operand(v1), Operand(v1),
+ 1 /* enabled mask */, V_008DFC_SQ_EXP_PRIM /* dest */,
+ false /* compressed */, true/* done */, false /* valid mask */);
+}
+
+void ngg_emit_nogs_gsthreads(isel_context *ctx)
+{
+ /* Emit the things that NGG GS threads need to do, for shaders that don't have SW GS.
+ * These must always come before VS exports.
+ *
+ * It is recommended to do these as early as possible. They can be at the beginning when
+ * there is no SW GS and the shader doesn't write edge flags.
+ */
+
+ if_context ic;
+ Temp is_gs_thread = merged_wave_info_to_mask(ctx, 1);
+ begin_divergent_if_then(ctx, &ic, is_gs_thread);
+
+ Builder bld(ctx->program, ctx->block);
+ constexpr unsigned max_vertices_per_primitive = 3;
+ unsigned num_vertices_per_primitive = max_vertices_per_primitive;
+
+ if (ctx->stage == ngg_vertex_gs) {
+ /* TODO: optimize for points & lines */
+ } else if (ctx->stage == ngg_tess_eval_gs) {
+ if (ctx->shader->info.tess.point_mode)
+ num_vertices_per_primitive = 1;
+ else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES)
+ num_vertices_per_primitive = 2;
+ } else {
+ unreachable("Unsupported NGG shader stage");
+ }
+
+ Temp vtxindex[max_vertices_per_primitive];
+ vtxindex[0] = bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu),
+ get_arg(ctx, ctx->args->gs_vtx_offset[0]));
+ vtxindex[1] = num_vertices_per_primitive < 2 ? Temp(0, v1) :
+ bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
+ get_arg(ctx, ctx->args->gs_vtx_offset[0]), Operand(16u), Operand(16u));
+ vtxindex[2] = num_vertices_per_primitive < 3 ? Temp(0, v1) :
+ bld.vop2(aco_opcode::v_and_b32, bld.def(v1), Operand(0xffffu),
+ get_arg(ctx, ctx->args->gs_vtx_offset[2]));
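+ /* In the non-passthrough case, gs_vtx_offset[0] packs the first two vertex
+ * indices into its 16-bit halves and gs_vtx_offset[2] holds the third in
+ * its low half. */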
+
+ /* Export primitive data to the index buffer. */
+ ngg_emit_prim_export(ctx, num_vertices_per_primitive, vtxindex);
+
+ /* Export primitive ID. */
+ if (ctx->stage == ngg_vertex_gs && ctx->args->options->key.vs_common_out.export_prim_id) {
+ /* Copy Primitive IDs from GS threads to the LDS address corresponding to the ES thread of the provoking vertex. */
+ Temp prim_id = get_arg(ctx, ctx->args->ac.gs_prim_id);
+ Temp provoking_vtx_index = vtxindex[0];
+ Temp addr = bld.v_mul_imm(bld.def(v1), provoking_vtx_index, 4u);
+
+ store_lds(ctx, 4, prim_id, 0x1u, addr, 0u, 4u);
+ }
+
+ begin_divergent_if_else(ctx, &ic);
+ end_divergent_if(ctx, &ic);
+}
+
+void ngg_emit_nogs_output(isel_context *ctx)
+{
+ /* Emits NGG GS output, for stages that don't have SW GS. */
+
+ if_context ic;
+ Builder bld(ctx->program, ctx->block);
+ bool late_prim_export = !ngg_early_prim_export(ctx);
+
+ /* NGG streamout is currently disabled by default. */
+ assert(!ctx->args->shader_info->so.num_outputs);
+
+ if (late_prim_export) {
+ /* VS exports are output to registers in a predecessor block. Emit phis to get them into this block. */
+ create_export_phis(ctx);
+ /* Do what we need to do in the GS threads. */
+ ngg_emit_nogs_gsthreads(ctx);
+
+ /* What comes next should be executed on ES threads. */
+ Temp is_es_thread = merged_wave_info_to_mask(ctx, 0);
+ begin_divergent_if_then(ctx, &ic, is_es_thread);
+ bld.reset(ctx->block);
+ }
+
+ /* Export VS outputs */
+ ctx->block->kind |= block_kind_export_end;
+ create_vs_exports(ctx);
+
+ /* Export primitive ID */
+ if (ctx->args->options->key.vs_common_out.export_prim_id) {
+ Temp prim_id;
+
+ if (ctx->stage == ngg_vertex_gs) {
+ /* Wait for GS threads to store primitive ID in LDS. */
+ create_workgroup_barrier(bld);
+
+ /* Calculate LDS address where the GS threads stored the primitive ID. */
+ Temp wave_id_in_tg = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc),
+ get_arg(ctx, ctx->args->merged_wave_info), Operand(24u | (4u << 16)));
+ Temp thread_id_in_wave = emit_mbcnt(ctx, bld.def(v1));
+ Temp wave_id_mul = bld.v_mul24_imm(bld.def(v1), as_vgpr(ctx, wave_id_in_tg), ctx->program->wave_size);
+ Temp thread_id_in_tg = bld.vadd32(bld.def(v1), Operand(wave_id_mul), Operand(thread_id_in_wave));
+ Temp addr = bld.v_mul24_imm(bld.def(v1), thread_id_in_tg, 4u);
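+ /* thread_id_in_tg = wave_id * wave_size + lane, so e.g. with wave64,
+ * wave 1 lane 5 reads its primitive ID from LDS offset (64 + 5) * 4 = 276. */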
+
+ /* Load primitive ID from LDS. */
+ prim_id = load_lds(ctx, 4, bld.tmp(v1), addr, 0u, 4u);
+ } else if (ctx->stage == ngg_tess_eval_gs) {
+ /* TES: Just use the patch ID as the primitive ID. */
+ prim_id = get_arg(ctx, ctx->args->ac.tes_patch_id);
+ } else {
+ unreachable("unsupported NGG shader stage.");
+ }
+
+ ctx->outputs.mask[VARYING_SLOT_PRIMITIVE_ID] |= 0x1;
+ ctx->outputs.temps[VARYING_SLOT_PRIMITIVE_ID * 4u] = prim_id;
+
+ export_vs_varying(ctx, VARYING_SLOT_PRIMITIVE_ID, false, nullptr);
+ }
+
+ if (late_prim_export) {
+ begin_divergent_if_else(ctx, &ic);
+ end_divergent_if(ctx, &ic);
+ bld.reset(ctx->block);
+ }
+}
+
void select_program(Program *program,
unsigned shader_count,
struct nir_shader *const *shaders,
struct radv_shader_args *args)
{
isel_context ctx = setup_isel_context(program, shader_count, shaders, config, args, false);
+ if_context ic_merged_wave_info;
+ bool ngg_no_gs = ctx.stage == ngg_vertex_gs || ctx.stage == ngg_tess_eval_gs;
for (unsigned i = 0; i < shader_count; i++) {
nir_shader *nir = shaders[i];
split_arguments(&ctx, startpgm);
}
+ if (ngg_no_gs) {
+ ngg_emit_sendmsg_gs_alloc_req(&ctx);
+
+ if (ngg_early_prim_export(&ctx))
+ ngg_emit_nogs_gsthreads(&ctx);
+ }
+
/* In a merged VS+TCS HS, the VS implementation can be completely empty. */
nir_function_impl *func = nir_shader_get_entrypoint(nir);
bool empty_shader = nir_cf_list_is_empty_block(&func->body) &&
(nir->info.stage == MESA_SHADER_TESS_EVAL &&
ctx.stage == tess_eval_geometry_gs));
- if_context ic;
- if (shader_count >= 2 && !empty_shader) {
- Builder bld(ctx.program, ctx.block);
- Temp count = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1), bld.def(s1, scc), get_arg(&ctx, args->merged_wave_info), Operand((8u << 16) | (i * 8u)));
- Temp thread_id = emit_mbcnt(&ctx, bld.def(v1));
- Temp cond = bld.vopc(aco_opcode::v_cmp_gt_u32, bld.hint_vcc(bld.def(bld.lm)), count, thread_id);
-
- begin_divergent_if_then(&ctx, &ic, cond);
+ bool check_merged_wave_info = ctx.tcs_in_out_eq ? i == 0 : ((shader_count >= 2 && !empty_shader) || ngg_no_gs);
+ bool endif_merged_wave_info = ctx.tcs_in_out_eq ? i == 1 : check_merged_wave_info;
+ if (check_merged_wave_info) {
+ Temp cond = merged_wave_info_to_mask(&ctx, i);
+ begin_divergent_if_then(&ctx, &ic_merged_wave_info, cond);
}
if (i) {
Builder bld(ctx.program, ctx.block);
- bld.barrier(aco_opcode::p_memory_barrier_shared);
- bld.sopp(aco_opcode::s_barrier);
+ create_workgroup_barrier(bld);
if (ctx.stage == vertex_geometry_gs || ctx.stage == tess_eval_geometry_gs) {
ctx.gs_wave_id = bld.sop2(aco_opcode::s_bfe_u32, bld.def(s1, m0), bld.def(s1, scc), get_arg(&ctx, args->merged_wave_info), Operand((8u << 16) | 16u));
visit_cf_list(&ctx, &func->body);
- if (ctx.program->info->so.num_outputs && (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs))
+ if (ctx.program->info->so.num_outputs && (ctx.stage & hw_vs))
emit_streamout(&ctx, 0);
- if (ctx.stage == vertex_vs || ctx.stage == tess_eval_vs) {
+ if (ctx.stage & hw_vs) {
create_vs_exports(&ctx);
+ ctx.block->kind |= block_kind_export_end;
+ } else if (ngg_no_gs && ngg_early_prim_export(&ctx)) {
+ ngg_emit_nogs_output(&ctx);
} else if (nir->info.stage == MESA_SHADER_GEOMETRY) {
Builder bld(ctx.program, ctx.block);
- bld.barrier(aco_opcode::p_memory_barrier_gs_data);
+ bld.barrier(aco_opcode::p_barrier,
+ memory_sync_info(storage_vmem_output, semantic_release, scope_device));
bld.sopp(aco_opcode::s_sendmsg, bld.m0(ctx.gs_wave_id), -1, sendmsg_gs_done(false, false, 0));
} else if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
write_tcs_tess_factors(&ctx);
}
- if (ctx.stage == fragment_fs)
+ if (ctx.stage == fragment_fs) {
create_fs_exports(&ctx);
+ ctx.block->kind |= block_kind_export_end;
+ }
- if (shader_count >= 2 && !empty_shader) {
- begin_divergent_if_else(&ctx, &ic);
- end_divergent_if(&ctx, &ic);
+ if (endif_merged_wave_info) {
+ begin_divergent_if_else(&ctx, &ic_merged_wave_info);
+ end_divergent_if(&ctx, &ic_merged_wave_info);
}
- ralloc_free(ctx.divergent_vals);
+ if (ngg_no_gs && !ngg_early_prim_export(&ctx))
+ ngg_emit_nogs_output(&ctx);
+
+ if (i == 0 && ctx.stage == vertex_tess_control_hs && ctx.tcs_in_out_eq) {
+ /* Outputs of the previous stage are inputs to the next stage */
+ ctx.inputs = ctx.outputs;
+ ctx.outputs = shader_io_state();
+ }
}
program->config->float_mode = program->blocks[0].fp_mode.val;
append_logical_end(ctx.block);
- ctx.block->kind |= block_kind_uniform | block_kind_export_end;
+ ctx.block->kind |= block_kind_uniform;
Builder bld(ctx.program, ctx.block);
if (ctx.program->wb_smem_l1_on_end)
- bld.smem(aco_opcode::s_dcache_wb, false);
+ bld.smem(aco_opcode::s_dcache_wb, memory_sync_info(storage_buffer, semantic_volatile));
bld.sopp(aco_opcode::s_endpgm);
cleanup_cfg(program);
{
isel_context ctx = setup_isel_context(program, 1, &gs_shader, config, args, true);
- program->next_fp_mode.preserve_signed_zero_inf_nan32 = false;
- program->next_fp_mode.preserve_signed_zero_inf_nan16_64 = false;
- program->next_fp_mode.must_flush_denorms32 = false;
- program->next_fp_mode.must_flush_denorms16_64 = false;
- program->next_fp_mode.care_about_round32 = false;
- program->next_fp_mode.care_about_round16_64 = false;
- program->next_fp_mode.denorm16_64 = fp_denorm_keep;
- program->next_fp_mode.denorm32 = 0;
- program->next_fp_mode.round32 = fp_round_ne;
- program->next_fp_mode.round16_64 = fp_round_ne;
ctx.block->fp_mode = program->next_fp_mode;
add_startpgm(&ctx);
Temp cond = bld.sopc(aco_opcode::s_cmp_eq_u32, bld.def(s1, scc), stream_id, Operand(stream));
append_logical_end(ctx.block);
ctx.block->kind |= block_kind_uniform;
- bld.branch(aco_opcode::p_cbranch_z, cond);
+ bld.branch(aco_opcode::p_cbranch_z, bld.hint_vcc(bld.def(s2)), cond);
BB_endif.kind |= ctx.block->kind & block_kind_top_level;
mubuf->glc = true;
mubuf->slc = true;
mubuf->dlc = args->options->chip_class >= GFX10;
- mubuf->barrier = barrier_none;
- mubuf->can_reorder = true;
ctx.outputs.mask[i] |= 1 << j;
- ctx.outputs.outputs[i][j] = mubuf->definitions[0].getTemp();
+ ctx.outputs.temps[i * 4u + j] = mubuf->definitions[0].getTemp();
bld.insert(std::move(mubuf));
append_logical_end(ctx.block);
/* branch from then block to endif block */
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_edge(ctx.block->index, &BB_endif);
ctx.block->kind |= block_kind_uniform;
append_logical_end(BB_else);
/* branch from else block to endif block */
- bld.branch(aco_opcode::p_branch);
+ bld.branch(aco_opcode::p_branch, bld.hint_vcc(bld.def(s2)));
add_edge(BB_else->index, &BB_endif);
BB_else->kind |= block_kind_uniform;
cleanup_cfg(program);
}
+
+void select_trap_handler_shader(Program *program, struct nir_shader *shader,
+ ac_shader_config* config,
+ struct radv_shader_args *args)
+{
+ assert(args->options->chip_class == GFX8);
+
+ init_program(program, compute_cs, args->shader_info,
+ args->options->chip_class, args->options->family, config);
+
+ isel_context ctx = {};
+ ctx.program = program;
+ ctx.args = args;
+ ctx.options = args->options;
+ ctx.stage = program->stage;
+
+ ctx.block = ctx.program->create_and_insert_block();
+ ctx.block->loop_nest_depth = 0;
+ ctx.block->kind = block_kind_top_level;
+
+ program->workgroup_size = 1; /* XXX */
+
+ add_startpgm(&ctx);
+ append_logical_start(ctx.block);
+
+ Builder bld(ctx.program, ctx.block);
+
+ /* Load the buffer descriptor from TMA. */
+ bld.smem(aco_opcode::s_load_dwordx4, Definition(PhysReg{ttmp4}, s4),
+ Operand(PhysReg{tma}, s2), Operand(0u));
+
+ /* Store TTMP0-TTMP1. */
+ bld.smem(aco_opcode::s_buffer_store_dwordx2, Operand(PhysReg{ttmp4}, s4),
+ Operand(0u), Operand(PhysReg{ttmp0}, s2), memory_sync_info(), true);
+
+ uint32_t hw_regs_idx[] = {
+ 2, /* HW_REG_STATUS */
+ 3, /* HW_REG_TRAP_STS */
+ 4, /* HW_REG_HW_ID */
+ 7, /* HW_REG_IB_STS */
+ };
+
+ /* Store some hardware registers. */
+ for (unsigned i = 0; i < ARRAY_SIZE(hw_regs_idx); i++) {
+ /* "((size - 1) << 11) | register" */
+ bld.sopk(aco_opcode::s_getreg_b32, Definition(PhysReg{ttmp8}, s1),
+ ((20 - 1) << 11) | hw_regs_idx[i]);
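+ /* The s_getreg immediate packs register id [5:0], offset [10:6] (0 here)
+ * and size-1 [15:11]; e.g. HW_REG_STATUS (id 2) is read as ((20 - 1) << 11) | 2. */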
+
+ bld.smem(aco_opcode::s_buffer_store_dword, Operand(PhysReg{ttmp4}, s4),
+ Operand(8u + i * 4), Operand(PhysReg{ttmp8}, s1), memory_sync_info(), true);
+ }
+
+ program->config->float_mode = program->blocks[0].fp_mode.val;
+
+ append_logical_end(ctx.block);
+ ctx.block->kind |= block_kind_uniform;
+ bld.sopp(aco_opcode::s_endpgm);
+
+ cleanup_cfg(program);
+}
}