if (ctx->program->wave_size == 32) {
return thread_id_lo;
+ } else if (ctx->program->chip_class <= GFX7) {
+ Temp thread_id_hi = bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ return thread_id_hi;
} else {
- Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, dst, mask_hi, thread_id_lo);
return thread_id_hi;
}
}
}
}
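+/* Tries to lower a ds_swizzle_b32 bit-mask swizzle to an equivalent DPP control.
+ * With offset[15] == 0, ds_swizzle reads each lane's source lane as
+ * ((lane & and_mask) | or_mask) ^ xor_mask within a group of 32 lanes, where
+ * and_mask = offset[4:0], or_mask = offset[9:5] and xor_mask = offset[14:10].
+ * If all lane bits are kept (and_mask == 0x1f) and the swizzle stays within a
+ * quad or a row of 16 lanes, a DPP v_mov_b32 is cheaper than ds_swizzle.
+ * For example, or_mask = 0 and xor_mask = 1 swaps adjacent lanes,
+ * i.e. dpp_quad_perm(1, 0, 3, 2). */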
+static Temp emit_masked_swizzle(isel_context *ctx, Builder &bld, Temp src, unsigned mask)
+{
+ if (ctx->options->chip_class >= GFX8) {
+ unsigned and_mask = mask & 0x1f;
+ unsigned or_mask = (mask >> 5) & 0x1f;
+ unsigned xor_mask = (mask >> 10) & 0x1f;
+
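+ /* 0xffff is outside the 9-bit DPP control range, so it works as a "no match" sentinel */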
+ uint16_t dpp_ctrl = 0xffff;
+
+ /* TODO: we could use DPP8 for some swizzles */
+ if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
+ /* or/xor only touch the two low lane bits: a permutation within each quad */
+ unsigned res[4] = {0, 1, 2, 3};
+ for (unsigned i = 0; i < 4; i++)
+ res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
+ dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
+ /* lane ^ 8 within a row of 16 lanes is a rotation by 8 */
+ dpp_ctrl = dpp_row_rr(8);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
+ /* lane ^ 0xf reverses each row of 16 lanes */
+ dpp_ctrl = dpp_row_mirror;
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
+ /* lane ^ 0x7 reverses each half-row of 8 lanes */
+ dpp_ctrl = dpp_row_half_mirror;
+ }
+
+ if (dpp_ctrl != 0xffff)
+ return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
+ }
+
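+ /* No DPP equivalent: fall back to ds_swizzle_b32, which goes through the
+ * LDS pipeline (without touching LDS memory) and is usually slower than DPP */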
+ return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
+}
+
Temp as_vgpr(isel_context *ctx, Temp val)
{
if (val.type() == RegType::sgpr) {
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
if (select != Temp())
- hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), select);
+ hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), bld.scc(select));
lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
Temp mid = bld.tmp(s1);
lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
}
}
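+/* Emits a 64-bit bitwise op (iand/ior/ixor) as two independent 32-bit VOP2
+ * instructions on the low and high halves. This is only valid for bitwise
+ * opcodes, where the two halves never interact. */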
+void emit_vop2_instruction_logic64(isel_context *ctx, nir_alu_instr *instr,
+ aco_opcode op, Temp dst)
+{
+ Builder bld(ctx->program, ctx->block);
+ bld.is_precise = instr->exact;
+
+ Temp src0 = get_alu_src(ctx, instr->src[0]);
+ Temp src1 = get_alu_src(ctx, instr->src[1]);
+
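+ /* VOP2 accepts an SGPR only as src0, so move any SGPR source there;
+ * swapping is fine since these bitwise ops are commutative. */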
+ if (src1.type() == RegType::sgpr) {
+ assert(src0.type() == RegType::vgpr);
+ std::swap(src0, src1);
+ }
+
+ Temp src00 = bld.tmp(src0.type(), 1);
+ Temp src01 = bld.tmp(src0.type(), 1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(src00), Definition(src01), src0);
+ Temp src10 = bld.tmp(v1);
+ Temp src11 = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(src10), Definition(src11), src1);
+ Temp lo = bld.vop2(op, bld.def(v1), src00, src10);
+ Temp hi = bld.vop2(op, bld.def(v1), src01, src11);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
+}
+
void emit_vop3a_instruction(isel_context *ctx, nir_alu_instr *instr, aco_opcode op, Temp dst,
bool flush_denorms = false)
{
if (ctx->options->chip_class >= GFX7)
return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
- /* GFX6 doesn't support V_FLOOR_F64, lower it. */
+ /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
+ * lowered at NIR level for precision reasons). */
Temp src0 = as_vgpr(ctx, val);
Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
bld.sop2(Builder::s_and, Definition(dst), bld.def(s1, scc), tmp, Operand(exec, bld.lm));
} else if (dst.regClass() == v1) {
emit_vop1_instruction(ctx, instr, aco_opcode::v_not_b32, dst);
+ } else if (dst.regClass() == v2) {
+ Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
+ bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
+ lo = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), lo);
+ hi = bld.vop1(aco_opcode::v_not_b32, bld.def(v1), hi);
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
} else if (dst.type() == RegType::sgpr) {
aco_opcode opcode = dst.size() == 1 ? aco_opcode::s_not_b32 : aco_opcode::s_not_b64;
bld.sop1(opcode, Definition(dst), bld.def(s1, scc), src);
emit_boolean_logic(ctx, instr, Builder::s_or, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_or_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_or_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_or_b32, dst, true);
} else if (dst.regClass() == s2) {
emit_boolean_logic(ctx, instr, Builder::s_and, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_and_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_and_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_and_b32, dst, true);
} else if (dst.regClass() == s2) {
emit_boolean_logic(ctx, instr, Builder::s_xor, dst);
} else if (dst.regClass() == v1) {
emit_vop2_instruction(ctx, instr, aco_opcode::v_xor_b32, dst, true);
+ } else if (dst.regClass() == v2) {
+ emit_vop2_instruction_logic64(ctx, instr, aco_opcode::v_xor_b32, dst);
} else if (dst.regClass() == s1) {
emit_sop2_instruction(ctx, instr, aco_opcode::s_xor_b32, dst, true);
} else if (dst.regClass() == s2) {
} else if (dst.regClass() == v1) {
emit_rsq(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
} else if (dst.regClass() == v1) {
emit_rcp(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
} else if (dst.regClass() == v1) {
emit_sqrt(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
int byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
if (byte_align) {
- if ((bytes_needed > 2 || !supports_8bit_16bit_loads) && byte_align_loads) {
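+ /* The byte-align path is only needed when the load can't be done directly:
+ * wider than 16 bits, a 16-bit load that isn't 2-byte aligned, or hardware
+ * without 8/16-bit loads */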
+ if ((bytes_needed > 2 ||
+ (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||
+ !supports_8bit_16bit_loads) && byte_align_loads) {
if (info->component_stride) {
assert(supports_8bit_16bit_loads && "unimplemented");
bytes_needed = 2;
/* dword or larger stores have to be dword-aligned */
unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
- unsigned align_offset = instr ? nir_intrinsic_align_mul(instr) : 0;
- bool dword_aligned = (align_offset + offset) % 4 == 0 && align_mul % 4 == 0;
- if (bytes >= 4 && !dword_aligned)
- bytes = MIN2(bytes, 2);
+ unsigned align_offset = (instr ? nir_intrinsic_align_offset(instr) : 0) + offset;
+ bool dword_aligned = align_offset % 4 == 0 && align_mul % 4 == 0;
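+ /* split non-dword-aligned stores into 2-byte pieces if 2-byte aligned, single bytes otherwise */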
+ if (!dword_aligned)
+ bytes = MIN2(bytes, (align_offset % 2 == 0 && align_mul % 2 == 0) ? 2 : 1);
advance_write_mask(&todo, offset, bytes);
write_count_with_skips++;
void load_buffer(isel_context *ctx, unsigned num_components, unsigned component_size,
Temp dst, Temp rsrc, Temp offset, unsigned align_mul, unsigned align_offset,
- bool glc=false, bool readonly=true)
+ bool glc=false, bool readonly=true, bool allow_smem=true)
{
Builder bld(ctx->program, ctx->block);
- bool use_smem = dst.type() != RegType::vgpr && ((ctx->options->chip_class >= GFX8 && component_size >= 4) || readonly);
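+ /* SMEM loads can only write SGPRs, and coherent (GLC) scalar loads only exist on GFX8+ */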
+ bool use_smem = dst.type() != RegType::vgpr && (!glc || ctx->options->chip_class >= GFX8) && allow_smem;
if (use_smem)
offset = bld.as_uniform(offset);
Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[0].ssa));
rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
- bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT);
+ unsigned access = nir_intrinsic_access(instr);
+ bool glc = access & (ACCESS_VOLATILE | ACCESS_COHERENT);
unsigned size = instr->dest.ssa.bit_size / 8;
+
+ uint32_t flags = get_all_buffer_resource_flags(ctx, instr->src[0].ssa, access);
+ /* GLC bypasses VMEM/SMEM caches, so GLC SMEM loads/stores are coherent with GLC VMEM loads/stores
+ * TODO: this optimization is disabled for now because we still need to ensure correct ordering
+ */
+ bool allow_smem = !(flags & (0 && glc ? has_nonglc_vmem_store : has_vmem_store));
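+ /* buffers that are never written (restrict + non-writeable) or whose loads
+ * may be reordered can always use SMEM */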
+ allow_smem |= ((access & ACCESS_RESTRICT) && (access & ACCESS_NON_WRITEABLE)) || (access & ACCESS_CAN_REORDER);
+
load_buffer(ctx, num_components, size, dst, rsrc, get_ssa_temp(ctx, instr->src[1].ssa),
- nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, false);
+ nir_intrinsic_align_mul(instr), nir_intrinsic_align_offset(instr), glc, false, allow_smem);
}
void visit_store_ssbo(isel_context *ctx, nir_intrinsic_instr *instr)
Temp rsrc = convert_pointer_to_64_bit(ctx, get_ssa_temp(ctx, instr->src[1].ssa));
rsrc = bld.smem(aco_opcode::s_load_dwordx4, bld.def(s4), rsrc, Operand(0u));
+ bool glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ uint32_t flags = get_all_buffer_resource_flags(ctx, instr->src[1].ssa, nir_intrinsic_access(instr));
+ /* GLC bypasses VMEM/SMEM caches, so GLC SMEM loads/stores are coherent with GLC VMEM loads/stores
+ * TODO: this optimization is disabled for now because we still need to ensure correct ordering
+ */
+ bool allow_smem = !(flags & (0 && glc ? has_nonglc_vmem_loadstore : has_vmem_loadstore));
+
bool smem = !nir_src_is_divergent(instr->src[2]) &&
ctx->options->chip_class >= GFX8 &&
- elem_size_bytes >= 4;
+ (elem_size_bytes >= 4 || can_subdword_ssbo_store_use_smem(instr)) &&
+ allow_smem;
if (smem)
offset = bld.as_uniform(offset);
bool smem_nonfs = smem && ctx->stage != fragment_fs;
if (op != aco_opcode::p_fs_buffer_store_smem)
store->operands[1].setFixed(m0);
store->operands[2] = Operand(write_datas[i]);
- store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ store->glc = glc;
store->dlc = false;
store->disable_wqm = true;
store->barrier = barrier_buffer;
store->operands[3] = Operand(write_datas[i]);
store->offset = offsets[i];
store->offen = (offset.type() == RegType::vgpr);
- store->glc = nir_intrinsic_access(instr) & (ACCESS_VOLATILE | ACCESS_COHERENT | ACCESS_NON_READABLE);
+ store->glc = glc;
store->dlc = false;
store->disable_wqm = true;
store->barrier = barrier_buffer;
uint32_t mask = nir_intrinsic_swizzle_mask(instr);
if (dst.regClass() == v1) {
emit_wqm(ctx,
- bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false),
+ emit_masked_swizzle(ctx, bld, src, mask),
dst);
} else if (dst.regClass() == v2) {
Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
- lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, mask, 0, false));
- hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, mask, 0, false));
+ lo = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, lo, mask));
+ hi = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, hi, mask));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
}
-Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa)
+Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa, RegClass rc, bool logical)
{
Temp tmp = get_ssa_temp(ctx, ssa);
- if (ssa->parent_instr->type == nir_instr_type_ssa_undef)
- return Operand(tmp.regClass());
- else
+ if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
+ return Operand(rc);
+ } else if (logical && ssa->bit_size == 1 && ssa->parent_instr->type == nir_instr_type_load_const) {
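+ /* Inline the boolean constant as a full lane mask (all ones for true) so the
+ * phi operand is valid for every lane, independent of the exec mask in effect
+ * where the constant was defined. */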
+ if (ctx->program->wave_size == 64)
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT64_MAX : 0u);
+ else
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT32_MAX : 0u);
+ } else {
return Operand(tmp);
+ }
}
void visit_phi(isel_context *ctx, nir_phi_instr *instr)
if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
continue;
cur_pred_idx++;
- Operand op = get_phi_operand(ctx, src.second);
+ Operand op = get_phi_operand(ctx, src.second, dst.regClass(), logical);
operands[num_operands++] = op;
num_defined += !op.isUndefined();
}