if (ctx->program->wave_size == 32) {
return thread_id_lo;
+ } else if (ctx->program->chip_class <= GFX7) {
+ Temp thread_id_hi = bld.vop2(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ return thread_id_hi;
} else {
- Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32, dst, mask_hi, thread_id_lo);
+ Temp thread_id_hi = bld.vop3(aco_opcode::v_mbcnt_hi_u32_b32_e64, dst, mask_hi, thread_id_lo);
return thread_id_hi;
}
}
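+/* NIR's masked swizzle reads, for each lane,
+ * src_lane = ((lane & and_mask) | or_mask) ^ xor_mask, with the three 5-bit
+ * fields packed into the low 15 bits of the mask (the same encoding as
+ * ds_swizzle_b32's bitmask-permute mode). On GFX8+, several of these
+ * patterns map to cheaper DPP lane-sharing controls, which this helper
+ * selects when possible before falling back to an LDS swizzle. */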
+static Temp emit_masked_swizzle(isel_context *ctx, Builder &bld, Temp src, unsigned mask)
+{
+ if (ctx->options->chip_class >= GFX8) {
+ unsigned and_mask = mask & 0x1f;
+ unsigned or_mask = (mask >> 5) & 0x1f;
+ unsigned xor_mask = (mask >> 10) & 0x1f;
+
+ uint16_t dpp_ctrl = 0xffff;
+
+ // TODO: we could use DPP8 for some swizzles
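+ /* A full and_mask with or/xor confined to the low two bits moves data
+ * only within each quad of four lanes, which quad_perm can express. */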
+ if (and_mask == 0x1f && or_mask < 4 && xor_mask < 4) {
+ unsigned res[4] = {0, 1, 2, 3};
+ for (unsigned i = 0; i < 4; i++)
+ res[i] = ((res[i] | or_mask) ^ xor_mask) & 0x3;
+ dpp_ctrl = dpp_quad_perm(res[0], res[1], res[2], res[3]);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 8) {
+ dpp_ctrl = dpp_row_rr(8);
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0xf) {
+ dpp_ctrl = dpp_row_mirror;
+ } else if (and_mask == 0x1f && !or_mask && xor_mask == 0x7) {
+ dpp_ctrl = dpp_row_half_mirror;
+ }
+
+ if (dpp_ctrl != 0xffff)
+ return bld.vop1_dpp(aco_opcode::v_mov_b32, bld.def(v1), src, dpp_ctrl);
+ }
+
+ return bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false);
+}
+
Temp as_vgpr(isel_context *ctx, Temp val)
{
if (val.type() == RegType::sgpr) {
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), vec);
hi = bld.pseudo(aco_opcode::p_extract_vector, bld.def(s1), hi, Operand(0u));
if (select != Temp())
- hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), select);
+ hi = bld.sop2(aco_opcode::s_cselect_b32, bld.def(s1), hi, Operand(0u), bld.scc(select));
lo = bld.sop2(aco_opcode::s_lshr_b64, bld.def(s2), bld.def(s1, scc), lo, shift);
Temp mid = bld.tmp(s1);
lo = bld.pseudo(aco_opcode::p_split_vector, bld.def(s1), Definition(mid), lo);
offset = Operand(0u);
}
- unsigned num_components = dst.bytes() / component_size;
+ unsigned num_components = vec.bytes() / component_size;
if (vec.regClass() == dst.regClass()) {
assert(offset.constantValue() == 0);
bld.copy(Definition(dst), vec);
return;
}
- emit_split_vector(ctx, vec, vec.bytes() / component_size);
+ emit_split_vector(ctx, vec, num_components);
std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
assert(offset.constantValue() % component_size == 0);
unsigned skip = offset.constantValue() / component_size;
- for (unsigned i = 0; i < num_components; i++)
- elems[i] = emit_extract_vector(ctx, vec, i + skip, rc);
+ for (unsigned i = skip; i < num_components; i++)
+ elems[i - skip] = emit_extract_vector(ctx, vec, i, rc);
/* If dst is a VGPR, split the source and create a shrunken version according to the byte offset. */
if (dst.type() == RegType::vgpr) {
+ num_components = dst.bytes() / component_size;
aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
for (unsigned i = 0; i < num_components; i++)
create_vec->operands[i] = Operand(elems[i]);
sop2->operands[0] = Operand(get_alu_src(ctx, instr->src[0]));
sop2->operands[1] = Operand(get_alu_src(ctx, instr->src[1]));
sop2->definitions[0] = Definition(dst);
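+ /* Propagate NIR's no-unsigned-wrap flag so that later passes which fold
+ * additions into memory offsets know this result cannot have wrapped. */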
+ if (instr->no_unsigned_wrap)
+ sop2->definitions[0].setNUW(true);
if (writes_scc)
sop2->definitions[1] = Definition(ctx->program->allocateId(), scc, s1);
ctx->block->instructions.emplace_back(std::move(sop2));
if (ctx->options->chip_class >= GFX7)
return bld.vop1(aco_opcode::v_floor_f64, Definition(dst), val);
- /* GFX6 doesn't support V_FLOOR_F64, lower it. */
+ /* GFX6 doesn't support V_FLOOR_F64, lower it (note that it's actually
+ * lowered at NIR level for precision reasons). */
Temp src0 = as_vgpr(ctx, val);
Temp mask = bld.copy(bld.def(s1), Operand(3u)); /* isnan */
} else if (dst.regClass() == v1) {
emit_rsq(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rsq_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
case nir_op_fneg: {
Temp src = get_alu_src(ctx, instr->src[0]);
if (dst.regClass() == v2b) {
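+ /* 0x3C00 is 1.0 in fp16: the multiply flushes denormals according to the
+ * current FP mode before the sign bit is flipped. */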
+ if (ctx->block->fp_mode.must_flush_denorms16_64)
+ src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
bld.vop2(aco_opcode::v_xor_b32, Definition(dst), Operand(0x8000u), as_vgpr(ctx, src));
} else if (dst.regClass() == v1) {
if (ctx->block->fp_mode.must_flush_denorms32)
case nir_op_fabs: {
Temp src = get_alu_src(ctx, instr->src[0]);
if (dst.regClass() == v2b) {
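+ /* Flush fp16 denormals by multiplying with 1.0, as in fneg above. */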
+ if (ctx->block->fp_mode.must_flush_denorms16_64)
+ src = bld.vop2(aco_opcode::v_mul_f16, bld.def(v2b), Operand((uint16_t)0x3C00), as_vgpr(ctx, src));
bld.vop2(aco_opcode::v_and_b32, Definition(dst), Operand(0x7FFFu), as_vgpr(ctx, src));
} else if (dst.regClass() == v1) {
if (ctx->block->fp_mode.must_flush_denorms32)
} else if (dst.regClass() == v1) {
emit_rcp(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_rcp_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
} else if (dst.regClass() == v1) {
emit_sqrt(ctx, bld, Definition(dst), src);
} else if (dst.regClass() == v2) {
+ /* Lowered at NIR level for precision reasons. */
emit_vop1_instruction(ctx, instr, aco_opcode::v_sqrt_f64, dst);
} else {
fprintf(stderr, "Unimplemented NIR instr bit size: ");
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->src[0].src.ssa->bit_size == 64)
src = bld.vop1(aco_opcode::v_cvt_f32_f64, bld.def(v1), src);
- bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
+ if (instr->op == nir_op_f2f16_rtne && ctx->block->fp_mode.round16_64 != fp_round_ne)
+ /* We emit s_round_mode/s_setreg_imm32 in lower_to_hw_instr to
+ * keep value numbering and the scheduler simpler.
+ */
+ bld.vop1(aco_opcode::p_cvt_f16_f32_rtne, Definition(dst), src);
+ else
+ bld.vop1(aco_opcode::v_cvt_f16_f32, Definition(dst), src);
break;
}
case nir_op_f2f16_rtz: {
break;
}
case nir_op_b2b32:
- case nir_op_b2i32: {
+ case nir_op_b2i8:
+ case nir_op_b2i16:
+ case nir_op_b2i32:
+ case nir_op_b2i64: {
Temp src = get_alu_src(ctx, instr->src[0]);
assert(src.regClass() == bld.lm);
- if (dst.regClass() == s1) {
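+ /* For 64-bit results, produce the 0/1 value in a 32-bit temporary and
+ * zero-extend it with p_create_vector at the end. */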
+ Temp tmp = dst.bytes() == 8 ? bld.tmp(RegClass::get(dst.type(), 4)) : dst;
+ if (tmp.regClass() == s1) {
// TODO: in a post-RA optimization, we can check if src is in VCC, and directly use VCCNZ
- bool_to_scalar_condition(ctx, src, dst);
- } else if (dst.regClass() == v1) {
- bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(dst), Operand(0u), Operand(1u), src);
+ bool_to_scalar_condition(ctx, src, tmp);
+ } else if (tmp.type() == RegType::vgpr) {
+ bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(tmp), Operand(0u), Operand(1u), src);
} else {
unreachable("Invalid register class for b2i32");
}
+
+ if (tmp != dst)
+ bld.pseudo(aco_opcode::p_create_vector, Definition(dst), tmp, Operand(0u));
break;
}
case nir_op_b2b1:
int byte_align = align_mul % 4 == 0 ? align_offset % 4 : -1;
if (byte_align) {
- if ((bytes_needed > 2 || !supports_8bit_16bit_loads) && byte_align_loads) {
+ if ((bytes_needed > 2 ||
+ (bytes_needed == 2 && (align_mul % 2 || align_offset % 2)) ||
+ !supports_8bit_16bit_loads) && byte_align_loads) {
if (info->component_stride) {
assert(supports_8bit_16bit_loads && "unimplemented");
bytes_needed = 2;
/* align offset down if needed */
Operand aligned_offset = offset;
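+ /* The guaranteed alignment is the lowest set bit of align_offset, or
+ * align_mul itself when the offset is a multiple of it. */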
+ unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
if (need_to_align_offset) {
+ align = 4;
Temp offset_tmp = offset.isTemp() ? offset.getTemp() : Temp();
if (offset.isConstant()) {
aligned_offset = Operand(offset.constantValue() & 0xfffffffcu);
Temp aligned_offset_tmp = aligned_offset.isTemp() ? aligned_offset.getTemp() :
bld.copy(bld.def(s1), aligned_offset);
- unsigned align = align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
Temp val = callback(bld, info, aligned_offset_tmp, bytes_needed, align,
reduced_const_offset, byte_align ? Temp() : info->dst);
if (num_tmps > 1) {
aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(
aco_opcode::p_create_vector, Format::PSEUDO, num_tmps, 1)};
- for (unsigned i = 0; i < num_vals; i++)
+ for (unsigned i = 0; i < num_tmps; i++)
vec->operands[i] = Operand(tmp[i]);
tmp[0] = bld.tmp(RegClass::get(reg_type, tmp_size));
vec->definitions[0] = Definition(tmp[0]);
unsigned bytes_size = 0;
aco_opcode op;
- if (bytes_needed == 1) {
+ if (bytes_needed == 1 || align_ % 2) {
bytes_size = 1;
op = aco_opcode::buffer_load_ubyte;
- } else if (bytes_needed == 2) {
+ } else if (bytes_needed == 2 || align_ % 4) {
bytes_size = 2;
op = aco_opcode::buffer_load_ushort;
} else if (bytes_needed <= 4) {
mubuf->barrier = info->barrier;
mubuf->can_reorder = info->can_reorder;
mubuf->offset = const_offset;
- RegClass rc = RegClass::get(RegType::vgpr, align(bytes_size, 4));
+ mubuf->swizzled = info->swizzle_component_size != 0;
+ RegClass rc = RegClass::get(RegType::vgpr, bytes_size);
Temp val = dst_hint.id() && rc == dst_hint.regClass() ? dst_hint : bld.tmp(rc);
mubuf->definitions[0] = Definition(val);
bld.insert(std::move(mubuf));
}
static auto emit_mubuf_load = emit_load<mubuf_load_callback, true, true, 4096>;
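+/* Scratch on GFX6-8 is swizzled in 4-byte elements per lane, so the scratch
+ * variant must not combine loads across element boundaries (presumably what
+ * the false template argument disables). */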
+static auto emit_scratch_load = emit_load<mubuf_load_callback, false, true, 4096>;
Temp get_gfx6_global_rsrc(Builder& bld, Temp addr)
{
/* use allocated_vec if possible */
auto it = ctx->allocated_vec.find(src.id());
if (it != ctx->allocated_vec.end()) {
- unsigned total_size = 0;
- for (unsigned i = 0; it->second[i].bytes() && (i < NIR_MAX_VEC_COMPONENTS); i++)
- total_size += it->second[i].bytes();
- if (total_size != src.bytes())
+ if (!it->second[0].id())
goto split;
-
unsigned elem_size = it->second[0].bytes();
+ assert(src.bytes() % elem_size == 0);
+
+ for (unsigned i = 0; i < src.bytes() / elem_size; i++) {
+ if (!it->second[i].id())
+ goto split;
+ }
for (unsigned i = 0; i < count; i++) {
if (offsets[i] % elem_size || dst[i].bytes() % elem_size)
}
}
+ split:
+
if (dst_type == RegType::sgpr)
src = bld.as_uniform(src);
- split:
/* just split it */
aco_ptr<Instruction> split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, count)};
split->operands[0] = Operand(src);
/* dword or larger stores have to be dword-aligned */
unsigned align_mul = instr ? nir_intrinsic_align_mul(instr) : 4;
- unsigned align_offset = instr ? nir_intrinsic_align_mul(instr) : 0;
- bool dword_aligned = (align_offset + offset) % 4 == 0 && align_mul % 4 == 0;
- if (bytes >= 4 && !dword_aligned)
- bytes = MIN2(bytes, 2);
+ unsigned align_offset = (instr ? nir_intrinsic_align_offset(instr) : 0) + offset;
+ bool dword_aligned = align_offset % 4 == 0 && align_mul % 4 == 0;
+ if (!dword_aligned)
+ bytes = MIN2(bytes, (align_offset % 2 == 0 && align_mul % 2 == 0) ? 2 : 1);
advance_write_mask(&todo, offset, bytes);
write_count_with_skips++;
}
void emit_single_mubuf_store(isel_context *ctx, Temp descriptor, Temp voffset, Temp soffset, Temp vdata,
- unsigned const_offset = 0u, bool allow_reorder = true, bool slc = false)
+ unsigned const_offset = 0u, bool allow_reorder = true, bool slc = false,
+ bool swizzled = false)
{
assert(vdata.id());
assert(vdata.size() != 3 || ctx->program->chip_class != GFX6);
Operand voffset_op = voffset.id() ? Operand(as_vgpr(ctx, voffset)) : Operand(v1);
Operand soffset_op = soffset.id() ? Operand(soffset) : Operand(0u);
Builder::Result r = bld.mubuf(op, Operand(descriptor), voffset_op, soffset_op, Operand(vdata), const_offset,
- /* offen */ !voffset_op.isUndefined(), /* idxen*/ false, /* addr64 */ false,
- /* disable_wqm */ false, /* glc */ true, /* dlc*/ false, /* slc */ slc);
+ /* offen */ !voffset_op.isUndefined(), /* swizzled */ swizzled,
+ /* idxen*/ false, /* addr64 */ false, /* disable_wqm */ false, /* glc */ true,
+ /* dlc*/ false, /* slc */ slc);
static_cast<MUBUF_instruction *>(r.instr)->can_reorder = allow_reorder;
}
for (unsigned i = 0; i < write_count; i++) {
unsigned const_offset = offsets[i] + base_const_offset;
- emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, reorder, slc);
+ emit_single_mubuf_store(ctx, descriptor, voffset, soffset, write_datas[i], const_offset, reorder, slc, !allow_combining);
}
}
if (use_mubuf) {
Instruction *mubuf = bld.mubuf(opcode,
Definition(fetch_dst), list, fetch_index, soffset,
- fetch_offset, false, true).instr;
+ fetch_offset, false, false, true).instr;
static_cast<MUBUF_instruction*>(mubuf)->can_reorder = true;
} else {
Instruction *mtbuf = bld.mtbuf(opcode,
{
Builder bld(ctx->program, ctx->block);
- bool use_smem = dst.type() != RegType::vgpr && (ctx->options->chip_class >= GFX8 || readonly) && allow_smem;
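+ /* SMEM has no GLC bit before GFX8, so coherent (glc) loads must go
+ * through VMEM on GFX6-7. */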
+ bool use_smem = dst.type() != RegType::vgpr && (!glc || ctx->options->chip_class >= GFX8) && allow_smem;
if (use_smem)
offset = bld.as_uniform(offset);
Temp index = bld.as_uniform(get_ssa_temp(ctx, instr->src[0].ssa));
if (offset != 0) // TODO check if index != 0 as well
- index = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
+ index = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc), Operand(offset), index);
Temp ptr = convert_pointer_to_64_bit(ctx, get_arg(ctx, ctx->args->ac.push_constants));
Temp vec = dst;
bool trim = false;
unreachable("unimplemented or forbidden load_push_constant.");
}
- bld.smem(op, Definition(vec), ptr, index);
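+ /* prevent_overflow keeps later optimizations from folding further additions
+ * into this load's offset in a way that could wrap. */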
+ static_cast<SMEM_instruction*>(bld.smem(op, Definition(vec), ptr, index).instr)->prevent_overflow = true;
if (!aligned) {
Operand byte_offset = index_cv ? Operand((offset + index_cv->u32) % 4) : Operand(index);
Temp offset = get_ssa_temp(ctx, instr->src[0].ssa);
if (base && offset.type() == RegType::sgpr)
- offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
+ offset = bld.nuw().sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), offset, Operand(base));
else if (base && offset.type() == RegType::vgpr)
offset = bld.vadd32(bld.def(v1), Operand(base), offset);
aco_ptr<SMEM_instruction> store{create_instruction<SMEM_instruction>(op, Format::SMEM, 3, 0)};
store->operands[0] = Operand(rsrc);
if (offsets[i]) {
- Temp off = bld.sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
- offset, Operand(offsets[i]));
+ Temp off = bld.nuw().sop2(aco_opcode::s_add_i32, bld.def(s1), bld.def(s1, scc),
+ offset, Operand(offsets[i]));
store->operands[1] = Operand(off);
} else {
store->operands[1] = Operand(offset);
op64_rtn = aco_opcode::ds_cmpst_rtn_b64;
num_operands = 4;
break;
+ case nir_intrinsic_shared_atomic_fadd:
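+ /* LDS has no 64-bit float add; num_opcodes marks that size unsupported. */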
+ op32 = aco_opcode::ds_add_f32;
+ op32_rtn = aco_opcode::ds_add_rtn_f32;
+ op64 = aco_opcode::num_opcodes;
+ op64_rtn = aco_opcode::num_opcodes;
+ break;
default:
unreachable("Unhandled shared atomic intrinsic");
}
scratch_addr = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), scratch_addr, Operand(0u));
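+ /* Scratch is swizzled per lane: ADD_TID_ENABLE makes the hardware add
+ * thread_id * element_size to each address, and INDEX_STRIDE encodes the
+ * wave size (3 = 64 lanes, 2 = 32). */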
uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
- S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);;
+ S_008F0C_INDEX_STRIDE(ctx->program->wave_size == 64 ? 3 : 2);
if (ctx->program->chip_class >= GFX10) {
rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
}
- /* older generations need element size = 16 bytes. element size removed in GFX9 */
+ /* older generations need element size = 4 bytes. element size removed in GFX9 */
if (ctx->program->chip_class <= GFX8)
- rsrc_conf |= S_008F0C_ELEMENT_SIZE(3);
+ rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);
return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), scratch_addr, Operand(-1u), Operand(rsrc_conf));
}
instr->dest.ssa.bit_size / 8u, rsrc};
info.align_mul = nir_intrinsic_align_mul(instr);
info.align_offset = nir_intrinsic_align_offset(instr);
- info.swizzle_component_size = 16;
+ info.swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 0;
info.can_reorder = false;
info.soffset = ctx->program->scratch_offset;
- emit_mubuf_load(ctx, bld, &info);
+ emit_scratch_load(ctx, bld, &info);
}
void visit_store_scratch(isel_context *ctx, nir_intrinsic_instr *instr) {
unsigned write_count = 0;
Temp write_datas[32];
unsigned offsets[32];
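+ /* GFX6-8 swizzle scratch in 4-byte elements, so stores must not straddle an
+ * element; on GFX9+ only the 16-byte maximum store size applies. */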
+ unsigned swizzle_component_size = ctx->program->chip_class <= GFX8 ? 4 : 16;
split_buffer_store(ctx, instr, false, RegType::vgpr, data, writemask,
- 16, &write_count, write_datas, offsets);
+ swizzle_component_size, &write_count, write_datas, offsets);
for (unsigned i = 0; i < write_count; i++) {
aco_opcode op = get_buffer_store_op(false, write_datas[i].bytes());
- bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_datas[i], offsets[i], true);
+ bld.mubuf(op, rsrc, offset, ctx->program->scratch_offset, write_datas[i], offsets[i], true, true);
}
}
Temp addr = get_ssa_temp(ctx, instr->src[0].ssa);
nir_const_value* const_addr = nir_src_as_const_value(instr->src[0]);
Temp private_segment_buffer = ctx->program->private_segment_buffer;
+ //TODO: bounds checking?
if (addr.type() == RegType::sgpr) {
Operand offset;
if (const_addr) {
case nir_intrinsic_shared_atomic_xor:
case nir_intrinsic_shared_atomic_exchange:
case nir_intrinsic_shared_atomic_comp_swap:
+ case nir_intrinsic_shared_atomic_fadd:
visit_shared_atomic(ctx, instr);
break;
case nir_intrinsic_image_deref_load:
}
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
uint32_t mask = nir_intrinsic_swizzle_mask(instr);
- if (dst.regClass() == v1) {
- emit_wqm(ctx,
- bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), src, mask, 0, false),
- dst);
+ if (instr->dest.ssa.bit_size == 1) {
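+ /* Booleans live in lane masks: expand to a per-lane 0/-1 VGPR value,
+ * swizzle that, then compare back into a lane mask. */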
+ assert(src.regClass() == bld.lm);
+ src = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), Operand(0u), Operand((uint32_t)-1), src);
+ src = emit_masked_swizzle(ctx, bld, src, mask);
+ Temp tmp = bld.vopc(aco_opcode::v_cmp_lg_u32, bld.def(bld.lm), Operand(0u), src);
+ emit_wqm(ctx, tmp, dst);
+ } else if (dst.regClass() == v1b) {
+ Temp tmp = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask));
+ emit_extract_vector(ctx, tmp, 0, dst);
+ } else if (dst.regClass() == v2b) {
+ Temp tmp = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask));
+ emit_extract_vector(ctx, tmp, 0, dst);
+ } else if (dst.regClass() == v1) {
+ emit_wqm(ctx, emit_masked_swizzle(ctx, bld, src, mask), dst);
} else if (dst.regClass() == v2) {
Temp lo = bld.tmp(v1), hi = bld.tmp(v1);
bld.pseudo(aco_opcode::p_split_vector, Definition(lo), Definition(hi), src);
- lo = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), lo, mask, 0, false));
- hi = emit_wqm(ctx, bld.ds(aco_opcode::ds_swizzle_b32, bld.def(v1), hi, mask, 0, false));
+ lo = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, lo, mask));
+ hi = emit_wqm(ctx, emit_masked_swizzle(ctx, bld, hi, mask));
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), lo, hi);
emit_split_vector(ctx, dst, 2);
} else {
}
-Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa, RegClass rc)
+Operand get_phi_operand(isel_context *ctx, nir_ssa_def *ssa, RegClass rc, bool logical)
{
Temp tmp = get_ssa_temp(ctx, ssa);
- if (ssa->parent_instr->type == nir_instr_type_ssa_undef)
+ if (ssa->parent_instr->type == nir_instr_type_ssa_undef) {
return Operand(rc);
- else
+ } else if (logical && ssa->bit_size == 1 && ssa->parent_instr->type == nir_instr_type_load_const) {
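+ /* For logical phis, a constant boolean becomes an inline all-zeros or
+ * all-ones lane mask sized to the wave instead of reading the temporary
+ * created for the load_const. */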
+ if (ctx->program->wave_size == 64)
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT64_MAX : 0u);
+ else
+ return Operand(nir_instr_as_load_const(ssa->parent_instr)->value[0].b ? UINT32_MAX : 0u);
+ } else {
return Operand(tmp);
+ }
}
void visit_phi(isel_context *ctx, nir_phi_instr *instr)
if (!(ctx->block->kind & block_kind_loop_header) && cur_pred_idx >= preds.size())
continue;
cur_pred_idx++;
- Operand op = get_phi_operand(ctx, src.second, dst.regClass());
+ Operand op = get_phi_operand(ctx, src.second, dst.regClass(), logical);
operands[num_operands++] = op;
num_defined += !op.isUndefined();
}
Temp control_word = bld.copy(bld.def(v1), Operand(0x80000000u));
bld.mubuf(aco_opcode::buffer_store_dword,
/* SRSRC */ hs_ring_tess_factor, /* VADDR */ Operand(v1), /* SOFFSET */ tf_base, /* VDATA */ control_word,
- /* immediate OFFSET */ 0, /* OFFEN */ false, /* idxen*/ false, /* addr64 */ false,
- /* disable_wqm */ false, /* glc */ true);
+ /* immediate OFFSET */ 0, /* OFFEN */ false, /* swizzled */ false, /* idxen*/ false,
+ /* addr64 */ false, /* disable_wqm */ false, /* glc */ true);
tf_const_offset += 4;
begin_divergent_if_else(ctx, &ic_rel_patch_id_is_zero);
{
isel_context ctx = setup_isel_context(program, 1, &gs_shader, config, args, true);
- program->next_fp_mode.preserve_signed_zero_inf_nan32 = false;
- program->next_fp_mode.preserve_signed_zero_inf_nan16_64 = false;
- program->next_fp_mode.must_flush_denorms32 = false;
- program->next_fp_mode.must_flush_denorms16_64 = false;
- program->next_fp_mode.care_about_round32 = false;
- program->next_fp_mode.care_about_round16_64 = false;
- program->next_fp_mode.denorm16_64 = fp_denorm_keep;
- program->next_fp_mode.denorm32 = 0;
- program->next_fp_mode.round32 = fp_round_ne;
- program->next_fp_mode.round16_64 = fp_round_ne;
ctx.block->fp_mode = program->next_fp_mode;
add_startpgm(&ctx);