}
}
-/* this function trims subdword vectors:
- * if dst is vgpr - split the src and create a shrunk version according to the mask.
- * if dst is sgpr - split the src, but move the original to sgpr. */
-void trim_subdword_vector(isel_context *ctx, Temp vec_src, Temp dst, unsigned num_components, unsigned mask)
+/* Extracts dst.bytes() bytes from `vec`, starting at byte `offset`, into `dst`.
+ * `offset` may be a dynamic (VGPR) Temp or a constant; `component_size` is the
+ * byte size of each component of `dst`, so dst receives
+ * dst.bytes() / component_size components. */
+void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst, unsigned component_size)
 {
-   assert(vec_src.type() == RegType::vgpr);
-   emit_split_vector(ctx, vec_src, num_components);
-
    Builder bld(ctx->program, ctx->block);
-   std::array<Temp,NIR_MAX_VEC_COMPONENTS> elems;
-   unsigned component_size = vec_src.bytes() / num_components;
-   RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
+   if (offset.isTemp()) {
+      /* Dynamic offset: split `vec` into dwords, then byte-shift with
+       * v_alignbyte_b32 and continue below with a zero constant offset. */
+      Temp tmp[4] = {vec, vec, vec, vec};
-   unsigned k = 0;
-   for (unsigned i = 0; i < num_components; i++) {
-      if (mask & (1 << i))
-         elems[k++] = emit_extract_vector(ctx, vec_src, i, rc);
+      if (vec.size() == 4) {
+         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1), tmp[3] = bld.tmp(v1);
+         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), Definition(tmp[3]), vec);
+      } else if (vec.size() == 3) {
+         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
+         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
+      } else if (vec.size() == 2) {
+         tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
+         bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
+      }
+      /* NOTE(review): each result dword is taken from the adjacent pair
+       * (tmp[i+1]:tmp[i]) shifted right by `offset` bytes — confirm the
+       * operand order against the ISA's v_alignbyte_b32 definition. */
+      for (unsigned i = 0; i < dst.size(); i++)
+         tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);
+
+      vec = tmp[0];
+      if (dst.size() == 2)
+         vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
+
+      /* byte shift already applied above */
+      offset = Operand(0u);
+   }
+
+   unsigned num_components = dst.bytes() / component_size;
+   /* Same register class and no remaining byte offset: a plain copy suffices. */
+   if (vec.regClass() == dst.regClass()) {
+      assert(offset.constantValue() == 0);
+      bld.copy(Definition(dst), vec);
+      emit_split_vector(ctx, dst, num_components);
+      return;
    }
+   /* Constant offset: extract the wanted components, skipping the first
+    * `skip` components covered by the byte offset. */
+   emit_split_vector(ctx, vec, vec.bytes() / component_size);
+   std::array<Temp, NIR_MAX_VEC_COMPONENTS> elems;
+   RegClass rc = RegClass(RegType::vgpr, component_size).as_subdword();
+
+   assert(offset.constantValue() % component_size == 0);
+   unsigned skip = offset.constantValue() / component_size;
+   for (unsigned i = 0; i < num_components; i++)
+      elems[i] = emit_extract_vector(ctx, vec, i + skip, rc);
+
+   /* if dst is vgpr - split the src and create a shrunk version according to the mask. */
    if (dst.type() == RegType::vgpr) {
-      assert(dst.bytes() == k * component_size);
-      aco_ptr<Pseudo_instruction> vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, k, 1)};
-      for (unsigned i = 0; i < k; i++)
-         vec->operands[i] = Operand(elems[i]);
-      vec->definitions[0] = Definition(dst);
-      bld.insert(std::move(vec));
+      aco_ptr<Pseudo_instruction> create_vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, num_components, 1)};
+      for (unsigned i = 0; i < num_components; i++)
+         create_vec->operands[i] = Operand(elems[i]);
+      create_vec->definitions[0] = Definition(dst);
+      bld.insert(std::move(create_vec));
+
+   /* if dst is sgpr - split the src, but move the original to sgpr. */
+   } else if (skip) {
+      /* Nonzero offset into an SGPR destination: move the whole vector to
+       * SGPRs and let byte_align_scalar do the shifting. */
+      vec = bld.pseudo(aco_opcode::p_as_uniform, bld.def(RegClass(RegType::sgpr, vec.size())), vec);
+      byte_align_scalar(ctx, vec, offset, dst);
    } else {
-      // TODO: alignbyte if mask doesn't start with 1?
-      assert(mask & 1);
-      assert(dst.size() == vec_src.size());
-      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec_src);
+      assert(dst.size() == vec.size());
+      bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), vec);
    }
+
+   /* Remember the per-component split so later extracts can reuse it. */
    ctx->allocated_vec.emplace(dst.id(), elems);
 }
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->src[0].src.ssa->bit_size == 8)
src = convert_int(ctx, bld, src, 8, 16, true);
+ else if (instr->src[0].src.ssa->bit_size == 64)
+ src = convert_int(ctx, bld, src, 64, 32, false);
bld.vop1(aco_opcode::v_cvt_f16_i16, Definition(dst), src);
break;
}
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->src[0].src.ssa->bit_size == 8)
src = convert_int(ctx, bld, src, 8, 16, false);
+ else if (instr->src[0].src.ssa->bit_size == 64)
+ src = convert_int(ctx, bld, src, 64, 32, false);
bld.vop1(aco_opcode::v_cvt_f16_u16, Definition(dst), src);
break;
}
assert(dst.size() == 1);
Temp src = get_alu_src(ctx, instr->src[0]);
if (instr->src[0].src.ssa->bit_size == 8) {
- //TODO: we should use v_cvt_f32_ubyte1/v_cvt_f32_ubyte2/etc depending on the register assignment
bld.vop1(aco_opcode::v_cvt_f32_ubyte0, Definition(dst), src);
} else {
if (instr->src[0].src.ssa->bit_size == 16)
case nir_op_f2i8:
case nir_op_f2i16: {
Temp src = get_alu_src(ctx, instr->src[0]);
+ Temp tmp = dst.type() == RegType::vgpr ? dst : bld.tmp(v1);
if (instr->src[0].src.ssa->bit_size == 16)
- src = bld.vop1(aco_opcode::v_cvt_i16_f16, bld.def(v1), src);
+ src = bld.vop1(aco_opcode::v_cvt_i16_f16, Definition(tmp), src);
else if (instr->src[0].src.ssa->bit_size == 32)
- src = bld.vop1(aco_opcode::v_cvt_i32_f32, bld.def(v1), src);
+ src = bld.vop1(aco_opcode::v_cvt_i32_f32, Definition(tmp), src);
else
- src = bld.vop1(aco_opcode::v_cvt_i32_f64, bld.def(v1), src);
+ src = bld.vop1(aco_opcode::v_cvt_i32_f64, Definition(tmp), src);
- if (dst.type() == RegType::vgpr)
- bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u));
- else
+ if (dst.type() != RegType::vgpr)
bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
break;
}
case nir_op_f2u8:
case nir_op_f2u16: {
Temp src = get_alu_src(ctx, instr->src[0]);
+ Temp tmp = dst.type() == RegType::vgpr ? dst : bld.tmp(v1);
if (instr->src[0].src.ssa->bit_size == 16)
- src = bld.vop1(aco_opcode::v_cvt_u16_f16, bld.def(v1), src);
+ bld.vop1(aco_opcode::v_cvt_u16_f16, Definition(tmp), src);
else if (instr->src[0].src.ssa->bit_size == 32)
- src = bld.vop1(aco_opcode::v_cvt_u32_f32, bld.def(v1), src);
+ bld.vop1(aco_opcode::v_cvt_u32_f32, Definition(tmp), src);
else
- src = bld.vop1(aco_opcode::v_cvt_u32_f64, bld.def(v1), src);
+ bld.vop1(aco_opcode::v_cvt_u32_f64, Definition(tmp), src);
- if (dst.type() == RegType::vgpr)
- bld.pseudo(aco_opcode::p_extract_vector, Definition(dst), src, Operand(0u));
- else
- bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), src);
+ if (dst.type() != RegType::vgpr)
+ bld.pseudo(aco_opcode::p_as_uniform, Definition(dst), tmp);
break;
}
case nir_op_f2i32: {
return new_mask;
}
-void byte_align_vector(isel_context *ctx, Temp vec, Operand offset, Temp dst)
-{
- Builder bld(ctx->program, ctx->block);
- if (offset.isTemp()) {
- Temp tmp[3] = {vec, vec, vec};
-
- if (vec.size() == 3) {
- tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = bld.tmp(v1);
- bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), Definition(tmp[2]), vec);
- } else if (vec.size() == 2) {
- tmp[0] = bld.tmp(v1), tmp[1] = bld.tmp(v1), tmp[2] = tmp[1];
- bld.pseudo(aco_opcode::p_split_vector, Definition(tmp[0]), Definition(tmp[1]), vec);
- }
- for (unsigned i = 0; i < dst.size(); i++)
- tmp[i] = bld.vop3(aco_opcode::v_alignbyte_b32, bld.def(v1), tmp[i + 1], tmp[i], offset);
-
- vec = tmp[0];
- if (dst.size() == 2)
- vec = bld.pseudo(aco_opcode::p_create_vector, bld.def(v2), tmp[0], tmp[1]);
-
- offset = Operand(0u);
- }
-
- if (vec.bytes() == dst.bytes() && offset.constantValue() == 0)
- bld.copy(Definition(dst), vec);
- else
- trim_subdword_vector(ctx, vec, dst, vec.bytes(), ((1 << dst.bytes()) - 1) << offset.constantValue());
-}
-
struct LoadEmitInfo {
Operand offset;
Temp dst;
Temp val = callback(bld, info, aligned_offset_tmp, bytes_needed, align,
reduced_const_offset, byte_align ? Temp() : info->dst);
+ /* the callback wrote directly to dst */
+ if (val == info->dst) {
+ assert(num_vals == 0);
+ emit_split_vector(ctx, info->dst, info->num_components);
+ return;
+ }
+
/* shift result right if needed */
- if (byte_align) {
+ if (info->component_size < 4 && byte_align_loads) {
Operand align((uint32_t)byte_align);
if (byte_align == -1) {
if (offset.isConstant())
align = offset;
}
- if (align.isTemp() || align.constantValue()) {
- assert(val.bytes() >= load_size && "unimplemented");
- Temp new_val = bld.tmp(RegClass::get(val.type(), load_size));
- if (val.type() == RegType::sgpr)
- byte_align_scalar(ctx, val, align, new_val);
- else
- byte_align_vector(ctx, val, align, new_val);
- val = new_val;
- }
+ assert(val.bytes() >= load_size && "unimplemented");
+ if (val.type() == RegType::sgpr)
+ byte_align_scalar(ctx, val, align, info->dst);
+ else
+ byte_align_vector(ctx, val, align, info->dst, component_size);
+ return;
}
/* add result to list and advance */
vals[num_vals++] = val;
}
- /* the callback wrote directly to dst */
- if (vals[0] == info->dst) {
- assert(num_vals == 1);
- emit_split_vector(ctx, info->dst, info->num_components);
- return;
- }
-
/* create array of components */
unsigned components_split = 0;
std::array<Temp, NIR_MAX_VEC_COMPONENTS> allocated_vec;
mubuf->definitions[0] = Definition(val);
bld.insert(std::move(mubuf));
- if (bytes_size < 4)
- val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, bytes_size)), val, Operand(0u));
-
return val;
}
bld.insert(std::move(flat));
}
- if (bytes_size < 4)
- val = bld.pseudo(aco_opcode::p_extract_vector, bld.def(RegClass::get(RegType::vgpr, bytes_size)), val, Operand(0u));
-
return val;
}
if (target == V_008DFC_SQ_EXP_NULL)
return false;
+ /* Replace NaN by zero (only 32-bit) to fix game bugs if requested. */
+ if (ctx->options->enable_mrt_output_nan_fixup &&
+ !is_16bit &&
+ (col_format == V_028714_SPI_SHADER_32_R ||
+ col_format == V_028714_SPI_SHADER_32_GR ||
+ col_format == V_028714_SPI_SHADER_32_AR ||
+ col_format == V_028714_SPI_SHADER_32_ABGR ||
+ col_format == V_028714_SPI_SHADER_FP16_ABGR)) {
+ for (int i = 0; i < 4; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
+
+ Temp isnan = bld.vopc(aco_opcode::v_cmp_class_f32,
+ bld.hint_vcc(bld.def(bld.lm)), values[i],
+ bld.copy(bld.def(v1), Operand(3u)));
+ values[i] = bld.vop2(aco_opcode::v_cndmask_b32, bld.def(v1), values[i],
+ bld.copy(bld.def(v1), Operand(0u)), isnan);
+ }
+ }
+
if ((bool) compr_op) {
for (int i = 0; i < 2; i++) {
/* check if at least one of the values to be compressed is enabled */