return;
if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
+ assert(devinfo->gen < 12);
brw_push_insn_state(p);
brw_set_default_exec_size(p, BRW_EXECUTE_8);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
if (needs_zero_fill) {
if (devinfo->gen < 12)
brw_inst_set_no_dd_clear(devinfo, inst, true);
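+ /* The zero-fill MOV below executes in order with the conversion above
+  * and touches a disjoint word of dst, so it needs no SWSB annotation;
+  * tgl_swsb_null() is the Gen12 counterpart of the NoDDClear/NoDDChk
+  * pair used on earlier gens.
+  */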
+ brw_set_default_swsb(p, tgl_swsb_null());
inst = brw_MOV(p, suboffset(dst, 1), brw_imm_w(0));
if (devinfo->gen < 12)
brw_inst_set_no_dd_check(devinfo, inst, true);
(devinfo->gen >= 7 ? GEN7_SFID_DATAPORT_DATA_CACHE :
devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_RENDER_CACHE :
BRW_SFID_DATAPORT_WRITE);
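+ /* Snapshot the dependency annotation the caller set up for the logical
+  * instruction, so it can be split across the emitted sequence: the
+  * header-setup MOVs take its source-side portion and the final message
+  * takes its destination-side portion.
+  */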
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
uint32_t msg_type;
if (devinfo->gen >= 6)
brw_set_default_exec_size(p, BRW_EXECUTE_8);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
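+ /* The first emitted instruction consumes the source-side portion of the
+  * saved annotation; the destination-side portion is re-applied to the
+  * message below.
+  */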
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
/* set message header global offset field (reg 0, element 2) */
brw_set_default_exec_size(p, BRW_EXECUTE_1);
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p,
retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
mrf.nr,
2), BRW_REGISTER_TYPE_UD),
brw_imm_ud(offset));
brw_pop_insn_state(p);
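+ /* The SEND emitted next reads the header written one instruction ago
+  * (regdist 1) and re-applies the destination-side portion of the
+  * caller's annotation.
+  */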
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
{
unsigned offset)
{
const struct gen_device_info *devinfo = p->devinfo;
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
if (devinfo->gen >= 6)
offset /= 16;
{
brw_push_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
brw_set_default_exec_size(p, BRW_EXECUTE_8);
brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
/* set message header global offset field (reg 0, element 2) */
brw_set_default_exec_size(p, BRW_EXECUTE_1);
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, get_element_ud(mrf, 2), brw_imm_ud(offset));
brw_pop_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
{
(devinfo->gen >= 6 ? GEN6_SFID_DATAPORT_CONSTANT_CACHE :
BRW_SFID_DATAPORT_READ);
const unsigned exec_size = 1 << brw_get_default_exec_size(p);
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
/* On newer hardware, offset is in units of owords. */
if (devinfo->gen >= 6)
brw_push_insn_state(p);
brw_set_default_exec_size(p, BRW_EXECUTE_8);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
brw_MOV(p, mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
/* set message header global offset field (reg 0, element 2) */
brw_set_default_exec_size(p, BRW_EXECUTE_1);
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p,
retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
mrf.nr,
2), BRW_REGISTER_TYPE_UD),
brw_imm_ud(offset));
brw_pop_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
+
brw_inst *insn = next_insn(p, BRW_OPCODE_SEND);
brw_inst_set_sfid(devinfo, insn, target_cache);
struct brw_reg temp = get_element_ud(header, 3);
+ brw_push_insn_state(p);
brw_AND(p, temp, get_element_ud(sampler_index, 0), brw_imm_ud(0x0f0));
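+ /* The SHL reads temp, which the AND above just wrote, so it must carry
+  * an in-order dependency on the previous instruction.
+  */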
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_SHL(p, temp, temp, brw_imm_ud(4));
brw_ADD(p,
get_element_ud(header, 3),
get_element_ud(brw_vec8_grf(0, 0), 3),
temp);
+ brw_pop_insn_state(p);
}
}
brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
brw_set_desc(p, send, desc.ud | desc_imm);
} else {
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_exec_size(p, BRW_EXECUTE_1);
brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
/* Load the indirect descriptor to an address register using OR so the
* caller can specify additional descriptor bits with the desc_imm
brw_pop_insn_state(p);
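+ /* The SEND consuming the address register is emitted one instruction
+  * after the OR, hence the regdist of 1.
+  */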
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
send = next_insn(p, BRW_OPCODE_SEND);
brw_set_src0(p, send, retype(payload, BRW_REGISTER_TYPE_UD));
if (desc.file == BRW_IMMEDIATE_VALUE) {
desc.ud |= desc_imm;
} else {
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_exec_size(p, BRW_EXECUTE_1);
brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
/* Load the indirect descriptor to an address register using OR so the
* caller can specify additional descriptor bits with the desc_imm
brw_pop_insn_state(p);
desc = addr;
+
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
if (ex_desc.file == BRW_IMMEDIATE_VALUE &&
(ex_desc.ud & INTEL_MASK(15, 12)) == 0) {
ex_desc.ud |= ex_desc_imm;
} else {
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
struct brw_reg addr = retype(brw_address_reg(2), BRW_REGISTER_TYPE_UD);
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_exec_size(p, BRW_EXECUTE_1);
brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
/* Load the indirect extended descriptor to an address register using OR
* so the caller can specify additional descriptor bits with the
brw_pop_insn_state(p);
ex_desc = addr;
+
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
send = next_insn(p, devinfo->gen >= 12 ? BRW_OPCODE_SEND : BRW_OPCODE_SENDS);
unsigned desc_imm)
{
if (surface.file != BRW_IMMEDIATE_VALUE) {
+ const struct tgl_swsb swsb = brw_get_default_swsb(p);
struct brw_reg addr = retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD);
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_exec_size(p, BRW_EXECUTE_1);
brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
/* Mask out invalid bits from the surface index to avoid hangs e.g. when
* some surface array is accessed out of bounds.
brw_pop_insn_state(p);
surface = addr;
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
brw_send_indirect_message(p, sfid, dst, payload, surface, desc_imm, false);
brw_MOV(p, dst, offset(dst, 1));
}
- if (stall)
+ if (stall) {
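+ /* Wait until the fence message above has written its response to dst
+  * before the read-back MOV executes, by reusing the SBID allocated for
+  * the SEND as a destination dependency.
+  */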
+ brw_set_default_swsb(p, tgl_swsb_sbid(TGL_SBID_DST,
+ brw_get_default_swsb(p).sbid));
+
brw_MOV(p, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW), dst);
+ }
brw_pop_insn_state(p);
}
* hardware.
*/
brw_SHR(p, vec1(dst), mask, brw_imm_ud(qtr_control * 8));
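+ /* The AND reads vec1(dst) written by the SHR above. */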
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_AND(p, vec1(dst), exec_mask, vec1(dst));
exec_mask = vec1(dst);
}
* register is above this limit.
*/
if (offset >= limit) {
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_ADD(p, addr, addr, brw_imm_ud(offset - offset % limit));
offset = offset % limit;
}
brw_pop_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
+
/* Use indirect addressing to fetch the specified component. */
if (type_sz(src.type) > 4 &&
(devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
retype(brw_vec1_indirect(addr.subnr, offset),
BRW_REGISTER_TYPE_D));
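+ /* The second half of the 64-bit value can be moved without further
+  * synchronization: the MOV above already waited on the address setup,
+  * and the two MOVs execute back to back in order.
+  */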
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
retype(brw_vec1_indirect(addr.subnr, offset + 4),
BRW_REGISTER_TYPE_D));
brw_float_controls_mode(struct brw_codegen *p,
unsigned mode, unsigned mask)
{
- brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
- brw_imm_ud(~mask));
- brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
-
/* From the Skylake PRM, Volume 7, page 760:
* "Implementation Restriction on Register Access: When the control
* register is used as an explicit source and/or destination, hardware
* does not ensure execution pipeline coherency. Software must set the
* thread control field to ‘switch’ for an instruction that uses
* control register as an explicit operand."
+ *
+ * On Gen12+ this is implemented in terms of SWSB annotations instead.
*/
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
+
+ brw_inst *inst = brw_AND(p, brw_cr0_reg(0), brw_cr0_reg(0),
+ brw_imm_ud(~mask));
+ brw_inst_set_exec_size(p->devinfo, inst, BRW_EXECUTE_1);
if (p->devinfo->gen < 12)
brw_inst_set_thread_control(p->devinfo, inst, BRW_THREAD_SWITCH);
if (p->devinfo->gen < 12)
brw_inst_set_thread_control(p->devinfo, inst_or, BRW_THREAD_SWITCH);
}
+
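+ /* On Gen12 a SYNC.NOP carrying the pending regdist dependency makes
+  * sure the cr0 writes above have landed before subsequent instructions
+  * execute, replacing the pre-Gen12 thread-switch behavior.
+  */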
+ if (p->devinfo->gen >= 12)
+ brw_SYNC(p, TGL_SYNC_NOP);
}
* of case-by-case work. It's just not worth it.
*/
brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
if (type_sz(reg.type) > 4 &&
((devinfo->gen == 7 && !devinfo->is_haswell) ||
*/
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
} else {
src.hstride - 1));
/* Add on the register start offset */
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_ADD(p, addr, addr, brw_imm_uw(src.nr * REG_SIZE + src.subnr));
if (type_sz(src.type) > 4 &&
assert(dst.hstride == 1);
brw_MOV(p, dst_d,
retype(brw_VxH_indirect(0, 0), BRW_REGISTER_TYPE_D));
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, byte_offset(dst_d, 4),
retype(brw_VxH_indirect(0, 4), BRW_REGISTER_TYPE_D));
} else {
retype(brw_VxH_indirect(0, 0), src.type));
}
}
+
+ brw_set_default_swsb(p, tgl_swsb_null());
}
}
brw_inst_set_no_dd_clear(devinfo, insn, c < 3);
brw_inst_set_no_dd_check(devinfo, insn, c > 0);
}
+
+ brw_set_default_swsb(p, tgl_swsb_null());
}
break;
fs_generator::generate_barrier(fs_inst *, struct brw_reg src)
{
brw_barrier(p, src);
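+ /* The WAIT synchronizes through the notification register rather than
+  * through the scoreboard, so clear the default annotation left over
+  * from the barrier message.
+  */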
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_WAIT(p);
}
/* Set up an implied move from g0 to the MRF. */
src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
} else {
+ const tgl_swsb swsb = brw_get_default_swsb(p);
assert(inst->base_mrf != -1);
struct brw_reg header_reg = brw_message_reg(inst->base_mrf);
brw_push_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
brw_set_default_exec_size(p, BRW_EXECUTE_8);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
/* Explicitly set up the message header by copying g0 to the MRF. */
brw_MOV(p, header_reg, brw_vec8_grf(0, 0));
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_set_default_exec_size(p, BRW_EXECUTE_1);
if (inst->offset) {
}
brw_pop_insn_state(p);
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
}
}
brw_ADD(p, byte_offset(dst, g * type_size),
negate(byte_offset(src, g * type_size)),
byte_offset(src, (g + 2) * type_size));
+ brw_set_default_swsb(p, tgl_swsb_null());
}
brw_pop_insn_state(p);
} else {
const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
MIN2(16, inst->exec_size);
const unsigned block_size = 4 * lower_size / REG_SIZE;
+ const tgl_swsb swsb = brw_get_default_swsb(p);
assert(inst->mlen != 0);
brw_push_insn_state(p);
for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
brw_set_default_group(p, inst->group + lower_size * i);
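+ /* Every iteration writes the same MRF payload registers, so before
+  * overwriting them wait until the previous scratch-write message has
+  * finished reading them (TGL_SYNC_ALLRD); only the first iteration
+  * needs the caller's source dependencies.
+  */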
+ if (i > 0) {
+ brw_set_default_swsb(p, tgl_swsb_null());
+ brw_SYNC(p, TGL_SYNC_ALLRD);
+ } else {
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
+ }
+
brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));
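+ /* Intermediate iterations only need an in-order dependency on the
+  * payload MOV just emitted; the final message also re-applies the
+  * caller's destination dependency.
+  */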
+ if (i + 1 < inst->exec_size / lower_size)
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
+ else
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
+
brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
block_size,
inst->offset + block_size * REG_SIZE * i);
BRW_DATAPORT_READ_TARGET_DATA_CACHE));
} else {
+ const tgl_swsb swsb = brw_get_default_swsb(p);
struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
brw_push_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
/* a0.0 = surf_index & 0xff */
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
brw_set_dest(p, insn_and, addr);
brw_set_src0(p, insn_and, vec1(retype(surf_index, BRW_REGISTER_TYPE_UD)));
brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
/* dst = send(payload, a0.0 | <descriptor>) */
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
brw_send_indirect_message(
p, GEN6_SFID_DATAPORT_CONSTANT_CACHE,
retype(dst, BRW_REGISTER_TYPE_UD),
brw_inst_set_exec_size(devinfo, insn, cvt(lower_size) - 1);
brw_inst_set_group(devinfo, insn, inst->group + lower_size * i);
brw_inst_set_compression(devinfo, insn, lower_size > 8);
+ brw_set_default_swsb(p, tgl_swsb_null());
}
}
/* Now the form:
* 0xhhhh0000
*/
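+ /* The SHL reads back dst, written by the conversion above. */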
+ brw_set_default_swsb(p, tgl_swsb_regdist(1));
brw_SHL(p, dst, dst, brw_imm_ud(16u));
/* And, finally the form of packHalf2x16's output:
struct brw_reg offset,
struct brw_reg value)
{
+ const tgl_swsb swsb = brw_get_default_swsb(p);
+
assert(devinfo->gen >= 7);
brw_push_insn_state(p);
brw_set_default_mask_control(p, true);
+ brw_set_default_swsb(p, tgl_swsb_src_dep(swsb));
assert(payload.file == BRW_GENERAL_REGISTER_FILE);
struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
* out of this path, so we just emit the MOVs from here.
*/
brw_MOV(p, payload_offset, offset);
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, payload_value, value);
+ brw_set_default_swsb(p, tgl_swsb_dst_dep(swsb, 1));
brw_shader_time_add(p, payload,
prog_data->binding_table.shader_time_start);
brw_pop_insn_state(p);
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_MOV(p, dst, src[1]);
brw_set_default_mask_control(p, BRW_MASK_ENABLE);
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, dst, src[0]);
break;
assert(src[0].type == dst.type);
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 0),
subscript(strided, BRW_REGISTER_TYPE_D, 0));
+ brw_set_default_swsb(p, tgl_swsb_null());
brw_MOV(p, subscript(dst, BRW_REGISTER_TYPE_D, 1),
subscript(strided, BRW_REGISTER_TYPE_D, 1));
} else {