struct brw_reg *src,
GLuint msg_reg_nr)
{
- struct intel_context *intel = &p->brw->intel;
- if (intel->gen < 6)
+ struct brw_context *brw = p->brw;
+ if (brw->gen < 6)
return;
if (src->file == BRW_MESSAGE_REGISTER_FILE)
static void
gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg)
{
- /* From the BSpec / ISA Reference / send - [DevIVB+]:
+ /* From the Ivybridge PRM, Volume 4 Part 3, page 218 ("send"):
* "The send with EOT should use register space R112-R127 for <src>. This is
* to enable loading of a new thread into the same slot while the message
* with EOT for current thread is pending dispatch."
* Since we're pretending to have 16 MRFs anyway, we may as well use the
* registers required for messages with EOT.
*/
- struct intel_context *intel = &p->brw->intel;
- if (intel->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
+ struct brw_context *brw = p->brw;
+ if (brw->gen == 7 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
reg->file = BRW_GENERAL_REGISTER_FILE;
reg->nr += GEN7_MRF_HACK_START;
}
struct brw_reg reg)
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
if (reg.type != BRW_ARCHITECTURE_REGISTER_FILE)
assert(reg.nr < 128);
   gen7_convert_mrf_to_grf(p, &reg);
- if (intel->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND ||
+ if (brw->gen >= 6 && (insn->header.opcode == BRW_OPCODE_SEND ||
insn->header.opcode == BRW_OPCODE_SENDC)) {
/* Any source modifiers or regions will be ignored, since this just
* identifies the MRF/GRF to start reading the message contents from.
bool header_present,
bool end_of_thread)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
brw_set_src1(p, inst, brw_imm_d(0));
- if (intel->gen >= 5) {
+ if (brw->gen >= 5) {
inst->bits3.generic_gen5.header_present = header_present;
inst->bits3.generic_gen5.response_length = response_length;
inst->bits3.generic_gen5.msg_length = msg_length;
inst->bits3.generic_gen5.end_of_thread = end_of_thread;
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
/* On Gen6+ Message target/SFID goes in bits 27:24 of the header */
inst->header.destreg__conditionalmod = sfid;
} else {
GLuint dataType )
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
unsigned msg_length;
unsigned response_length;
brw_set_message_descriptor(p, insn, BRW_SFID_MATH,
msg_length, response_length, false, false);
- if (intel->gen == 5) {
+ if (brw->gen == 5) {
insn->bits3.math_gen5.function = function;
insn->bits3.math_gen5.int_type = integer_type;
insn->bits3.math_gen5.precision = low_precision;
static void brw_set_urb_message( struct brw_compile *p,
struct brw_instruction *insn,
- bool allocate,
- bool used,
+ enum brw_urb_write_flags flags,
GLuint msg_length,
GLuint response_length,
- bool end_of_thread,
- bool complete,
GLuint offset,
GLuint swizzle_control )
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
brw_set_message_descriptor(p, insn, BRW_SFID_URB,
- msg_length, response_length, true, end_of_thread);
- if (intel->gen == 7) {
+ msg_length, response_length, true,
+ flags & BRW_URB_WRITE_EOT);
+ if (brw->gen == 7) {
insn->bits3.urb_gen7.opcode = 0; /* URB_WRITE_HWORD */
insn->bits3.urb_gen7.offset = offset;
assert(swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
insn->bits3.urb_gen7.swizzle_control = swizzle_control;
- /* per_slot_offset = 0 makes it ignore offsets in message header */
- insn->bits3.urb_gen7.per_slot_offset = 0;
- insn->bits3.urb_gen7.complete = complete;
- } else if (intel->gen >= 5) {
+ insn->bits3.urb_gen7.per_slot_offset =
+ flags & BRW_URB_WRITE_PER_SLOT_OFFSET ? 1 : 0;
+ insn->bits3.urb_gen7.complete = flags & BRW_URB_WRITE_COMPLETE ? 1 : 0;
+ } else if (brw->gen >= 5) {
insn->bits3.urb_gen5.opcode = 0; /* URB_WRITE */
insn->bits3.urb_gen5.offset = offset;
insn->bits3.urb_gen5.swizzle_control = swizzle_control;
- insn->bits3.urb_gen5.allocate = allocate;
- insn->bits3.urb_gen5.used = used; /* ? */
- insn->bits3.urb_gen5.complete = complete;
+ insn->bits3.urb_gen5.allocate = flags & BRW_URB_WRITE_ALLOCATE ? 1 : 0;
+ insn->bits3.urb_gen5.used = flags & BRW_URB_WRITE_UNUSED ? 0 : 1;
+ insn->bits3.urb_gen5.complete = flags & BRW_URB_WRITE_COMPLETE ? 1 : 0;
} else {
insn->bits3.urb.opcode = 0; /* ? */
insn->bits3.urb.offset = offset;
insn->bits3.urb.swizzle_control = swizzle_control;
- insn->bits3.urb.allocate = allocate;
- insn->bits3.urb.used = used; /* ? */
- insn->bits3.urb.complete = complete;
+ insn->bits3.urb.allocate = flags & BRW_URB_WRITE_ALLOCATE ? 1 : 0;
+ insn->bits3.urb.used = flags & BRW_URB_WRITE_UNUSED ? 0 : 1;
+ insn->bits3.urb.complete = flags & BRW_URB_WRITE_COMPLETE ? 1 : 0;
}
}
GLuint send_commit_msg)
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
unsigned sfid;
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
/* Use the Render Cache for RT writes; otherwise use the Data Cache */
if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
else
sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
/* Use the render cache for all write messages. */
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
} else {
brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
header_present, end_of_thread);
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
insn->bits3.gen7_dp.binding_table_index = binding_table_index;
insn->bits3.gen7_dp.msg_control = msg_control;
insn->bits3.gen7_dp.last_render_target = last_render_target;
insn->bits3.gen7_dp.msg_type = msg_type;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
insn->bits3.gen6_dp.binding_table_index = binding_table_index;
insn->bits3.gen6_dp.msg_control = msg_control;
insn->bits3.gen6_dp.last_render_target = last_render_target;
insn->bits3.gen6_dp.msg_type = msg_type;
insn->bits3.gen6_dp.send_commit_msg = send_commit_msg;
- } else if (intel->gen == 5) {
+ } else if (brw->gen == 5) {
insn->bits3.dp_write_gen5.binding_table_index = binding_table_index;
insn->bits3.dp_write_gen5.msg_control = msg_control;
insn->bits3.dp_write_gen5.last_render_target = last_render_target;
GLuint response_length)
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
unsigned sfid;
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
else
brw_set_message_descriptor(p, insn, sfid, msg_length, response_length,
header_present, false);
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
insn->bits3.gen7_dp.binding_table_index = binding_table_index;
insn->bits3.gen7_dp.msg_control = msg_control;
insn->bits3.gen7_dp.last_render_target = 0;
insn->bits3.gen7_dp.msg_type = msg_type;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
insn->bits3.gen6_dp.binding_table_index = binding_table_index;
insn->bits3.gen6_dp.msg_control = msg_control;
insn->bits3.gen6_dp.last_render_target = 0;
insn->bits3.gen6_dp.msg_type = msg_type;
insn->bits3.gen6_dp.send_commit_msg = 0;
- } else if (intel->gen == 5) {
+ } else if (brw->gen == 5) {
insn->bits3.dp_read_gen5.binding_table_index = binding_table_index;
insn->bits3.dp_read_gen5.msg_control = msg_control;
insn->bits3.dp_read_gen5.msg_type = msg_type;
insn->bits3.dp_read_gen5.target_cache = target_cache;
- } else if (intel->is_g4x) {
+ } else if (brw->is_g4x) {
insn->bits3.dp_read_g4x.binding_table_index = binding_table_index; /*0:7*/
insn->bits3.dp_read_g4x.msg_control = msg_control; /*8:10*/
insn->bits3.dp_read_g4x.msg_type = msg_type; /*11:13*/
GLuint return_format)
{
struct brw_context *brw = p->brw;
- struct intel_context *intel = &brw->intel;
brw_set_message_descriptor(p, insn, BRW_SFID_SAMPLER, msg_length,
response_length, header_present, false);
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
insn->bits3.sampler_gen7.binding_table_index = binding_table_index;
insn->bits3.sampler_gen7.sampler = sampler;
insn->bits3.sampler_gen7.msg_type = msg_type;
insn->bits3.sampler_gen7.simd_mode = simd_mode;
- } else if (intel->gen >= 5) {
+ } else if (brw->gen >= 5) {
insn->bits3.sampler_gen5.binding_table_index = binding_table_index;
insn->bits3.sampler_gen5.sampler = sampler;
insn->bits3.sampler_gen5.msg_type = msg_type;
insn->bits3.sampler_gen5.simd_mode = simd_mode;
- } else if (intel->is_g4x) {
+ } else if (brw->is_g4x) {
insn->bits3.sampler_g4x.binding_table_index = binding_table_index;
insn->bits3.sampler_g4x.sampler = sampler;
insn->bits3.sampler_g4x.msg_type = msg_type;
struct brw_reg src1,
struct brw_reg src2)
{
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn = next_insn(p, opcode);
gen7_convert_mrf_to_grf(p, &dest);
dest.file == BRW_MESSAGE_REGISTER_FILE);
assert(dest.nr < 128);
assert(dest.address_mode == BRW_ADDRESS_DIRECT);
- assert(dest.type == BRW_REGISTER_TYPE_F);
+ assert(dest.type == BRW_REGISTER_TYPE_F ||
+ dest.type == BRW_REGISTER_TYPE_D ||
+ dest.type == BRW_REGISTER_TYPE_UD);
insn->bits1.da3src.dest_reg_file = (dest.file == BRW_MESSAGE_REGISTER_FILE);
insn->bits1.da3src.dest_reg_nr = dest.nr;
insn->bits1.da3src.dest_subreg_nr = dest.subnr / 16;
assert(src0.file == BRW_GENERAL_REGISTER_FILE);
assert(src0.address_mode == BRW_ADDRESS_DIRECT);
assert(src0.nr < 128);
- assert(src0.type == BRW_REGISTER_TYPE_F);
insn->bits2.da3src.src0_swizzle = src0.dw1.bits.swizzle;
insn->bits2.da3src.src0_subreg_nr = get_3src_subreg_nr(src0);
insn->bits2.da3src.src0_reg_nr = src0.nr;
assert(src1.file == BRW_GENERAL_REGISTER_FILE);
assert(src1.address_mode == BRW_ADDRESS_DIRECT);
assert(src1.nr < 128);
- assert(src1.type == BRW_REGISTER_TYPE_F);
insn->bits2.da3src.src1_swizzle = src1.dw1.bits.swizzle;
insn->bits2.da3src.src1_subreg_nr_low = get_3src_subreg_nr(src1) & 0x3;
insn->bits3.da3src.src1_subreg_nr_high = get_3src_subreg_nr(src1) >> 2;
assert(src2.file == BRW_GENERAL_REGISTER_FILE);
assert(src2.address_mode == BRW_ADDRESS_DIRECT);
assert(src2.nr < 128);
- assert(src2.type == BRW_REGISTER_TYPE_F);
insn->bits3.da3src.src2_swizzle = src2.dw1.bits.swizzle;
insn->bits3.da3src.src2_subreg_nr = get_3src_subreg_nr(src2);
insn->bits3.da3src.src2_rep_ctrl = src2.vstride == BRW_VERTICAL_STRIDE_0;
insn->bits1.da3src.src2_abs = src2.abs;
insn->bits1.da3src.src2_negate = src2.negate;
+ if (brw->gen >= 7) {
+ /* Set both the source and destination types based on dest.type,
+ * ignoring the source register types. The MAD and LRP emitters ensure
+ * that all four types are float. The BFE and BFI2 emitters, however,
+ * may send us mixed D and UD types and want us to ignore that and use
+ * the destination type.
+ */
+ switch (dest.type) {
+ case BRW_REGISTER_TYPE_F:
+ insn->bits1.da3src.src_type = BRW_3SRC_TYPE_F;
+ insn->bits1.da3src.dst_type = BRW_3SRC_TYPE_F;
+ break;
+ case BRW_REGISTER_TYPE_D:
+ insn->bits1.da3src.src_type = BRW_3SRC_TYPE_D;
+ insn->bits1.da3src.dst_type = BRW_3SRC_TYPE_D;
+ break;
+ case BRW_REGISTER_TYPE_UD:
+ insn->bits1.da3src.src_type = BRW_3SRC_TYPE_UD;
+ insn->bits1.da3src.dst_type = BRW_3SRC_TYPE_UD;
+ break;
+ }
+ }
+
return insn;
}
return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
}
+#define ALU3F(OP) \
+struct brw_instruction *brw_##OP(struct brw_compile *p, \
+ struct brw_reg dest, \
+ struct brw_reg src0, \
+ struct brw_reg src1, \
+ struct brw_reg src2) \
+{ \
+ assert(dest.type == BRW_REGISTER_TYPE_F); \
+ assert(src0.type == BRW_REGISTER_TYPE_F); \
+ assert(src1.type == BRW_REGISTER_TYPE_F); \
+ assert(src2.type == BRW_REGISTER_TYPE_F); \
+ return brw_alu3(p, BRW_OPCODE_##OP, dest, src0, src1, src2); \
+}
+
/* Rounding operations (other than RNDD) require two instructions - the first
* stores a rounded value (possibly the wrong way) in the dest register, but
* also sets a per-channel "increment bit" in the flag register. A predicated
brw_set_dest(p, rnd, dest); \
brw_set_src0(p, rnd, src); \
\
- if (p->brw->intel.gen < 6) { \
+ if (p->brw->gen < 6) { \
/* turn on round-increments */ \
rnd->header.destreg__conditionalmod = BRW_CONDITIONAL_R; \
add = brw_ADD(p, dest, dest, brw_imm_f(1.0f)); \
ALU2(DP2)
ALU2(LINE)
ALU2(PLN)
-ALU3(MAD)
+ALU3F(MAD)
+ALU3F(LRP)
+ALU1(BFREV)
+ALU3(BFE)
+ALU2(BFI1)
+ALU3(BFI2)
+ALU1(FBH)
+ALU1(FBL)
+ALU1(CBIT)
ROUND(RNDZ)
ROUND(RNDE)
struct brw_instruction *
brw_IF(struct brw_compile *p, GLuint execute_size)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
insn = next_insn(p, BRW_OPCODE_IF);
/* Override the defaults for this instruction:
*/
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
brw_set_dest(p, insn, brw_ip_reg());
brw_set_src0(p, insn, brw_ip_reg());
brw_set_src1(p, insn, brw_imm_d(0x0));
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
brw_set_dest(p, insn, brw_imm_w(0));
insn->bits1.branch_gen6.jump_count = 0;
brw_set_src0(p, insn, vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_D)));
struct brw_instruction *else_inst,
struct brw_instruction *endif_inst)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
/* We shouldn't be patching IF and ELSE instructions in single program flow
* mode when gen < 6, because in single program flow mode on those
* instructions to conditional ADDs. So we do patch IF and ELSE
* instructions in single program flow mode on those platforms.
*/
- if (intel->gen < 6)
+ if (brw->gen < 6)
assert(!p->single_program_flow);
assert(if_inst != NULL && if_inst->header.opcode == BRW_OPCODE_IF);
/* Jump count is for 64bit data chunk each, so one 128bit instruction
* requires 2 chunks.
*/
- if (intel->gen >= 5)
+ if (brw->gen >= 5)
br = 2;
assert(endif_inst->header.opcode == BRW_OPCODE_ENDIF);
if (else_inst == NULL) {
/* Patch IF -> ENDIF */
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
/* Turn it into an IFF, which means no mask stack operations for
* all-false and jumping past the ENDIF.
*/
if_inst->bits3.if_else.jump_count = br * (endif_inst - if_inst + 1);
if_inst->bits3.if_else.pop_count = 0;
if_inst->bits3.if_else.pad0 = 0;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
/* As of gen6, there is no IFF and IF must point to the ENDIF. */
if_inst->bits1.branch_gen6.jump_count = br * (endif_inst - if_inst);
} else {
else_inst->header.execution_size = if_inst->header.execution_size;
/* Patch IF -> ELSE */
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
if_inst->bits3.if_else.jump_count = br * (else_inst - if_inst);
if_inst->bits3.if_else.pop_count = 0;
if_inst->bits3.if_else.pad0 = 0;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
if_inst->bits1.branch_gen6.jump_count = br * (else_inst - if_inst + 1);
}
/* Patch ELSE -> ENDIF */
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
/* BRW_OPCODE_ELSE pre-gen6 should point just past the
* matching ENDIF.
*/
else_inst->bits3.if_else.jump_count = br*(endif_inst - else_inst + 1);
else_inst->bits3.if_else.pop_count = 1;
else_inst->bits3.if_else.pad0 = 0;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
/* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
else_inst->bits1.branch_gen6.jump_count = br*(endif_inst - else_inst);
} else {
void
brw_ELSE(struct brw_compile *p)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
insn = next_insn(p, BRW_OPCODE_ELSE);
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
brw_set_dest(p, insn, brw_ip_reg());
brw_set_src0(p, insn, brw_ip_reg());
brw_set_src1(p, insn, brw_imm_d(0x0));
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
brw_set_dest(p, insn, brw_imm_w(0));
insn->bits1.branch_gen6.jump_count = 0;
brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
void
brw_ENDIF(struct brw_compile *p)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn = NULL;
struct brw_instruction *else_inst = NULL;
struct brw_instruction *if_inst = NULL;
* instructions to conditional ADDs. So we only do this trick on Gen4 and
* Gen5.
*/
- if (intel->gen < 6 && p->single_program_flow)
+ if (brw->gen < 6 && p->single_program_flow)
emit_endif = false;
/*
return;
}
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
brw_set_dest(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
brw_set_src0(p, insn, retype(brw_vec4_grf(0,0), BRW_REGISTER_TYPE_UD));
brw_set_src1(p, insn, brw_imm_d(0x0));
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
brw_set_dest(p, insn, brw_imm_w(0));
brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
brw_set_src1(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
insn->header.thread_control = BRW_THREAD_SWITCH;
/* Also pop item off the stack in the endif instruction: */
- if (intel->gen < 6) {
+ if (brw->gen < 6) {
insn->bits3.if_else.jump_count = 0;
insn->bits3.if_else.pop_count = 1;
insn->bits3.if_else.pad0 = 0;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
insn->bits1.branch_gen6.jump_count = 2;
} else {
insn->bits3.break_cont.jip = 2;
struct brw_instruction *brw_BREAK(struct brw_compile *p)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
insn = next_insn(p, BRW_OPCODE_BREAK);
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
brw_set_src0(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
brw_set_src1(p, insn, brw_imm_d(0x0));
*/
struct brw_instruction *brw_DO(struct brw_compile *p, GLuint execute_size)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
- if (intel->gen >= 6 || p->single_program_flow) {
+ if (brw->gen >= 6 || p->single_program_flow) {
push_loop_stack(p, &p->store[p->nr_insn]);
return &p->store[p->nr_insn];
} else {
static void
brw_patch_break_cont(struct brw_compile *p, struct brw_instruction *while_inst)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *do_inst = get_inner_do_insn(p);
struct brw_instruction *inst;
- int br = (intel->gen == 5) ? 2 : 1;
+ int br = (brw->gen == 5) ? 2 : 1;
for (inst = while_inst - 1; inst != do_inst; inst--) {
/* If the jump count is != 0, that means that this instruction has already
struct brw_instruction *brw_WHILE(struct brw_compile *p)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn, *do_insn;
GLuint br = 1;
- if (intel->gen >= 5)
+ if (brw->gen >= 5)
br = 2;
- if (intel->gen >= 7) {
+ if (brw->gen >= 7) {
insn = next_insn(p, BRW_OPCODE_WHILE);
do_insn = get_inner_do_insn(p);
insn->bits3.break_cont.jip = br * (do_insn - insn);
insn->header.execution_size = BRW_EXECUTE_8;
- } else if (intel->gen == 6) {
+ } else if (brw->gen == 6) {
insn = next_insn(p, BRW_OPCODE_WHILE);
do_insn = get_inner_do_insn(p);
*/
void brw_land_fwd_jump(struct brw_compile *p, int jmp_insn_idx)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *jmp_insn = &p->store[jmp_insn_idx];
GLuint jmpi = 1;
- if (intel->gen >= 5)
+ if (brw->gen >= 5)
jmpi = 2;
assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
struct brw_reg src0,
struct brw_reg src1)
{
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn = next_insn(p, BRW_OPCODE_CMP);
insn->header.destreg__conditionalmod = conditional;
p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
p->flag_value = 0xff;
}
+
+ /* Item WaCMPInstNullDstForcesThreadSwitch in the Haswell Bspec workarounds
+ * page says:
+ * "Any CMP instruction with a null destination must use a {switch}."
+ *
+ * It also applies to other Gen7 platforms (IVB, BYT) even though it isn't
+ * mentioned on their work-arounds pages.
+ */
+ if (brw->gen == 7) {
+ if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+ dest.nr == BRW_ARF_NULL) {
+ insn->header.thread_control = BRW_THREAD_SWITCH;
+ }
+ }
}
/* Issue 'wait' instruction for n1, host could program MMIO
GLuint data_type,
GLuint precision )
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH);
- assert(dest.file == BRW_GENERAL_REGISTER_FILE);
+ assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
+ (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
assert(src.file == BRW_GENERAL_REGISTER_FILE);
assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
- if (intel->gen == 6)
+ if (brw->gen == 6)
assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);
/* Source modifiers are ignored for extended math instructions on Gen6. */
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
assert(!src.negate);
assert(!src.abs);
}
struct brw_reg src0,
struct brw_reg src1)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn = next_insn(p, BRW_OPCODE_MATH);
- assert(intel->gen >= 6);
- (void) intel;
-
-
- assert(dest.file == BRW_GENERAL_REGISTER_FILE);
+ assert(dest.file == BRW_GENERAL_REGISTER_FILE ||
+ (brw->gen >= 7 && dest.file == BRW_MESSAGE_REGISTER_FILE));
assert(src0.file == BRW_GENERAL_REGISTER_FILE);
assert(src1.file == BRW_GENERAL_REGISTER_FILE);
assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
}
}
/* Source modifiers are ignored for extended math instructions on Gen6. */
- if (intel->gen == 6) {
+ if (brw->gen == 6) {
assert(!src0.negate);
assert(!src0.abs);
assert(!src1.negate);
int num_regs,
GLuint offset)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
uint32_t msg_control, msg_type;
int mlen;
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
offset /= 16;
mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
* protection. Our use of DP writes is all about register
* spilling within a thread.
*/
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
dest = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
send_commit_msg = 0;
} else {
}
brw_set_dest(p, insn, dest);
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
brw_set_src0(p, insn, mrf);
} else {
brw_set_src0(p, insn, brw_null_reg());
}
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
else
msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
int num_regs,
GLuint offset)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
uint32_t msg_control;
int rlen;
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
offset /= 16;
mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
insn->header.destreg__conditionalmod = mrf.nr;
brw_set_dest(p, insn, dest); /* UW? */
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
brw_set_src0(p, insn, mrf);
} else {
brw_set_src0(p, insn, brw_null_reg());
uint32_t offset,
uint32_t bind_table_index)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
/* On newer hardware, offset is in units of owords. */
- if (intel->gen >= 6)
+ if (brw->gen >= 6)
offset /= 16;
mrf = retype(mrf, BRW_REGISTER_TYPE_UD);
dest = retype(vec8(dest), BRW_REGISTER_TYPE_UW);
brw_set_dest(p, insn, dest);
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
brw_set_src0(p, insn, mrf);
} else {
brw_set_src0(p, insn, brw_null_reg());
bool eot,
bool header_present)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
GLuint msg_type;
struct brw_reg dest;
else
dest = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
insn = next_insn(p, BRW_OPCODE_SENDC);
} else {
insn = next_insn(p, BRW_OPCODE_SEND);
insn->header.predicate_control = 0;
insn->header.compression_control = BRW_COMPRESSION_NONE;
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
/* headerless version, just submit color payload */
src0 = brw_message_reg(msg_reg_nr);
GLuint simd_mode,
GLuint return_format)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
gen6_resolve_implied_move(p, &src0, msg_reg_nr);
insn = next_insn(p, BRW_OPCODE_SEND);
insn->header.predicate_control = 0; /* XXX */
insn->header.compression_control = BRW_COMPRESSION_NONE;
- if (intel->gen < 6)
+ if (brw->gen < 6)
insn->header.destreg__conditionalmod = msg_reg_nr;
brw_set_dest(p, insn, dest);
struct brw_reg dest,
GLuint msg_reg_nr,
struct brw_reg src0,
- bool allocate,
- bool used,
+ enum brw_urb_write_flags flags,
GLuint msg_length,
GLuint response_length,
- bool eot,
- bool writes_complete,
GLuint offset,
GLuint swizzle)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
gen6_resolve_implied_move(p, &src0, msg_reg_nr);
- if (intel->gen == 7) {
+ if (brw->gen == 7) {
/* Enable Channel Masks in the URB_WRITE_HWORD message header */
brw_push_insn_state(p);
brw_set_access_mode(p, BRW_ALIGN_1);
+ brw_set_mask_control(p, BRW_MASK_DISABLE);
brw_OR(p, retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5),
BRW_REGISTER_TYPE_UD),
retype(brw_vec1_grf(0, 5), BRW_REGISTER_TYPE_UD),
brw_set_src0(p, insn, src0);
brw_set_src1(p, insn, brw_imm_d(0));
- if (intel->gen < 6)
+ if (brw->gen < 6)
insn->header.destreg__conditionalmod = msg_reg_nr;
brw_set_urb_message(p,
insn,
- allocate,
- used,
+ flags,
msg_length,
response_length,
- eot,
- writes_complete,
offset,
swizzle);
}
static int
brw_find_loop_end(struct brw_compile *p, int start)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
int ip;
int scale = 8;
void *store = p->store;
struct brw_instruction *insn = store + ip;
if (insn->header.opcode == BRW_OPCODE_WHILE) {
- int jip = intel->gen == 6 ? insn->bits1.branch_gen6.jump_count
+ int jip = brw->gen == 6 ? insn->bits1.branch_gen6.jump_count
: insn->bits3.break_cont.jip;
if (ip + jip * scale <= start)
return ip;
void
brw_set_uip_jip(struct brw_compile *p)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
int ip;
int scale = 8;
void *store = p->store;
- if (intel->gen < 6)
+ if (brw->gen < 6)
return;
for (ip = 0; ip < p->next_insn_offset; ip = next_ip(p, ip)) {
/* Gen7 UIP points to WHILE; Gen6 points just after it */
insn->bits3.break_cont.uip =
(brw_find_loop_end(p, ip) - ip +
- (intel->gen == 6 ? 16 : 0)) / scale;
+ (brw->gen == 6 ? 16 : 0)) / scale;
break;
case BRW_OPCODE_CONTINUE:
assert(block_end_ip != 0);
GLuint response_length,
bool eot)
{
- struct intel_context *intel = &p->brw->intel;
+ struct brw_context *brw = p->brw;
struct brw_instruction *insn;
gen6_resolve_implied_move(p, &src0, msg_reg_nr);
brw_set_src0(p, insn, src0);
brw_set_src1(p, insn, brw_imm_d(0));
- if (intel->gen < 6)
+ if (brw->gen < 6)
insn->header.destreg__conditionalmod = msg_reg_nr;
brw_set_ff_sync_message(p,
* messages.
*/
void brw_shader_time_add(struct brw_compile *p,
- int base_mrf,
+ struct brw_reg payload,
uint32_t surf_index)
{
- struct intel_context *intel = &p->brw->intel;
- assert(intel->gen >= 7);
+ struct brw_context *brw = p->brw;
+ assert(brw->gen >= 7);
brw_push_insn_state(p);
brw_set_access_mode(p, BRW_ALIGN_1);
*/
brw_set_dest(p, send, brw_vec1_reg(BRW_ARCHITECTURE_REGISTER_FILE,
BRW_ARF_NULL, 0));
- brw_set_src0(p, send, brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE,
- base_mrf, 0));
+ brw_set_src0(p, send, brw_vec1_reg(payload.file,
+ payload.nr, 0));
+
+ uint32_t sfid, msg_type;
+ if (brw->is_haswell) {
+ sfid = HSW_SFID_DATAPORT_DATA_CACHE_1;
+ msg_type = HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP;
+ } else {
+ sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
+ msg_type = GEN7_DATAPORT_DC_UNTYPED_ATOMIC_OP;
+ }
bool header_present = false;
bool eot = false;
uint32_t mlen = 2; /* offset, value */
uint32_t rlen = 0;
- brw_set_message_descriptor(p, send,
- GEN7_SFID_DATAPORT_DATA_CACHE,
- mlen, rlen, header_present, eot);
+ brw_set_message_descriptor(p, send, sfid, mlen, rlen, header_present, eot);
- send->bits3.ud |= 6 << 14; /* untyped atomic op */
+ send->bits3.ud |= msg_type << 14;
send->bits3.ud |= 0 << 13; /* no return data */
send->bits3.ud |= 1 << 12; /* SIMD8 mode */
send->bits3.ud |= BRW_AOP_ADD << 8;