* IN THE SOFTWARE.
*/
-#include "glsl/glsl_parser_extras.h"
#include "brw_vec4.h"
#include "brw_cfg.h"
#include "brw_eu.h"
brw_set_default_mask_control(p, BRW_MASK_DISABLE);
brw_set_default_access_mode(p, BRW_ALIGN_1);
- if (memcmp(&surface_reg, &sampler_reg, sizeof(surface_reg)) == 0) {
+ if (brw_regs_equal(&surface_reg, &sampler_reg)) {
brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
} else {
brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
bool final_write = inst->sol_final_write;
brw_push_insn_state(p);
+ brw_set_default_exec_size(p, BRW_EXECUTE_4);
/* Copy Vertex data into M0.x */
brw_MOV(p, stride(dst, 4, 4, 1),
stride(retype(src0, BRW_REGISTER_TYPE_UD), 4, 4, 1));
+ brw_pop_insn_state(p);
+ brw_push_insn_state(p);
/* Send SVB Write */
brw_svb_write(p,
final_write ? src1 : brw_null_reg(), /* dest == src1 */
brw_MOV(p, get_element_ud(header, 0), get_element_ud(dst, 0));
/* src1 is not an immediate when we use transform feedback */
- if (src1.file != BRW_IMMEDIATE_VALUE)
+ if (src1.file != BRW_IMMEDIATE_VALUE) {
+ brw_set_default_exec_size(p, BRW_EXECUTE_4);
brw_MOV(p, brw_vec4_grf(src1.nr, 0), brw_vec4_grf(dst.nr, 1));
+ }
brw_pop_insn_state(p);
}
static void
generate_tcs_get_instance_id(struct brw_codegen *p, struct brw_reg dst)
{
+ const struct brw_device_info *devinfo = p->devinfo;
+ const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
+
/* "Instance Count" comes as part of the payload in r0.2 bits 23:17.
*
* Since we operate in SIMD4x2 mode, we need run half as many threads
brw_push_insn_state(p);
brw_set_default_access_mode(p, BRW_ALIGN_1);
- const int mask = INTEL_MASK(23, 17);
- const int shift = 17;
+ const int mask = ivb ? INTEL_MASK(22, 16) : INTEL_MASK(23, 17);
+ const int shift = ivb ? 16 : 17;
brw_AND(p, get_element_ud(dst, 0), get_element_ud(r0, 2), brw_imm_ud(mask));
brw_SHR(p, get_element_ud(dst, 0), get_element_ud(dst, 0),
true /* header */, false /* eot */);
brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_WRITE_OWORD);
brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
- brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
- brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
+ if (inst->urb_write_flags & BRW_URB_WRITE_EOT) {
+ brw_inst_set_eot(devinfo, send, 1);
+ } else {
+ brw_inst_set_urb_per_slot_offset(devinfo, send, 1);
+ brw_inst_set_urb_swizzle_control(devinfo, send, BRW_URB_SWIZZLE_INTERLEAVE);
+ }
/* what happens to swizzles? */
}
brw_pop_insn_state(p);
}
+/* Builds the message header (in 'dst', typically m0) used for TES input
+ * URB reads: zeroes the register, enables all channels via the
+ * channel-mask byte, and copies the patch URB handle out of the thread
+ * payload.  Runs in align1 mode with masking disabled; the previous
+ * default state is restored before returning.
+ */
+static void
+generate_tes_create_input_read_header(struct brw_codegen *p,
+ struct brw_reg dst)
+{
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+
+ /* Initialize the register to 0 */
+ brw_MOV(p, dst, brw_imm_ud(0));
+
+ /* Enable all the channels in m0.5 bits 15:8 */
+ brw_MOV(p, get_element_ud(dst, 5), brw_imm_ud(0xff00));
+
+ /* Copy g1.3 (the patch URB handle) to m0.0 and m0.1. For safety,
+ * mask out irrelevant "Reserved" bits, as they're not marked MBZ.
+ */
+ brw_AND(p, vec2(get_element_ud(dst, 0)),
+ retype(brw_vec1_grf(1, 3), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(0x1fff));
+ brw_pop_insn_state(p);
+}
+
+/* Copies a previously-built URB read header into 'dst' and patches in a
+ * per-channel indirect offset: the two dwords at dst.3-dst.4 receive the
+ * offsets taken from 'offset' (read with a <4,1,0> region so the two
+ * SIMD4x2 halves get their own value).  Align1, masking disabled; the
+ * default instruction state is restored on return.
+ */
+static void
+generate_tes_add_indirect_urb_offset(struct brw_codegen *p,
+ struct brw_reg dst,
+ struct brw_reg header,
+ struct brw_reg offset)
+{
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+
+ brw_MOV(p, dst, header);
+ /* m0.3-0.4: 128-bit-granular offsets into the URB from the handles */
+ brw_MOV(p, vec2(get_element_ud(dst, 3)), stride(offset, 4, 1, 0));
+
+ brw_pop_insn_state(p);
+}
+
static void
generate_vec4_urb_read(struct brw_codegen *p,
vec4_instruction *inst,
brw_inst_set_urb_global_offset(devinfo, send, inst->offset);
}
+/* Releases a TCS input vertex's URB handles.  'vertex' must be a UD
+ * immediate giving the dword offset of the handle pair within the
+ * payload starting at g1 (>> 3 selects the GRF, & 7 the subregister).
+ * The handles are copied into a scratch header and a zero-length URB
+ * "read" with the Complete bit set is sent, which (per the opcode's
+ * release semantics) returns the handles to the URB; no data comes back
+ * (rlen 0).  'is_unpaired' (also an immediate) selects NONE vs.
+ * INTERLEAVE swizzle for the final odd vertex of a SIMD4x2 pair.
+ */
+static void
+generate_tcs_release_input(struct brw_codegen *p,
+ struct brw_reg header,
+ struct brw_reg vertex,
+ struct brw_reg is_unpaired)
+{
+ const struct brw_device_info *devinfo = p->devinfo;
+
+ assert(vertex.file == BRW_IMMEDIATE_VALUE);
+ assert(vertex.type == BRW_REGISTER_TYPE_UD);
+
+ /* m0.0-0.1: URB handles */
+ struct brw_reg urb_handles =
+ retype(brw_vec2_grf(1 + (vertex.ud >> 3), vertex.ud & 7),
+ BRW_REGISTER_TYPE_UD);
+
+ /* Build the header in align1 mode with masking off, then restore. */
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+ brw_MOV(p, header, brw_imm_ud(0));
+ brw_MOV(p, vec2(get_element_ud(header, 0)), urb_handles);
+ brw_pop_insn_state(p);
+
+ /* Hand-rolled SEND: URB OWord read, mlen 1, rlen 0, Complete set. */
+ brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
+ brw_set_dest(p, send, brw_null_reg());
+ brw_set_src0(p, send, header);
+ brw_set_message_descriptor(p, send, BRW_SFID_URB,
+ 1 /* mlen */, 0 /* rlen */,
+ true /* header */, false /* eot */);
+ brw_inst_set_urb_opcode(devinfo, send, BRW_URB_OPCODE_READ_OWORD);
+ brw_inst_set_urb_complete(devinfo, send, 1);
+ brw_inst_set_urb_swizzle_control(devinfo, send, is_unpaired.ud ?
+ BRW_URB_SWIZZLE_NONE :
+ BRW_URB_SWIZZLE_INTERLEAVE);
+}
+
+/* Emits the TCS end-of-thread URB write.  Builds a two-register message
+ * starting at inst->base_mrf: the header gets a zeroed register with the
+ * URB handle copied from r0.0 and a channel mask enabling only X
+ * (WRITEMASK_X << 8 in m0.5), and the data register is zeroed.  The URB
+ * write itself carries EOT, so this terminates the thread.
+ */
+static void
+generate_tcs_thread_end(struct brw_codegen *p, vec4_instruction *inst)
+{
+ struct brw_reg header = brw_message_reg(inst->base_mrf);
+
+ /* Assemble the message payload in align1 mode with masking off. */
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+ brw_MOV(p, header, brw_imm_ud(0));
+ brw_MOV(p, get_element_ud(header, 5), brw_imm_ud(WRITEMASK_X << 8));
+ brw_MOV(p, get_element_ud(header, 0),
+ retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ brw_MOV(p, brw_message_reg(inst->base_mrf + 1), brw_imm_ud(0u));
+ brw_pop_insn_state(p);
+
+ brw_urb_WRITE(p,
+ brw_null_reg(), /* dest */
+ inst->base_mrf, /* starting mrf reg nr */
+ header,
+ BRW_URB_WRITE_EOT | BRW_URB_WRITE_OWORD |
+ BRW_URB_WRITE_USE_CHANNEL_MASKS,
+ inst->mlen,
+ 0, /* response len */
+ 0, /* urb destination offset */
+ 0);
+}
+
+/* Loads the TES primitive ID into 'dst' by copying the signed dword at
+ * g1.7 of the thread payload (align1 scalar MOV, splatted to dst).
+ */
+static void
+generate_tes_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
+{
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_MOV(p, dst, retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_D));
+ brw_pop_insn_state(p);
+}
+
static void
generate_tcs_get_primitive_id(struct brw_codegen *p, struct brw_reg dst)
{
struct brw_vue_prog_data *prog_data,
struct brw_reg dst)
{
+ const struct brw_device_info *devinfo = p->devinfo;
+ const bool ivb = devinfo->is_ivybridge || devinfo->is_baytrail;
struct brw_reg m0_2 = get_element_ud(dst, 2);
unsigned instances = ((struct brw_tcs_prog_data *) prog_data)->instances;
/* Zero the message header */
brw_MOV(p, retype(dst, BRW_REGISTER_TYPE_UD), brw_imm_ud(0u));
- /* Copy "Barrier ID" from DW0 bits 16:13 */
+ /* Copy "Barrier ID" from r0.2, bits 16:13 (Gen7.5+) or 15:12 (Gen7) */
brw_AND(p, m0_2,
retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
- brw_imm_ud(0x1e000));
+ brw_imm_ud(ivb ? INTEL_MASK(15, 12) : INTEL_MASK(16, 13)));
- /* Shift it into place */
- brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(11));
+ /* Shift it up to bits 27:24. */
+ brw_SHL(p, m0_2, get_element_ud(dst, 2), brw_imm_ud(ivb ? 12 : 11));
/* Set the Barrier Count and the enable bit */
brw_OR(p, m0_2, m0_2, brw_imm_ud(instances << 9 | (1 << 15)));
brw_pop_insn_state(p);
}
+/* Implements SHADER_OPCODE_MOV_INDIRECT: moves data from 'reg' plus a
+ * per-instruction byte offset 'indirect' into 'dst'.  When the offset is
+ * an immediate, it is folded into the register number/subnumber/swizzle
+ * and emitted as a plain MOV.  Otherwise the byte addresses for all
+ * channels are computed into a0 and a VxH register-indirect MOV is used.
+ * ('length' is currently unused here.)
+ */
+static void
+generate_mov_indirect(struct brw_codegen *p,
+ vec4_instruction *inst,
+ struct brw_reg dst, struct brw_reg reg,
+ struct brw_reg indirect, struct brw_reg length)
+{
+ assert(indirect.type == BRW_REGISTER_TYPE_UD);
+ assert(p->devinfo->gen >= 6);
+
+ /* Base byte offset of 'reg'; subnr is in units of half a GRF here. */
+ unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr * (REG_SIZE / 2);
+
+ /* This instruction acts in align1 mode */
+ assert(dst.writemask == WRITEMASK_XYZW);
+
+ if (indirect.file == BRW_IMMEDIATE_VALUE) {
+ imm_byte_offset += indirect.ud;
+
+ /* Re-derive nr/subnr from the combined byte offset, and rotate the
+ * swizzle to account for any remaining dword misalignment.
+ */
+ reg.nr = imm_byte_offset / REG_SIZE;
+ reg.subnr = (imm_byte_offset / (REG_SIZE / 2)) % 2;
+ unsigned shift = (imm_byte_offset / 4) % 4;
+ reg.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
+
+ brw_MOV(p, dst, reg);
+ } else {
+ brw_push_insn_state(p);
+ brw_set_default_access_mode(p, BRW_ALIGN_1);
+ brw_set_default_mask_control(p, BRW_MASK_DISABLE);
+
+ struct brw_reg addr = vec8(brw_address_reg(0));
+
+ /* We need to move the indirect value into the address register. In
+ * order to make things make some sense, we want to respect at least the
+ * X component of the swizzle. In order to do that, we need to convert
+ * the subnr (probably 0) to an align1 subnr and add in the swizzle.
+ */
+ assert(brw_is_single_value_swizzle(indirect.swizzle));
+ indirect.subnr = (indirect.subnr * 4 + BRW_GET_SWZ(indirect.swizzle, 0));
+
+ /* We then use a region of <8,4,0>:uw to pick off the first 2 bytes of
+ * the indirect and splat it out to all four channels of the given half
+ * of a0.
+ */
+ indirect.subnr *= 2;
+ indirect = stride(retype(indirect, BRW_REGISTER_TYPE_UW), 8, 4, 0);
+ brw_ADD(p, addr, indirect, brw_imm_uw(imm_byte_offset));
+
+ /* Now we need to incorporate the swizzle from the source register */
+ if (reg.swizzle != BRW_SWIZZLE_XXXX) {
+ /* Per-channel byte deltas packed as a UV (vector-of-uint4)
+ * immediate, replicated for both SIMD4x2 halves via the << 16.
+ */
+ uint32_t uv_swiz = BRW_GET_SWZ(reg.swizzle, 0) << 2 |
+ BRW_GET_SWZ(reg.swizzle, 1) << 6 |
+ BRW_GET_SWZ(reg.swizzle, 2) << 10 |
+ BRW_GET_SWZ(reg.swizzle, 3) << 14;
+ uv_swiz |= uv_swiz << 16;
+
+ brw_ADD(p, addr, addr, brw_imm_uv(uv_swiz));
+ }
+
+ /* Per-channel indirect load through a0 (VxH addressing). */
+ brw_MOV(p, dst, retype(brw_VxH_indirect(0, 0), reg.type));
+
+ brw_pop_insn_state(p);
+ }
+}
+
static void
generate_code(struct brw_codegen *p,
const struct brw_compiler *compiler,
assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
unsigned pre_emit_nr_insn = p->nr_insn;
+ bool fix_exec_size = false;
if (dst.width == BRW_WIDTH_4) {
/* This happens in attribute fixups for "dual instanced" geometry
if (src[i].file == BRW_GENERAL_REGISTER_FILE)
src[i] = stride(src[i], 4, 4, 1);
}
+ brw_set_default_exec_size(p, BRW_EXECUTE_4);
+ fix_exec_size = true;
}
switch (inst->opcode) {
generate_tcs_create_barrier_header(p, prog_data, dst);
break;
+ case TES_OPCODE_CREATE_INPUT_READ_HEADER:
+ generate_tes_create_input_read_header(p, dst);
+ break;
+
+ case TES_OPCODE_ADD_INDIRECT_URB_OFFSET:
+ generate_tes_add_indirect_urb_offset(p, dst, src[0], src[1]);
+ break;
+
+ case TES_OPCODE_GET_PRIMITIVE_ID:
+ generate_tes_get_primitive_id(p, dst);
+ break;
+
+ case TCS_OPCODE_SRC0_010_IS_ZERO:
+ /* If src_reg had stride like fs_reg, we wouldn't need this. */
+ brw_MOV(p, brw_null_reg(), stride(src[0], 0, 1, 0));
+ break;
+
+ case TCS_OPCODE_RELEASE_INPUT:
+ generate_tcs_release_input(p, dst, src[0], src[1]);
+ break;
+
+ case TCS_OPCODE_THREAD_END:
+ generate_tcs_thread_end(p, inst);
+ break;
+
case SHADER_OPCODE_BARRIER:
brw_barrier(p, src[0]);
brw_WAIT(p);
break;
+ case SHADER_OPCODE_MOV_INDIRECT:
+ generate_mov_indirect(p, inst, dst, src[0], src[1], src[2]);
+ break;
+
default:
unreachable("Unsupported opcode");
}
+ if (fix_exec_size)
+ brw_set_default_exec_size(p, BRW_EXECUTE_8);
+
if (inst->opcode == VEC4_OPCODE_PACK_BYTES) {
/* Handled dependency hints in the generator. */
compiler->shader_debug_log(log_data,
"%s vec4 shader: %d inst, %d loops, %u cycles, "
- "compacted %d to %d bytes.\n",
+ "compacted %d to %d bytes.",
stage_abbrev, before_size / 16,
loop_count, cfg->cycle_count,
before_size, after_size);