}
static struct brw_reg
-brw_reg_from_fs_reg(const struct brw_codegen *p,
- fs_inst *inst, fs_reg *reg, unsigned gen)
+brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen, bool compressed)
{
+ assert(reg->reg_offset == 0);
struct brw_reg brw_reg;
switch (reg->file) {
case VGRF:
if (reg->stride == 0) {
brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
- } else if (!p->compressed &&
- inst->exec_size * reg->stride * type_sz(reg->type) <= 32) {
- brw_reg = brw_vecn_reg(inst->exec_size, brw_file_from_reg(reg),
- reg->nr, 0);
- brw_reg = stride(brw_reg, inst->exec_size * reg->stride,
- inst->exec_size, reg->stride);
} else {
/* From the Haswell PRM:
*
- * VertStride must be used to cross GRF register boundaries. This
- * rule implies that elements within a 'Width' cannot cross GRF
- * boundaries.
+ * "VertStride must be used to cross GRF register boundaries. This
+ * rule implies that elements within a 'Width' cannot cross GRF
+ * boundaries."
*
- * So, for registers that are large enough, we have to split the exec
- * size in two and trust the compression state to sort it out.
+ * The maximum width value that could satisfy this restriction is:
*/
- assert(inst->exec_size / 2 * reg->stride * type_sz(reg->type) <= 32);
- brw_reg = brw_vecn_reg(inst->exec_size / 2, brw_file_from_reg(reg),
- reg->nr, 0);
- brw_reg = stride(brw_reg, inst->exec_size / 2 * reg->stride,
- inst->exec_size / 2, reg->stride);
+ const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));
+
+ /* Because the hardware can only split source regions at a whole
+ * multiple of width during decompression (i.e. vertically), clamp
+ * the value obtained above to the physical execution size of a
+ * single decompressed chunk of the instruction:
+ */
+ const unsigned phys_width = compressed ? inst->exec_size / 2 :
+ inst->exec_size;
+
+ /* XXX - The equation above is strictly speaking not correct on
+ * hardware that supports unbalanced GRF writes -- On Gen9+
+ * each decompressed chunk of the instruction may have a
+ * different execution size when the number of components
+ * written to each destination GRF is not the same.
+ */
+ const unsigned width = MIN2(reg_width, phys_width);
+ brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
+ brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);
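         /* Editor's sketch, not part of the patch: assuming a compressed
          * SIMD16 instruction reading 32-bit floats at stride 1,
          * reg_width = 32 / (1 * 4) = 8 and phys_width = 16 / 2 = 8, so
          * width = MIN2(8, 8) = 8 and the resulting region is <8;8,1>,
          * i.e. each decompressed half of the instruction reads exactly
          * one GRF.
          */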
}
      brw_reg = retype(brw_reg, reg->type);
      brw_reg = byte_offset(brw_reg, reg->subreg_offset);
      break;
case ARF:
case FIXED_GRF:
case IMM:
+ assert(reg->subreg_offset == 0);
brw_reg = reg->as_brw_reg();
break;
case BAD_FILE:
if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
else if (prog_data->dual_src_blend) {
- if (!inst->force_sechalf)
+ if (!inst->group)
msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
else
msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
brw_fb_WRITE(p,
- dispatch_width,
payload,
implied_header,
msg_control,
}
}
+void
+fs_generator::generate_fb_read(fs_inst *inst, struct brw_reg dst,
+ struct brw_reg payload)
+{
+ brw_wm_prog_data *prog_data =
+ reinterpret_cast<brw_wm_prog_data *>(this->prog_data);
+ const unsigned surf_index =
+ prog_data->binding_table.render_target_start + inst->target;
+
+ gen9_fb_READ(p, dst, payload, surf_index,
+ inst->header_size, inst->regs_written,
+ prog_data->persample_dispatch);
+
+ brw_mark_surface_used(&prog_data->base, surf_index);
+}
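/* Editor's example, not part of the patch: with a hypothetical
 * render_target_start of 8 and inst->target == 1, the read above goes to
 * binding-table entry 8 + 1 = 9; the response length passed to
 * gen9_fb_READ is taken from inst->regs_written.
 */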
+
void
fs_generator::generate_mov_indirect(fs_inst *inst,
struct brw_reg dst,
assert(header.type == BRW_REGISTER_TYPE_UD);
brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
- brw_set_dest(p, send, dst);
+ brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
brw_set_src0(p, send, header);
brw_set_src1(p, send, brw_imm_ud(0u));
* See also: emit_interpolation_setup_gen4().
*/
struct brw_reg delta_x = src[0];
- struct brw_reg delta_y = offset(src[0], dispatch_width / 8);
+ struct brw_reg delta_y = offset(src[0], inst->exec_size / 8);
struct brw_reg interp = src[1];
if (devinfo->has_pln &&
* current block (or the program).
*/
this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));
-
- brw_push_insn_state(p);
- brw_set_default_mask_control(p, BRW_MASK_DISABLE);
gen6_HALT(p);
- brw_pop_insn_state(p);
}
void
fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
{
+ /* The 32-wide messages only respect the first 16-wide half of the channel
+ * enable signals which are replicated identically for the second group of
+ * 16 channels, so we cannot use them unless the write is marked
+ * force_writemask_all.
+ */
+ const unsigned lower_size = inst->force_writemask_all ? inst->exec_size :
+ MIN2(16, inst->exec_size);
+ const unsigned block_size = 4 * lower_size / REG_SIZE;
assert(inst->mlen != 0);
- brw_MOV(p,
- brw_uvec_mrf(inst->exec_size, (inst->base_mrf + 1), 0),
- retype(src, BRW_REGISTER_TYPE_UD));
- brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
- inst->exec_size / 8, inst->offset);
+ brw_push_insn_state(p);
+ brw_set_default_exec_size(p, cvt(lower_size) - 1);
+ brw_set_default_compression(p, lower_size > 8);
+
+ for (unsigned i = 0; i < inst->exec_size / lower_size; i++) {
+ brw_set_default_group(p, inst->group + lower_size * i);
+
+ brw_MOV(p, brw_uvec_mrf(lower_size, inst->base_mrf + 1, 0),
+ retype(offset(src, block_size * i), BRW_REGISTER_TYPE_UD));
+
+ brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
+ block_size,
+ inst->offset + block_size * REG_SIZE * i);
+ }
+
+ brw_pop_insn_state(p);
}
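/* Editor's example, not part of the patch: for a SIMD32 scratch write
 * without force_writemask_all, lower_size = MIN2(16, 32) = 16 channels,
 * block_size = 4 * 16 / REG_SIZE = 2 GRFs per message, and the loop emits
 * 32 / 16 = 2 oword block writes, at inst->offset and
 * inst->offset + 2 * REG_SIZE.
 */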
void
fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
{
+ assert(inst->exec_size <= 16 || inst->force_writemask_all);
assert(inst->mlen != 0);
brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
void
fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
{
+ assert(inst->exec_size <= 16 || inst->force_writemask_all);
+
gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
}
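/* Editor's note: the exec_size <= 16 assertions above appear to mirror the
 * channel-enable limitation described for scratch writes, i.e. the 32-wide
 * oword block messages only honor the first 16 channel enables, so SIMD32
 * scratch reads are only safe when marked force_writemask_all.
 */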
brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
brw_pop_insn_state(p);
- brw_set_dest(p, send, dst);
+ brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UD));
brw_set_src0(p, send, src);
brw_set_sampler_message(p, send,
surf_index,
uint32_t surf_index = index.ud;
uint32_t simd_mode, rlen, msg_type;
- if (dispatch_width == 16) {
+ if (inst->exec_size == 16) {
simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
rlen = 8;
} else {
+ assert(inst->exec_size == 8);
simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
rlen = 4;
}
assert(index.type == BRW_REGISTER_TYPE_UD);
uint32_t simd_mode, rlen, mlen;
- if (dispatch_width == 16) {
+ if (inst->exec_size == 16) {
mlen = 2;
rlen = 8;
simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
} else {
+ assert(inst->exec_size == 8);
mlen = 1;
rlen = 4;
simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
src0.type == BRW_REGISTER_TYPE_UD);
struct brw_reg reg = stride(src1, 1, 4, 0);
- if (devinfo->gen >= 8 || dispatch_width == 8) {
+ if (devinfo->gen >= 8 || inst->exec_size == 8) {
brw_ADD(p, dst, src0, reg);
- } else if (dispatch_width == 16) {
+ } else if (inst->exec_size == 16) {
brw_push_insn_state(p);
brw_set_default_exec_size(p, BRW_EXECUTE_8);
brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
* type and regioning so the instruction is considered compressed
* or not accordingly.
*/
- p->compressed = inst->dst.component_size(inst->exec_size) > REG_SIZE;
- brw_set_default_compression(p, p->compressed);
- brw_set_default_group(p, inst->force_sechalf ? 8 : 0);
+ const bool compressed =
+ inst->dst.component_size(inst->exec_size) > REG_SIZE;
+ brw_set_default_compression(p, compressed);
+ brw_set_default_group(p, inst->group);
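      /* Editor's example, not part of the patch: a SIMD16 instruction with
       * a stride-1 32-bit destination writes 16 * 4 = 64 bytes, which
       * exceeds REG_SIZE (32 bytes) and is therefore considered compressed;
       * the equivalent SIMD8 instruction writes exactly one GRF and is not.
       */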
for (unsigned int i = 0; i < inst->sources; i++) {
- src[i] = brw_reg_from_fs_reg(p, inst, &inst->src[i], devinfo->gen);
+ src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen,
+ compressed);
/* The accumulator result appears to get used for the
* conditional modifier generation. When negating a UD
inst->src[i].type != BRW_REGISTER_TYPE_UD ||
!inst->src[i].negate);
}
- dst = brw_reg_from_fs_reg(p, inst, &inst->dst, devinfo->gen);
+ dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen, compressed);
brw_set_default_access_mode(p, BRW_ALIGN_1);
brw_set_default_predicate_control(p, inst->predicate);
brw_set_default_acc_write_control(p, inst->writes_accumulator);
brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);
- assert(inst->force_writemask_all || inst->exec_size >= 8);
+ assert(inst->force_writemask_all || inst->exec_size >= 4);
+ assert(inst->force_writemask_all || inst->group % inst->exec_size == 0);
assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
assert(inst->mlen <= BRW_MAX_MSG_LENGTH);
/* FBL only supports UD type for dst. */
brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
break;
+ case BRW_OPCODE_LZD:
+ brw_LZD(p, dst, src[0]);
+ break;
case BRW_OPCODE_CBIT:
assert(devinfo->gen >= 7);
/* CBIT only supports UD type for dst. */
assert(devinfo->gen == 6);
gen6_IF(p, inst->conditional_mod, src[0], src[1]);
} else {
- brw_IF(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
+ brw_IF(p, brw_inst_exec_size(devinfo, p->current));
}
break;
break;
case BRW_OPCODE_DO:
- brw_DO(p, dispatch_width == 16 ? BRW_EXECUTE_16 : BRW_EXECUTE_8);
+ brw_DO(p, brw_inst_exec_size(devinfo, p->current));
break;
case BRW_OPCODE_BREAK:
brw_BREAK(p);
- brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
break;
case BRW_OPCODE_CONTINUE:
brw_CONT(p);
- brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
break;
case BRW_OPCODE_WHILE:
generate_fb_write(inst, src[0]);
break;
+ case FS_OPCODE_FB_READ:
+ generate_fb_read(inst, dst, src[0]);
+ break;
+
case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
generate_mov_dispatch_to_flags(inst);
break;
break;
case SHADER_OPCODE_BROADCAST:
+ assert(inst->force_writemask_all);
brw_broadcast(p, dst, src[0], src[1]);
break;
}
break;
- case FS_OPCODE_INTERPOLATE_AT_CENTROID:
- generate_pixel_interpolator_query(inst, dst, src[0], src[1],
- GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID);
- break;
-
case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
generate_pixel_interpolator_query(inst, dst, src[0], src[1],
GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
generate_barrier(inst, src[0]);
break;
+ case BRW_OPCODE_DIM:
+ assert(devinfo->is_haswell);
+ assert(src[0].type == BRW_REGISTER_TYPE_DF);
+ assert(dst.type == BRW_REGISTER_TYPE_DF);
+ brw_DIM(p, dst, retype(src[0], BRW_REGISTER_TYPE_F));
+ break;
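/* Editor's note, my reading of the retype above rather than something the
 * patch states: DIM is a Haswell-only instruction used to load DF
 * immediates, and the hardware appears to require the source operand to be
 * presented with F type even though it carries a 64-bit value.
 */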
+
default:
unreachable("Unsupported opcode");
}
}
- brw_set_uip_jip(p);
+ brw_set_uip_jip(p, start_offset);
annotation_finalize(&annotation, p->next_insn_offset);
#ifndef NDEBUG