vec4_result, surf_index, vec4_offset);
inst->size_written = 4 * vec4_result.component_size(inst->exec_size);
- fs_reg dw = offset(vec4_result, bld, (const_offset & 0xf) / 4);
- switch (type_sz(dst.type)) {
- case 2:
- shuffle_32bit_load_result_to_16bit_data(bld, dst, dw, 1);
- bld.MOV(dst, subscript(dw, dst.type, (const_offset / 2) & 1));
- break;
- case 4:
- bld.MOV(dst, retype(dw, dst.type));
- break;
- case 8:
- shuffle_32bit_load_result_to_64bit_data(bld, dst, dw, 1);
- break;
- default:
- unreachable("Unsupported bit_size");
- }
+ shuffle_from_32bit_read(bld, dst, vec4_result,
+ (const_offset & 0xf) / type_sz(dst.type), 1);
}
/**
case FS_OPCODE_PACK_HALF_2x16_SPLIT:
/* Multiple partial writes to the destination */
return true;
+ case SHADER_OPCODE_SHUFFLE:
+ /* This instruction returns an arbitrary channel from the source and
+ * gets split into smaller instructions in the generator. It's possible
+    * that one of the instructions will read from a channel that an earlier
+    * instruction has already written.
+ */
+ case SHADER_OPCODE_SEL_EXEC:
+ /* This is implemented as
+ *
+ * mov(16) g4<1>D 0D { align1 WE_all 1H };
+ * mov(16) g4<1>D g5<8,8,1>D { align1 1H }
+ *
+ * Because the source is only read in the second instruction, the first
+ * may stomp all over it.
+ */
+ return true;
default:
/* The SIMD16 compressed instruction
*
stride == r.stride);
}
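+/**
+ * Returns true if this register is the negation of @r: identical except
+ * that the negate source modifier (or, for an immediate, the sign) differs.
+ */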
+bool
+fs_reg::negative_equals(const fs_reg &r) const
+{
+ return (this->backend_reg::negative_equals(r) &&
+ stride == r.stride);
+}
+
bool
fs_reg::is_contiguous() const
{
case GLSL_TYPE_INT16:
case GLSL_TYPE_FLOAT16:
return DIV_ROUND_UP(type->components(), 2);
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return DIV_ROUND_UP(type->components(), 4);
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT64:
else
return 1;
}
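+   /* The per-slot offset in src[0] has two components (x and y). */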
+ case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
+ return (i == 0 ? 2 : 1);
default:
return 1;
{
switch (opcode) {
case FS_OPCODE_FB_WRITE:
+ case FS_OPCODE_REP_FB_WRITE:
+ if (arg == 0) {
+ if (base_mrf >= 0)
+ return src[0].file == BAD_FILE ? 0 : 2 * REG_SIZE;
+ else
+ return mlen * REG_SIZE;
+ }
+ break;
+
case FS_OPCODE_FB_READ:
case SHADER_OPCODE_URB_WRITE_SIMD8:
case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
case SHADER_OPCODE_TYPED_ATOMIC:
case SHADER_OPCODE_TYPED_SURFACE_READ:
case SHADER_OPCODE_TYPED_SURFACE_WRITE:
- case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
+ case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
+ case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
case SHADER_OPCODE_BYTE_SCATTERED_READ:
if (arg == 0)
return mlen * REG_SIZE;
break;
+ case FS_OPCODE_SET_SAMPLE_ID:
+ if (arg == 1)
+ return 1;
+ break;
+
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
/* The payload is actually stored in src1 */
if (arg == 1)
fs_inst::flags_written() const
{
if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
+ opcode != BRW_OPCODE_CSEL &&
opcode != BRW_OPCODE_IF &&
opcode != BRW_OPCODE_WHILE)) ||
- opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
+ opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL ||
+ opcode == FS_OPCODE_FB_WRITE) {
return flag_mask(this);
} else {
return flag_mask(dst, size_written);
case SHADER_OPCODE_SAMPLEINFO:
return 1;
case FS_OPCODE_FB_WRITE:
- return 2;
+ case FS_OPCODE_REP_FB_WRITE:
+ return inst->src[0].file == BAD_FILE ? 0 : 2;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
/* gl_FragCoord.z */
if (devinfo->gen >= 6) {
- bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
+ bld.MOV(wpos, fetch_payload_reg(bld, payload.source_depth_reg));
} else {
bld.emit(FS_OPCODE_LINTERP, wpos,
- this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
- interp_reg(VARYING_SLOT_POS, 2));
+ this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
+ component(interp_reg(VARYING_SLOT_POS, 2), 0));
}
wpos = offset(wpos, bld, 1);
* The X, Y sample positions come in as bytes in the thread payload, so read
* each X/Y pair as a word and extract the bytes with subscript().
*/
- struct brw_reg sample_pos_reg =
- stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
- BRW_REGISTER_TYPE_B), 16, 8, 2);
+ const fs_reg sample_pos_reg =
+ fetch_payload_reg(abld, payload.sample_pos_reg, BRW_REGISTER_TYPE_W);
- if (dispatch_width == 8) {
- abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
- } else {
- abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
- abld.half(1).MOV(half(int_sample_x, 1),
- fs_reg(suboffset(sample_pos_reg, 16)));
- }
/* Compute gl_SamplePosition.x */
- compute_sample_position(pos, int_sample_x);
- pos = offset(pos, abld, 1);
- if (dispatch_width == 8) {
- abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
- } else {
- abld.half(0).MOV(half(int_sample_y, 0),
- fs_reg(suboffset(sample_pos_reg, 1)));
- abld.half(1).MOV(half(int_sample_y, 1),
- fs_reg(suboffset(sample_pos_reg, 17)));
- }
+ abld.MOV(int_sample_x, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 0));
+ compute_sample_position(offset(pos, abld, 0), int_sample_x);
+
/* Compute gl_SamplePosition.y */
- compute_sample_position(pos, int_sample_y);
+ abld.MOV(int_sample_y, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 1));
+ compute_sample_position(offset(pos, abld, 1), int_sample_y);
return reg;
}
* TODO: These payload bits exist on Gen7 too, but they appear to always
* be zero, so this code fails to work. We should find out why.
*/
- fs_reg tmp(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UW);
+ const fs_reg tmp = abld.vgrf(BRW_REGISTER_TYPE_UW);
+
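+   /* Each byte of g1/g2 packs two 4-bit sample IDs, one per subspan.  Walk
+    * the payload one 16-channel group at a time (for SIMD32 the second
+    * group's IDs live in g2): the <1,8,0> byte region replicates each byte
+    * across 8 channels, the vector-immediate SHR moves the high nibble into
+    * place, and the AND below keeps only the 4-bit ID.
+    */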
+ for (unsigned i = 0; i < DIV_ROUND_UP(dispatch_width, 16); i++) {
+ const fs_builder hbld = abld.group(MIN2(16, dispatch_width), i);
+ hbld.SHR(offset(tmp, hbld, i),
+ stride(retype(brw_vec1_grf(1 + i, 0), BRW_REGISTER_TYPE_UB),
+ 1, 8, 0),
+ brw_imm_v(0x44440000));
+ }
- abld.SHR(tmp, fs_reg(stride(retype(brw_vec1_grf(1, 0),
- BRW_REGISTER_TYPE_UB), 1, 8, 0)),
- brw_imm_v(0x44440000));
abld.AND(*reg, tmp, brw_imm_w(0xf));
} else {
- const fs_reg t1 = component(fs_reg(VGRF, alloc.allocate(1),
- BRW_REGISTER_TYPE_UD), 0);
- const fs_reg t2(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UW);
+ const fs_reg t1 = component(abld.vgrf(BRW_REGISTER_TYPE_UD), 0);
+ const fs_reg t2 = abld.vgrf(BRW_REGISTER_TYPE_UW);
/* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
* 8x multisampling, subspan 0 will represent sample N (where N
brw_imm_ud(0xc0));
abld.exec_all().group(1, 0).SHR(t1, t1, brw_imm_d(5));
- /* This works for both SIMD8 and SIMD16 */
- abld.exec_all().group(4, 0).MOV(t2, brw_imm_v(0x3210));
+ /* This works for SIMD8-SIMD16. It also works for SIMD32 but only if we
+ * can assume 4x MSAA. Disallow it on IVB+
+ *
+ * FINISHME: One day, we could come up with a way to do this that
+ * actually works on gen7.
+ */
+ if (devinfo->gen >= 7)
+      limit_dispatch_width(16, "gl_SampleID is unsupported in SIMD32 on gen7");
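+      /* brw_imm_v(0x32103210) is a packed vector of 4-bit immediates: the
+       * eight channels read {0, 1, 2, 3, 0, 1, 2, 3}.
+       */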
+ abld.exec_all().group(8, 0).MOV(t2, brw_imm_v(0x32103210));
/* This special instruction takes care of setting vstride=1,
* width=4, hstride=0 of t2 during an ADD instruction.
fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
- fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
- BRW_REGISTER_TYPE_D));
+ fs_reg coverage_mask =
+ fetch_payload_reg(bld, payload.sample_mask_in_reg, BRW_REGISTER_TYPE_D);
if (wm_prog_data->persample_dispatch) {
/* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
* setup regs, now that the location of the constants has been chosen.
*/
foreach_block_and_inst(block, fs_inst, inst, cfg) {
- if (inst->opcode == FS_OPCODE_LINTERP) {
- assert(inst->src[1].file == FIXED_GRF);
- inst->src[1].nr += urb_start;
- }
-
- if (inst->opcode == FS_OPCODE_CINTERP) {
- assert(inst->src[0].file == FIXED_GRF);
- inst->src[0].nr += urb_start;
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file == ATTR) {
+         /* ATTR regs in the FS are in units of logical scalar inputs, each
+          * of which consumes half of a GRF register.
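+          * For example, logical input nr 3 at offset 0 lands in the second
+          * half of GRF urb_start + 1.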
+ */
+ assert(inst->src[i].offset < REG_SIZE / 2);
+ const unsigned grf = urb_start + inst->src[i].nr / 2;
+ const unsigned offset = (inst->src[i].nr % 2) * (REG_SIZE / 2) +
+ inst->src[i].offset;
+ const unsigned width = inst->src[i].stride == 0 ?
+ 1 : MIN2(inst->exec_size, 8);
+ struct brw_reg reg = stride(
+ byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
+ offset),
+ width * inst->src[i].stride,
+ width, inst->src[i].stride);
+ reg.abs = inst->src[i].abs;
+ reg.negate = inst->src[i].negate;
+ inst->src[i] = reg;
+ }
}
}
static void
cplx_align_assert_sane(struct cplx_align a)
{
- assert(a.mul > 0 && util_is_power_of_two(a.mul));
+ assert(a.mul > 0 && util_is_power_of_two_nonzero(a.mul));
assert(a.offset < a.mul);
}
mark_uniform_slots_read(struct uniform_slot_info *slots,
unsigned num_slots, unsigned alignment)
{
- assert(alignment > 0 && util_is_power_of_two(alignment));
+ assert(alignment > 0 && util_is_power_of_two_nonzero(alignment));
assert(alignment <= CPLX_ALIGN_MAX_MUL);
/* We can't align a slot to anything less than the slot size */
break;
if (inst->saturate) {
- if (inst->dst.type != inst->src[0].type)
+ /* Full mixed-type saturates don't happen. However, we can end up
+ * with things like:
+ *
+ * mov.sat(8) g21<1>DF -1F
+ *
+ * Other mixed-size-but-same-base-type cases may also be possible.
+ */
+ if (inst->dst.type != inst->src[0].type &&
+ inst->dst.type != BRW_REGISTER_TYPE_DF &&
+ inst->src[0].type != BRW_REGISTER_TYPE_F)
assert(!"unimplemented: saturate mixed types");
- if (brw_saturate_immediate(inst->dst.type,
+ if (brw_saturate_immediate(inst->src[0].type,
&inst->src[0].as_brw_reg())) {
inst->saturate = false;
progress = true;
}
break;
case BRW_OPCODE_OR:
- if (inst->src[0].equals(inst->src[1])) {
+ if (inst->src[0].equals(inst->src[1]) ||
+ inst->src[1].is_zero()) {
inst->opcode = BRW_OPCODE_MOV;
inst->src[1] = reg_undef;
progress = true;
}
break;
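+   /* A shuffle of a uniform value, or one with an immediate channel index,
+    * reduces to a MOV (of the selected component in the immediate case).
+    */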
+ case SHADER_OPCODE_SHUFFLE:
+ if (is_uniform(inst->src[0])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->sources = 1;
+ progress = true;
+ } else if (inst->src[1].file == IMM) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[0] = component(inst->src[0],
+ inst->src[1].ud);
+ inst->sources = 1;
+ progress = true;
+ }
+ break;
+
default:
break;
}
{
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
- if (stage != MESA_SHADER_FRAGMENT)
+ if (stage != MESA_SHADER_FRAGMENT || dispatch_width > 16)
return false;
if (devinfo->gen != 9 && !devinfo->is_cherryview)
return ((1 << n) - 1) << shift;
}
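+/**
+ * Try to fold a float CMP against zero followed by a predicated SEL into a
+ * single CSEL instruction (Gen8+).  A sketch of the basic case:
+ *
+ *    cmp.ge.f0  null<1>F  g10<8,8,1>F  0F
+ *    (+f0) sel  g12<1>F   g4<8,8,1>F   g5<8,8,1>F
+ *
+ * becomes
+ *
+ *    csel.ge  g12<1>F  g4<8,8,1>F  g5<8,8,1>F  g10<8,8,1>F
+ */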
+bool
+fs_visitor::opt_peephole_csel()
+{
+ if (devinfo->gen < 8)
+ return false;
+
+ bool progress = false;
+
+ foreach_block_reverse(block, cfg) {
+ int ip = block->end_ip + 1;
+
+ foreach_inst_in_block_reverse_safe(fs_inst, inst, block) {
+ ip--;
+
+ if (inst->opcode != BRW_OPCODE_SEL ||
+ inst->predicate != BRW_PREDICATE_NORMAL ||
+ (inst->dst.type != BRW_REGISTER_TYPE_F &&
+ inst->dst.type != BRW_REGISTER_TYPE_D &&
+ inst->dst.type != BRW_REGISTER_TYPE_UD))
+ continue;
+
+ /* Because it is a 3-src instruction, CSEL cannot have an immediate
+ * value as a source, but we can sometimes handle zero.
+ */
+ if ((inst->src[0].file != VGRF && inst->src[0].file != ATTR &&
+ inst->src[0].file != UNIFORM) ||
+ (inst->src[1].file != VGRF && inst->src[1].file != ATTR &&
+ inst->src[1].file != UNIFORM && !inst->src[1].is_zero()))
+ continue;
+
+ foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
+ if (!scan_inst->flags_written())
+ continue;
+
+ if ((scan_inst->opcode != BRW_OPCODE_CMP &&
+ scan_inst->opcode != BRW_OPCODE_MOV) ||
+ scan_inst->predicate != BRW_PREDICATE_NONE ||
+ (scan_inst->src[0].file != VGRF &&
+ scan_inst->src[0].file != ATTR &&
+ scan_inst->src[0].file != UNIFORM) ||
+ scan_inst->src[0].type != BRW_REGISTER_TYPE_F)
+ break;
+
+ if (scan_inst->opcode == BRW_OPCODE_CMP && !scan_inst->src[1].is_zero())
+ break;
+
+ const brw::fs_builder ibld(this, block, inst);
+
+ const enum brw_conditional_mod cond =
+ inst->predicate_inverse
+ ? brw_negate_cmod(scan_inst->conditional_mod)
+ : scan_inst->conditional_mod;
+
+ fs_inst *csel_inst = NULL;
+
+ if (inst->src[1].file != IMM) {
+ csel_inst = ibld.CSEL(inst->dst,
+ inst->src[0],
+ inst->src[1],
+ scan_inst->src[0],
+ cond);
+ } else if (cond == BRW_CONDITIONAL_NZ) {
+ /* Consider the sequence
+ *
+ * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
+ * (+f0) sel g124<1>UD g2<8,8,1>UD 0x00000000UD
+ *
+       * The sel will pick the immediate value 0 if g3 is ±0.0.
+ * Therefore, this sequence is equivalent:
+ *
+ * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
+ * (+f0) sel g124<1>F g2<8,8,1>F (abs)g3<8,8,1>F
+ *
+       * The abs ensures that the result is 0UD when g3 is -0.0F.
+ * By normal cmp-sel merging, this is also equivalent:
+ *
+ * csel.nz g124<1>F g2<4,4,1>F (abs)g3<4,4,1>F g3<4,4,1>F
+ */
+ csel_inst = ibld.CSEL(inst->dst,
+ inst->src[0],
+ scan_inst->src[0],
+ scan_inst->src[0],
+ cond);
+
+ csel_inst->src[1].abs = true;
+ }
+
+ if (csel_inst != NULL) {
+ progress = true;
+ inst->remove(block);
+ }
+
+ break;
+ }
+ }
+ }
+
+ return progress;
+}
+
bool
fs_visitor::compute_to_mrf()
{
write->mlen = 1;
} else {
assume(key->nr_color_regions > 0);
+
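+      /* Build the message header once from g0-g1; the loop below then only
+       * needs to patch the render target index (the third dword of the
+       * header) for each color region after the first.
+       */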
+ struct brw_reg header =
+ retype(brw_message_reg(base_mrf), BRW_REGISTER_TYPE_UD);
+ bld.exec_all().group(16, 0)
+ .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+
for (int i = 0; i < key->nr_color_regions; ++i) {
+ if (i > 0) {
+ bld.exec_all().group(1, 0)
+ .MOV(component(header, 2), brw_imm_ud(i));
+ }
+
write = bld.emit(FS_OPCODE_REP_FB_WRITE);
write->saturate = key->clamp_fragment_color;
write->base_mrf = base_mrf;
}
}
write->eot = true;
+ write->last_rt = true;
calculate_cfg();
inst->dst.type != BRW_REGISTER_TYPE_UD))
continue;
- /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
- * operation directly, but CHV/BXT cannot.
- */
- if (devinfo->gen >= 8 &&
- !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo))
+ if (devinfo->has_integer_dword_mul)
continue;
if (inst->src[1].file == IMM &&
int header_size = 2, payload_header_size;
unsigned length = 0;
- /* From the Sandy Bridge PRM, volume 4, page 198:
- *
- * "Dispatched Pixel Enables. One bit per pixel indicating
- * which pixels were originally enabled when the thread was
- * dispatched. This field is only required for the end-of-
- * thread message and on all dual-source messages."
- */
- if (devinfo->gen >= 6 &&
- (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
- color1.file == BAD_FILE &&
- key->nr_color_regions == 1) {
- header_size = 0;
- }
+ if (devinfo->gen < 6) {
+ /* TODO: Support SIMD32 on gen4-5 */
+ assert(bld.group() < 16);
+
+ /* For gen4-5, we always have a header consisting of g0 and g1. We have
+ * an implied MOV from g0,g1 to the start of the message. The MOV from
+ * g0 is handled by the hardware and the MOV from g1 is provided by the
+ * generator. This is required because, on gen4-5, the generator may
+ * generate two write messages with different message lengths in order
+ * to handle AA data properly.
+ *
+ * Also, since the pixel mask goes in the g0 portion of the message and
+ * since render target writes are the last thing in the shader, we write
+ * the pixel mask directly into g0 and it will get copied as part of the
+ * implied write.
+ */
+ if (prog_data->uses_kill) {
+ bld.exec_all().group(1, 0)
+ .MOV(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW),
+ brw_flag_reg(0, 1));
+ }
- if (header_size != 0) {
- assert(header_size == 2);
- /* Allocate 2 registers for a header */
- length += 2;
+ assert(length == 0);
+ length = 2;
+ } else if ((devinfo->gen <= 7 && !devinfo->is_haswell &&
+ prog_data->uses_kill) ||
+ color1.file != BAD_FILE ||
+ key->nr_color_regions > 1) {
+ /* From the Sandy Bridge PRM, volume 4, page 198:
+ *
+ * "Dispatched Pixel Enables. One bit per pixel indicating
+ * which pixels were originally enabled when the thread was
+ * dispatched. This field is only required for the end-of-
+ * thread message and on all dual-source messages."
+ */
+ const fs_builder ubld = bld.exec_all().group(8, 0);
+
+ fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
+ if (bld.group() < 16) {
+ /* The header starts off as g0 and g1 for the first half */
+ ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UD));
+ } else {
+ /* The header starts off as g0 and g2 for the second half */
+ assert(bld.group() < 32);
+ const fs_reg header_sources[2] = {
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
+ retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD),
+ };
+ ubld.LOAD_PAYLOAD(header, header_sources, 2, 0);
+ }
+
+ uint32_t g00_bits = 0;
+
+ /* Set "Source0 Alpha Present to RenderTarget" bit in message
+ * header.
+ */
+ if (inst->target > 0 && key->replicate_alpha)
+ g00_bits |= 1 << 11;
+
+      /* Set the bit indicating that the shader computes stencil for the
+       * render target.
+       */
+ if (prog_data->computed_stencil)
+ g00_bits |= 1 << 14;
+
+ if (g00_bits) {
+ /* OR extra bits into g0.0 */
+ ubld.group(1, 0).OR(component(header, 0),
+ retype(brw_vec1_grf(0, 0),
+ BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(g00_bits));
+ }
+
+ /* Set the render target index for choosing BLEND_STATE. */
+ if (inst->target > 0) {
+ ubld.group(1, 0).MOV(component(header, 2), brw_imm_ud(inst->target));
+ }
+
+ if (prog_data->uses_kill) {
+ assert(bld.group() < 16);
+ ubld.group(1, 0).MOV(retype(component(header, 15),
+ BRW_REGISTER_TYPE_UW),
+ brw_flag_reg(0, 1));
+ }
+
+ assert(length == 0);
+ sources[0] = header;
+ sources[1] = horiz_offset(header, 8);
+ length = 2;
}
+ assert(length == 0 || length == 2);
+ header_size = length;
- if (payload.aa_dest_stencil_reg) {
+ if (payload.aa_dest_stencil_reg[0]) {
+ assert(inst->group < 16);
sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
.MOV(sources[length],
- fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
+ fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg[0], 0)));
length++;
}
bld.exec_all().annotate("FB write oMask")
.MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
- inst->group),
+ inst->group % 16),
sample_mask);
length++;
}
if (src_stencil.file != BAD_FILE) {
assert(devinfo->gen >= 9);
- assert(bld.dispatch_width() != 16);
+ assert(bld.dispatch_width() == 8);
/* XXX: src_stencil is only available on gen9+. dst_depth is never
* available on gen9+. As such it's impossible to have both enabled at the
if (devinfo->gen < 6 && bld.dispatch_width() == 16)
load->dst.nr |= BRW_MRF_COMPR4;
- inst->resize_sources(0);
+ if (devinfo->gen < 6) {
+ /* Set up src[0] for the implied MOV from grf0-1 */
+ inst->resize_sources(1);
+ inst->src[0] = brw_vec8_grf(0, 0);
+ } else {
+ inst->resize_sources(0);
+ }
inst->base_mrf = 1;
}
static void
lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
{
- const fs_builder &ubld = bld.exec_all();
+ const fs_builder &ubld = bld.exec_all().group(8, 0);
const unsigned length = 2;
- const fs_reg header = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD, length);
+ const fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, length);
- ubld.group(16, 0)
- .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ if (bld.group() < 16) {
+ ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UD));
+ } else {
+ assert(bld.group() < 32);
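+      /* For the second half of a SIMD32 shader, the header is built from g0
+       * and g2, as in the render target write path.
+       */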
+ const fs_reg header_sources[] = {
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
+ retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD)
+ };
+ ubld.LOAD_PAYLOAD(header, header_sources, ARRAY_SIZE(header_sources), 0);
+ }
inst->resize_sources(1);
inst->src[0] = header;
op == SHADER_OPCODE_SAMPLEINFO ||
is_high_sampler(devinfo, sampler)) {
/* For general texture offsets (no txf workaround), we need a header to
- * put them in. Note that we're only reserving space for it in the
- * message payload as it will be initialized implicitly by the
- * generator.
+ * put them in.
*
* TG4 needs to place its channel select in the header, for interaction
* with ARB_texture_swizzle. The sampler index is only 4-bits, so for
* larger sampler numbers we need to offset the Sampler State Pointer in
* the header.
*/
+ fs_reg header = retype(sources[0], BRW_REGISTER_TYPE_UD);
header_size = 1;
- sources[0] = fs_reg();
length++;
/* If we're requesting fewer than four channels worth of response,
unsigned mask = ~((1 << (regs_written(inst) / reg_width)) - 1) & 0xf;
inst->offset |= mask << 12;
}
+
+ /* Build the actual header */
+ const fs_builder ubld = bld.exec_all().group(8, 0);
+ const fs_builder ubld1 = ubld.group(1, 0);
+ ubld.MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ if (inst->offset) {
+ ubld1.MOV(component(header, 2), brw_imm_ud(inst->offset));
+ } else if (bld.shader->stage != MESA_SHADER_VERTEX &&
+ bld.shader->stage != MESA_SHADER_FRAGMENT) {
+ /* The vertex and fragment stages have g0.2 set to 0, so
+ * header0.2 is 0 when g0 is copied. Other stages may not, so we
+ * must set it to 0 to avoid setting undesirable bits in the
+ * message.
+ */
+ ubld1.MOV(component(header, 2), brw_imm_ud(0));
+ }
+
+ if (is_high_sampler(devinfo, sampler)) {
+ if (sampler.file == BRW_IMMEDIATE_VALUE) {
+ assert(sampler.ud >= 16);
+ const int sampler_state_size = 16; /* 16 bytes */
+
+ ubld1.ADD(component(header, 3),
+ retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(16 * (sampler.ud / 16) * sampler_state_size));
+ } else {
+ fs_reg tmp = ubld1.vgrf(BRW_REGISTER_TYPE_UD);
+ ubld1.AND(tmp, sampler, brw_imm_ud(0x0f0));
+ ubld1.SHL(tmp, tmp, brw_imm_ud(4));
+ ubld1.ADD(component(header, 3),
+ retype(brw_vec1_grf(0, 3), BRW_REGISTER_TYPE_UD),
+ tmp);
+ }
+ }
}
if (shadow_c.file != BAD_FILE) {
lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
const fs_reg &sample_mask)
{
+ const gen_device_info *devinfo = bld.shader->devinfo;
+
/* Get the logical send arguments. */
const fs_reg &addr = inst->src[0];
const fs_reg &src = inst->src[1];
/* Calculate the total number of components of the payload. */
const unsigned addr_sz = inst->components_read(0);
const unsigned src_sz = inst->components_read(1);
- const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
+ /* From the BDW PRM Volume 7, page 147:
+ *
+ * "For the Data Cache Data Port*, the header must be present for the
+ * following message types: [...] Typed read/write/atomics"
+ *
+    * Earlier generations have similar wording. Because of this restriction
+ * we don't attempt to implement sample masks via predication for such
+ * messages prior to Gen9, since we have to provide a header anyway. On
+ * Gen11+ the header has been removed so we can only use predication.
+ */
+ const unsigned header_sz = devinfo->gen < 9 &&
+ (op == SHADER_OPCODE_TYPED_SURFACE_READ ||
+ op == SHADER_OPCODE_TYPED_SURFACE_WRITE ||
+ op == SHADER_OPCODE_TYPED_ATOMIC) ? 1 : 0;
const unsigned sz = header_sz + addr_sz + src_sz;
/* Allocate space for the payload. */
bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
+ /* Predicate the instruction on the sample mask if no header is
+ * provided.
+ */
+ if (!header_sz && sample_mask.file != BAD_FILE &&
+ sample_mask.file != IMM) {
+ const fs_builder ubld = bld.group(1, 0).exec_all();
+ if (inst->predicate) {
+ assert(inst->predicate == BRW_PREDICATE_NORMAL);
+ assert(!inst->predicate_inverse);
+ assert(inst->flag_subreg < 2);
+ /* Combine the sample mask with the existing predicate by using a
+ * vertical predication mode.
+ */
+ inst->predicate = BRW_PREDICATE_ALIGN1_ALLV;
+ ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg + 2),
+ sample_mask.type),
+ sample_mask);
+ } else {
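+         /* flag_subreg is in units of 16-bit subregisters, so subreg 2 is
+          * f1.0.
+          */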
+ inst->flag_subreg = 2;
+ inst->predicate = BRW_PREDICATE_NORMAL;
+ inst->predicate_inverse = false;
+ ubld.MOV(retype(brw_flag_subreg(inst->flag_subreg), sample_mask.type),
+ sample_mask);
+ }
+ }
+
/* Update the original instruction. */
inst->opcode = op;
inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
type_sz(inst->dst.type) == 4 && inst->dst.stride == 1 &&
type_sz(inst->src[i].type) == 2 && inst->src[i].stride == 1;
+ /* We check size_read(i) against size_written instead of REG_SIZE
+ * because we want to properly handle SIMD32. In SIMD32, you can end
+ * up with writes to 4 registers and a source that reads 2 registers
+ * and we may still need to lower all the way to SIMD8 in that case.
+ */
if (inst->size_written > REG_SIZE &&
- inst->size_read(i) != 0 && inst->size_read(i) <= REG_SIZE &&
+ inst->size_read(i) != 0 &&
+ inst->size_read(i) < inst->size_written &&
!is_scalar_exception && !is_packed_word_exception) {
const unsigned reg_count = DIV_ROUND_UP(inst->size_written, REG_SIZE);
max_width = MIN2(max_width, inst->exec_size / reg_count);
case BRW_OPCODE_MAD:
case BRW_OPCODE_LRP:
case FS_OPCODE_PACK:
+ case SHADER_OPCODE_SEL_EXEC:
+ case SHADER_OPCODE_CLUSTER_BROADCAST:
return get_fpu_lowered_simd_width(devinfo, inst);
case BRW_OPCODE_CMP: {
case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
return MIN2(8, inst->exec_size);
+ case SHADER_OPCODE_QUAD_SWIZZLE:
+ return 8;
+
case SHADER_OPCODE_MOV_INDIRECT: {
/* From IVB and HSW PRMs:
*
* after \p inst, inst->next is a moving target and we need to save
* it off here so that we insert the zip instructions in the right
* place.
+ *
+ * Since we're inserting split instructions after after_inst, the
+ * instructions will end up in the reverse order that we insert them.
+ * However, certain render target writes require that the low group
+ * instructions come before the high group. From the Ivy Bridge PRM
+ * Vol. 4, Pt. 1, Section 3.9.11:
+ *
+ * "If multiple SIMD8 Dual Source messages are delivered by the
+ * pixel shader thread, each SIMD8_DUALSRC_LO message must be
+ * issued before the SIMD8_DUALSRC_HI message with the same Slot
+ * Group Select setting."
+ *
+ * And, from Section 3.9.11.1 of the same PRM:
+ *
+ * "When SIMD32 or SIMD16 PS threads send render target writes
+ * with multiple SIMD8 and SIMD16 messages, the following must
+ * hold:
+ *
+ * All the slots (as described above) must have a corresponding
+ * render target write irrespective of the slot's validity. A slot
+ * is considered valid when at least one sample is enabled. For
+ * example, a SIMD16 PS thread must send two SIMD8 render target
+ * writes to cover all the slots.
+ *
+ * PS thread must send SIMD render target write messages with
+ * increasing slot numbers. For example, SIMD16 thread has
+ * Slot[15:0] and if two SIMD8 render target writes are used, the
+ * first SIMD8 render target write must send Slot[7:0] and the
+ * next one must send Slot[15:8]."
+ *
+ * In order to make low group instructions come before high group
+ * instructions (this is required for some render target writes), we
+ * split from the highest group to lowest.
*/
exec_node *const after_inst = inst->next;
- for (unsigned i = 0; i < n; i++) {
+ for (int i = n - 1; i >= 0; i--) {
/* Emit a copy of the original instruction with the lowered width.
* If the EOT flag was set throw it away except for the last
* instruction to avoid killing the thread prematurely.
*/
fs_inst split_inst = *inst;
split_inst.exec_size = lower_width;
- split_inst.eot = inst->eot && i == 0;
+ split_inst.eot = inst->eot && i == n - 1;
/* Select the correct channel enables for the i-th group, then
* transform the sources and destination and emit the lowered
fs_inst *inst = (fs_inst *)be_inst;
if (inst->predicate) {
- fprintf(file, "(%cf0.%d) ",
- inst->predicate_inverse ? '-' : '+',
- inst->flag_subreg);
+ fprintf(file, "(%cf%d.%d) ",
+ inst->predicate_inverse ? '-' : '+',
+ inst->flag_subreg / 2,
+ inst->flag_subreg % 2);
}
fprintf(file, "%s", brw_instruction_name(devinfo, inst->opcode));
fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
if (!inst->predicate &&
(devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
+ inst->opcode != BRW_OPCODE_CSEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
- fprintf(file, ".f0.%d", inst->flag_subreg);
+ fprintf(file, ".f%d.%d", inst->flag_subreg / 2,
+ inst->flag_subreg % 2);
}
}
fprintf(file, "(%d) ", inst->exec_size);
fprintf(file, "\n");
}
-/**
- * Possibly returns an instruction that set up @param reg.
- *
- * Sometimes we want to take the result of some expression/variable
- * dereference tree and rewrite the instruction generating the result
- * of the tree. When processing the tree, we know that the
- * instructions generated are all writing temporaries that are dead
- * outside of this tree. So, if we have some instructions that write
- * a temporary, we're free to point that temp write somewhere else.
- *
- * Note that this doesn't guarantee that the instruction generated
- * only reg -- it might be the size=4 destination of a texture instruction.
- */
-fs_inst *
-fs_visitor::get_instruction_generating_reg(fs_inst *start,
- fs_inst *end,
- const fs_reg ®)
-{
- if (end == start ||
- end->is_partial_write() ||
- !reg.equals(end->dst)) {
- return NULL;
- } else {
- return end;
- }
-}
-
void
fs_visitor::setup_fs_payload_gen6()
{
assert(stage == MESA_SHADER_FRAGMENT);
struct brw_wm_prog_data *prog_data = brw_wm_prog_data(this->prog_data);
-
+ const unsigned payload_width = MIN2(16, dispatch_width);
+ assert(dispatch_width % payload_width == 0);
assert(devinfo->gen >= 6);
- /* R0-1: masks, pixel X/Y coordinates. */
- payload.num_regs = 2;
- /* R2: only for 32-pixel dispatch.*/
-
- /* R3-26: barycentric interpolation coordinates. These appear in the
- * same order that they appear in the brw_barycentric_mode
- * enum. Each set of coordinates occupies 2 registers if dispatch width
- * == 8 and 4 registers if dispatch width == 16. Coordinates only
- * appear if they were enabled using the "Barycentric Interpolation
- * Mode" bits in WM_STATE.
+ prog_data->uses_src_depth = prog_data->uses_src_w =
+ (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
+
+ prog_data->uses_sample_mask =
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
+
+ /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
+ *
+ * "MSDISPMODE_PERSAMPLE is required in order to select
+ * POSOFFSET_SAMPLE"
+ *
+ * So we can only really get sample positions if we are doing real
+ * per-sample dispatch. If we need gl_SamplePosition and we don't have
+ * persample dispatch, we hard-code it to 0.5.
*/
- for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
- if (prog_data->barycentric_interp_modes & (1 << i)) {
- payload.barycentric_coord_reg[i] = payload.num_regs;
- payload.num_regs += 2;
- if (dispatch_width == 16) {
- payload.num_regs += 2;
- }
- }
+ prog_data->uses_pos_offset = prog_data->persample_dispatch &&
+ (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS);
+
+ /* R0: PS thread payload header. */
+ payload.num_regs++;
+
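+   /* Everything after R0 is replicated once per 16-channel group of the
+    * payload, so walk the groups in register order.
+    */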
+ for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
+ /* R1: masks, pixel X/Y coordinates. */
+ payload.subspan_coord_reg[j] = payload.num_regs++;
}
- /* R27: interpolated depth if uses source depth */
- prog_data->uses_src_depth =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
- if (prog_data->uses_src_depth) {
- payload.source_depth_reg = payload.num_regs;
- payload.num_regs++;
- if (dispatch_width == 16) {
- /* R28: interpolated depth if not SIMD8. */
- payload.num_regs++;
+ for (unsigned j = 0; j < dispatch_width / payload_width; j++) {
+ /* R3-26: barycentric interpolation coordinates. These appear in the
+ * same order that they appear in the brw_barycentric_mode enum. Each
+ * set of coordinates occupies 2 registers if dispatch width == 8 and 4
+ * registers if dispatch width == 16. Coordinates only appear if they
+ * were enabled using the "Barycentric Interpolation Mode" bits in
+ * WM_STATE.
+ */
+ for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
+ if (prog_data->barycentric_interp_modes & (1 << i)) {
+ payload.barycentric_coord_reg[i][j] = payload.num_regs;
+ payload.num_regs += payload_width / 4;
+ }
}
- }
- /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
- prog_data->uses_src_w =
- (nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
- if (prog_data->uses_src_w) {
- payload.source_w_reg = payload.num_regs;
- payload.num_regs++;
- if (dispatch_width == 16) {
- /* R30: interpolated W if not SIMD8. */
- payload.num_regs++;
+ /* R27-28: interpolated depth if uses source depth */
+ if (prog_data->uses_src_depth) {
+ payload.source_depth_reg[j] = payload.num_regs;
+ payload.num_regs += payload_width / 8;
}
- }
- /* R31: MSAA position offsets. */
- if (prog_data->persample_dispatch &&
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_POS)) {
- /* From the Ivy Bridge PRM documentation for 3DSTATE_PS:
- *
- * "MSDISPMODE_PERSAMPLE is required in order to select
- * POSOFFSET_SAMPLE"
- *
- * So we can only really get sample positions if we are doing real
- * per-sample dispatch. If we need gl_SamplePosition and we don't have
- * persample dispatch, we hard-code it to 0.5.
- */
- prog_data->uses_pos_offset = true;
- payload.sample_pos_reg = payload.num_regs;
- payload.num_regs++;
- }
+ /* R29-30: interpolated W set if GEN6_WM_USES_SOURCE_W. */
+ if (prog_data->uses_src_w) {
+ payload.source_w_reg[j] = payload.num_regs;
+ payload.num_regs += payload_width / 8;
+ }
- /* R32: MSAA input coverage mask */
- prog_data->uses_sample_mask =
- (nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
- if (prog_data->uses_sample_mask) {
- assert(devinfo->gen >= 7);
- payload.sample_mask_in_reg = payload.num_regs;
- payload.num_regs++;
- if (dispatch_width == 16) {
- /* R33: input coverage mask if not SIMD8. */
+ /* R31: MSAA position offsets. */
+ if (prog_data->uses_pos_offset) {
+ payload.sample_pos_reg[j] = payload.num_regs;
payload.num_regs++;
}
- }
- /* R34-: bary for 32-pixel. */
- /* R58-59: interp W for 32-pixel. */
+ /* R32-33: MSAA input coverage mask */
+ if (prog_data->uses_sample_mask) {
+ assert(devinfo->gen >= 7);
+ payload.sample_mask_in_reg[j] = payload.num_regs;
+ payload.num_regs += payload_width / 8;
+ }
+ }
if (nir->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
source_depth_to_render_target = true;
}
}
-/**
- * Look for repeated FS_OPCODE_MOV_DISPATCH_TO_FLAGS and drop the later ones.
- *
- * The needs_unlit_centroid_workaround ends up producing one of these per
- * channel of centroid input, so it's good to clean them up.
- *
- * An assumption here is that nothing ever modifies the dispatched pixels
- * value that FS_OPCODE_MOV_DISPATCH_TO_FLAGS reads from, but the hardware
- * dictates that anyway.
- */
-bool
-fs_visitor::opt_drop_redundant_mov_to_flags()
-{
- bool flag_mov_found[2] = {false};
- bool progress = false;
-
- /* Instructions removed by this pass can only be added if this were true */
- if (!devinfo->needs_unlit_centroid_workaround)
- return false;
-
- foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- if (inst->is_control_flow()) {
- memset(flag_mov_found, 0, sizeof(flag_mov_found));
- } else if (inst->opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS) {
- if (!flag_mov_found[inst->flag_subreg]) {
- flag_mov_found[inst->flag_subreg] = true;
- } else {
- inst->remove(block);
- progress = true;
- }
- } else if (inst->flags_written()) {
- flag_mov_found[inst->flag_subreg] = false;
- }
- }
-
- return progress;
-}
-
void
fs_visitor::optimize()
{
int iteration = 0;
int pass_num = 0;
- OPT(opt_drop_redundant_mov_to_flags);
OPT(remove_extra_rounding_modes);
do {
OPT(compact_virtual_grfs);
} while (progress);
+ /* Do this after cmod propagation has had every possible opportunity to
+ * propagate results into SEL instructions.
+ */
+ if (OPT(opt_peephole_csel))
+ OPT(dead_code_eliminate);
+
progress = false;
pass_num = 0;
* Initialize it with the dispatched pixels.
*/
if (wm_prog_data->uses_kill) {
- fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
- discard_init->flag_subreg = 1;
+ const fs_reg dispatch_mask =
+ devinfo->gen >= 6 ? brw_vec1_grf(1, 7) : brw_vec1_grf(0, 0);
+ bld.exec_all().group(1, 0)
+ .MOV(retype(brw_flag_reg(0, 1), BRW_REGISTER_TYPE_UW),
+ retype(dispatch_mask, BRW_REGISTER_TYPE_UW));
}
/* Generate FS IR for main(). (the visitor only descends into
const nir_shader *src_shader,
struct gl_program *prog,
int shader_time_index8, int shader_time_index16,
- bool allow_spilling,
+ int shader_time_index32, bool allow_spilling,
bool use_rep_send, struct brw_vue_map *vue_map,
char **error_str)
{
prog_data->barycentric_interp_modes =
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
- cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
- uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
- unsigned simd8_grf_used = 0, simd16_grf_used = 0;
+ cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL, *simd32_cfg = NULL;
fs_visitor v8(compiler, log_data, mem_ctx, key,
&prog_data->base, prog, shader, 8,
return NULL;
} else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
simd8_cfg = v8.cfg;
- simd8_grf_start = v8.payload.num_regs;
- simd8_grf_used = v8.grf_used;
+ prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
+ prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
}
if (v8.max_dispatch_width >= 16 &&
v16.fail_msg);
} else {
simd16_cfg = v16.cfg;
- simd16_grf_start = v16.payload.num_regs;
- simd16_grf_used = v16.grf_used;
+ prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
+ prog_data->reg_blocks_16 = brw_register_blocks(v16.grf_used);
+ }
+ }
+
+ /* Currently, the compiler only supports SIMD32 on SNB+ */
+ if (v8.max_dispatch_width >= 32 && !use_rep_send &&
+ compiler->devinfo->gen >= 6 &&
+ unlikely(INTEL_DEBUG & DEBUG_DO32)) {
+ /* Try a SIMD32 compile */
+ fs_visitor v32(compiler, log_data, mem_ctx, key,
+ &prog_data->base, prog, shader, 32,
+ shader_time_index32);
+ v32.import_uniforms(&v8);
+ if (!v32.run_fs(allow_spilling, false)) {
+ compiler->shader_perf_log(log_data,
+ "SIMD32 shader failed to compile: %s",
+ v32.fail_msg);
+ } else {
+ simd32_cfg = v32.cfg;
+ prog_data->dispatch_grf_start_reg_32 = v32.payload.num_regs;
+ prog_data->reg_blocks_32 = brw_register_blocks(v32.grf_used);
}
}
* Instead, we just give them exactly one shader and we pick the widest one
* available.
*/
- if (compiler->devinfo->gen < 5 && simd16_cfg)
- simd8_cfg = NULL;
+ if (compiler->devinfo->gen < 5) {
+ if (simd32_cfg || simd16_cfg)
+ simd8_cfg = NULL;
+ if (simd32_cfg)
+ simd16_cfg = NULL;
+ }
+
+   /* If computed depth is enabled, SNB only allows SIMD8. */
+ if (compiler->devinfo->gen == 6 &&
+ prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF)
+ assert(simd16_cfg == NULL && simd32_cfg == NULL);
+
+ if (compiler->devinfo->gen <= 5 && !simd8_cfg) {
+      /* Ironlake and earlier only have one Dispatch GRF start field. Make
+ * the data available in the base prog data struct for convenience.
+ */
+ if (simd16_cfg) {
+ prog_data->base.dispatch_grf_start_reg =
+ prog_data->dispatch_grf_start_reg_16;
+ } else if (simd32_cfg) {
+ prog_data->base.dispatch_grf_start_reg =
+ prog_data->dispatch_grf_start_reg_32;
+ }
+ }
if (prog_data->persample_dispatch) {
/* Starting with SandyBridge (where we first get MSAA), the different
* through F (SNB PRM Vol. 2 Part 1 Section 7.7.1). On all hardware
* generations, the only configurations supporting persample dispatch
* are those in which only one dispatch width is enabled.
- *
- * If computed depth is enabled, SNB only allows SIMD8 while IVB+
- * allow SIMD8 or SIMD16 so we choose SIMD16 if available.
*/
- if (compiler->devinfo->gen == 6 &&
- prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF) {
- simd16_cfg = NULL;
- } else if (simd16_cfg) {
+ if (simd32_cfg || simd16_cfg)
simd8_cfg = NULL;
- }
+ if (simd32_cfg)
+ simd16_cfg = NULL;
}
/* We have to compute the flat inputs after the visitor is finished running
*/
brw_compute_flat_inputs(prog_data, shader);
- fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
v8.promoted_constants, v8.runtime_check_aads_emit,
MESA_SHADER_FRAGMENT);
if (simd8_cfg) {
prog_data->dispatch_8 = true;
g.generate_code(simd8_cfg, 8);
- prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
- prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
+ }
- if (simd16_cfg) {
- prog_data->dispatch_16 = true;
- prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
- prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
- prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
- }
- } else if (simd16_cfg) {
+ if (simd16_cfg) {
prog_data->dispatch_16 = true;
- g.generate_code(simd16_cfg, 16);
- prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
- prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
+ prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
+ }
+
+ if (simd32_cfg) {
+ prog_data->dispatch_32 = true;
+ prog_data->prog_offset_32 = g.generate_code(simd32_cfg, 32);
}
- return g.get_assembly(&prog_data->base.program_size);
+ return g.get_assembly();
}
fs_reg *
compile_cs_to_nir(const struct brw_compiler *compiler,
void *mem_ctx,
const struct brw_cs_prog_key *key,
- struct brw_cs_prog_data *prog_data,
const nir_shader *src_shader,
unsigned dispatch_width)
{
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *cfg = NULL;
const char *fail_msg = NULL;
- unsigned promoted_constants;
+ unsigned promoted_constants = 0;
/* Now the main event: Visit the shader IR and generate our CS IR for it.
*/
if (min_dispatch_width <= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 8);
+ src_shader, 8);
v8 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir8, 8, shader_time_index);
!fail_msg && min_dispatch_width <= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 16);
+ src_shader, 16);
v16 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir16, 16, shader_time_index);
if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 32);
+ src_shader, 32);
v32 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir32, 32, shader_time_index);
if (error_str)
*error_str = ralloc_strdup(mem_ctx, fail_msg);
} else {
- fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
promoted_constants, false, MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
g.generate_code(cfg, prog_data->simd_size);
- ret = g.get_assembly(&prog_data->base.program_size);
+ ret = g.get_assembly();
}
delete v8;