vec4_result, surf_index, vec4_offset);
inst->size_written = 4 * vec4_result.component_size(inst->exec_size);
- fs_reg dw = offset(vec4_result, bld, (const_offset & 0xf) / 4);
- switch (type_sz(dst.type)) {
- case 2:
- shuffle_32bit_load_result_to_16bit_data(bld, dst, dw, 0, 1);
- bld.MOV(dst, subscript(dw, dst.type, (const_offset / 2) & 1));
- break;
- case 4:
- bld.MOV(dst, retype(dw, dst.type));
- break;
- case 8:
- shuffle_32bit_load_result_to_64bit_data(bld, dst, dw, 1);
- break;
- default:
- unreachable("Unsupported bit_size");
- }
+ shuffle_from_32bit_read(bld, dst, vec4_result,
+ (const_offset & 0xf) / type_sz(dst.type), 1);
}
/**
case FS_OPCODE_PACK_HALF_2x16_SPLIT:
/* Multiple partial writes to the destination */
return true;
+ case SHADER_OPCODE_SHUFFLE:
+ /* This instruction returns an arbitrary channel from the source and
+ * gets split into smaller instructions in the generator. It's possible
+ * that one of the instructions will read from a channel corresponding
+ * to an earlier instruction.
+ */
+ case SHADER_OPCODE_SEL_EXEC:
+ /* This is implemented as
+ *
+ * mov(16) g4<1>D 0D { align1 WE_all 1H };
+ * mov(16) g4<1>D g5<8,8,1>D { align1 1H }
+ *
+ * Because the source is only read in the second instruction, the first
+ * may stomp all over it.
+ */
+ return true;
default:
/* The SIMD16 compressed instruction
*
stride == r.stride);
}
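+/* Like equals(), but matches when this register is the negation of \p r;
+ * the stride must still match.
+ */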
+bool
+fs_reg::negative_equals(const fs_reg &r) const
+{
+ return (this->backend_reg::negative_equals(r) &&
+ stride == r.stride);
+}
+
bool
fs_reg::is_contiguous() const
{
case GLSL_TYPE_INT16:
case GLSL_TYPE_FLOAT16:
return DIV_ROUND_UP(type->components(), 2);
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return DIV_ROUND_UP(type->components(), 4);
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT64:
else
return 1;
}
+ case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
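+ /* src[0] is the per-slot (x, y) offset vector. */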
+ return (i == 0 ? 2 : 1);
default:
return 1;
{
switch (opcode) {
case FS_OPCODE_FB_WRITE:
+ case FS_OPCODE_REP_FB_WRITE:
+ if (arg == 0) {
+ if (base_mrf >= 0)
+ return src[0].file == BAD_FILE ? 0 : 2 * REG_SIZE;
+ else
+ return mlen * REG_SIZE;
+ }
+ break;
+
case FS_OPCODE_FB_READ:
case SHADER_OPCODE_URB_WRITE_SIMD8:
case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
case SHADER_OPCODE_TYPED_ATOMIC:
case SHADER_OPCODE_TYPED_SURFACE_READ:
case SHADER_OPCODE_TYPED_SURFACE_WRITE:
- case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
+ case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
+ case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
case SHADER_OPCODE_BYTE_SCATTERED_WRITE:
case SHADER_OPCODE_BYTE_SCATTERED_READ:
if (arg == 0)
fs_inst::flags_written() const
{
if ((conditional_mod && (opcode != BRW_OPCODE_SEL &&
+ opcode != BRW_OPCODE_CSEL &&
opcode != BRW_OPCODE_IF &&
opcode != BRW_OPCODE_WHILE)) ||
opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS ||
- opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL) {
+ opcode == SHADER_OPCODE_FIND_LIVE_CHANNEL ||
+ opcode == FS_OPCODE_FB_WRITE) {
return flag_mask(this);
} else {
return flag_mask(dst, size_written);
case SHADER_OPCODE_SAMPLEINFO:
return 1;
case FS_OPCODE_FB_WRITE:
- return 2;
+ case FS_OPCODE_REP_FB_WRITE:
+ return inst->src[0].file == BAD_FILE ? 0 : 2;
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
/* gl_FragCoord.z */
if (devinfo->gen >= 6) {
- bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
+ bld.MOV(wpos, fetch_payload_reg(bld, payload.source_depth_reg));
} else {
bld.emit(FS_OPCODE_LINTERP, wpos,
- this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
- interp_reg(VARYING_SLOT_POS, 2));
+ this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL],
+ component(interp_reg(VARYING_SLOT_POS, 2), 0));
}
wpos = offset(wpos, bld, 1);
* The X, Y sample positions come in as bytes in thread payload. So, read
* the positions using vstride=16, width=8, hstride=2.
*/
- struct brw_reg sample_pos_reg =
- stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
- BRW_REGISTER_TYPE_B), 16, 8, 2);
+ const fs_reg sample_pos_reg =
+ fetch_payload_reg(abld, payload.sample_pos_reg, BRW_REGISTER_TYPE_W);
- if (dispatch_width == 8) {
- abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
- } else {
- abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
- abld.half(1).MOV(half(int_sample_x, 1),
- fs_reg(suboffset(sample_pos_reg, 16)));
- }
/* Compute gl_SamplePosition.x */
- compute_sample_position(pos, int_sample_x);
- pos = offset(pos, abld, 1);
- if (dispatch_width == 8) {
- abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
- } else {
- abld.half(0).MOV(half(int_sample_y, 0),
- fs_reg(suboffset(sample_pos_reg, 1)));
- abld.half(1).MOV(half(int_sample_y, 1),
- fs_reg(suboffset(sample_pos_reg, 17)));
- }
+ abld.MOV(int_sample_x, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 0));
+ compute_sample_position(offset(pos, abld, 0), int_sample_x);
+
/* Compute gl_SamplePosition.y */
- compute_sample_position(pos, int_sample_y);
+ abld.MOV(int_sample_y, subscript(sample_pos_reg, BRW_REGISTER_TYPE_B, 1));
+ compute_sample_position(offset(pos, abld, 1), int_sample_y);
return reg;
}
fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));
- fs_reg coverage_mask(retype(brw_vec8_grf(payload.sample_mask_in_reg, 0),
- BRW_REGISTER_TYPE_D));
+ fs_reg coverage_mask =
+ fetch_payload_reg(bld, payload.sample_mask_in_reg, BRW_REGISTER_TYPE_D);
if (wm_prog_data->persample_dispatch) {
/* gl_SampleMaskIn[] comes from two sources: the input coverage mask,
* setup regs, now that the location of the constants has been chosen.
*/
foreach_block_and_inst(block, fs_inst, inst, cfg) {
- if (inst->opcode == FS_OPCODE_LINTERP) {
- assert(inst->src[1].file == FIXED_GRF);
- inst->src[1].nr += urb_start;
- }
-
- if (inst->opcode == FS_OPCODE_CINTERP) {
- assert(inst->src[0].file == FIXED_GRF);
- inst->src[0].nr += urb_start;
+ for (int i = 0; i < inst->sources; i++) {
+ if (inst->src[i].file == ATTR) {
+ /* ATTR regs in the FS are in units of logical scalar inputs, each
+ * of which consumes half of a GRF register.
+ */
+ assert(inst->src[i].offset < REG_SIZE / 2);
+ const unsigned grf = urb_start + inst->src[i].nr / 2;
+ const unsigned offset = (inst->src[i].nr % 2) * (REG_SIZE / 2) +
+ inst->src[i].offset;
+ const unsigned width = inst->src[i].stride == 0 ?
+ 1 : MIN2(inst->exec_size, 8);
+ struct brw_reg reg = stride(
+ byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
+ offset),
+ width * inst->src[i].stride,
+ width, inst->src[i].stride);
+ reg.abs = inst->src[i].abs;
+ reg.negate = inst->src[i].negate;
+ inst->src[i] = reg;
+ }
}
}
static void
cplx_align_assert_sane(struct cplx_align a)
{
- assert(a.mul > 0 && util_is_power_of_two(a.mul));
+ assert(a.mul > 0 && util_is_power_of_two_nonzero(a.mul));
assert(a.offset < a.mul);
}
mark_uniform_slots_read(struct uniform_slot_info *slots,
unsigned num_slots, unsigned alignment)
{
- assert(alignment > 0 && util_is_power_of_two(alignment));
+ assert(alignment > 0 && util_is_power_of_two_nonzero(alignment));
assert(alignment <= CPLX_ALIGN_MAX_MUL);
/* We can't align a slot to anything less than the slot size */
}
break;
case BRW_OPCODE_OR:
- if (inst->src[0].equals(inst->src[1])) {
+ if (inst->src[0].equals(inst->src[1]) ||
+ inst->src[1].is_zero()) {
inst->opcode = BRW_OPCODE_MOV;
inst->src[1] = reg_undef;
progress = true;
}
break;
+ case SHADER_OPCODE_SHUFFLE:
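+ /* A uniform source or an immediate channel index reduces the shuffle
+ * to a MOV of the selected component.
+ */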
+ if (is_uniform(inst->src[0])) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->sources = 1;
+ progress = true;
+ } else if (inst->src[1].file == IMM) {
+ inst->opcode = BRW_OPCODE_MOV;
+ inst->src[0] = component(inst->src[0],
+ inst->src[1].ud);
+ inst->sources = 1;
+ progress = true;
+ }
+ break;
+
default:
break;
}
{
brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
- if (stage != MESA_SHADER_FRAGMENT)
+ if (stage != MESA_SHADER_FRAGMENT || dispatch_width > 16)
return false;
if (devinfo->gen != 9 && !devinfo->is_cherryview)
return ((1 << n) - 1) << shift;
}
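+/**
+ * Fuse a flag-writing CMP (or MOV) and the predicated SEL that consumes its
+ * result into a single CSEL instruction on Gen8+.
+ */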
+bool
+fs_visitor::opt_peephole_csel()
+{
+ if (devinfo->gen < 8)
+ return false;
+
+ bool progress = false;
+
+ foreach_block_reverse(block, cfg) {
+ int ip = block->end_ip + 1;
+
+ foreach_inst_in_block_reverse_safe(fs_inst, inst, block) {
+ ip--;
+
+ if (inst->opcode != BRW_OPCODE_SEL ||
+ inst->predicate != BRW_PREDICATE_NORMAL ||
+ (inst->dst.type != BRW_REGISTER_TYPE_F &&
+ inst->dst.type != BRW_REGISTER_TYPE_D &&
+ inst->dst.type != BRW_REGISTER_TYPE_UD))
+ continue;
+
+ /* Because it is a 3-src instruction, CSEL cannot have an immediate
+ * value as a source, but we can sometimes handle zero.
+ */
+ if ((inst->src[0].file != VGRF && inst->src[0].file != ATTR &&
+ inst->src[0].file != UNIFORM) ||
+ (inst->src[1].file != VGRF && inst->src[1].file != ATTR &&
+ inst->src[1].file != UNIFORM && !inst->src[1].is_zero()))
+ continue;
+
+ foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst) {
+ if (!scan_inst->flags_written())
+ continue;
+
+ if ((scan_inst->opcode != BRW_OPCODE_CMP &&
+ scan_inst->opcode != BRW_OPCODE_MOV) ||
+ scan_inst->predicate != BRW_PREDICATE_NONE ||
+ (scan_inst->src[0].file != VGRF &&
+ scan_inst->src[0].file != ATTR &&
+ scan_inst->src[0].file != UNIFORM) ||
+ scan_inst->src[0].type != BRW_REGISTER_TYPE_F)
+ break;
+
+ if (scan_inst->opcode == BRW_OPCODE_CMP && !scan_inst->src[1].is_zero())
+ break;
+
+ const brw::fs_builder ibld(this, block, inst);
+
+ const enum brw_conditional_mod cond =
+ inst->predicate_inverse
+ ? brw_negate_cmod(scan_inst->conditional_mod)
+ : scan_inst->conditional_mod;
+
+ fs_inst *csel_inst = NULL;
+
+ if (inst->src[1].file != IMM) {
+ csel_inst = ibld.CSEL(inst->dst,
+ inst->src[0],
+ inst->src[1],
+ scan_inst->src[0],
+ cond);
+ } else if (cond == BRW_CONDITIONAL_NZ) {
+ /* Consider the sequence
+ *
+ * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
+ * (+f0) sel g124<1>UD g2<8,8,1>UD 0x00000000UD
+ *
+ * The sel will pick the immediate value 0 if g3 is ±0.0.
+ * Therefore, this sequence is equivalent:
+ *
+ * cmp.nz.f0 null<1>F g3<8,8,1>F 0F
+ * (+f0) sel g124<1>F g2<8,8,1>F (abs)g3<8,8,1>F
+ *
+ * The abs ensures that the result is 0UD when g3 is -0.0F.
+ * By normal cmp-sel merging, this is also equivalent:
+ *
+ * csel.nz g124<1>F g2<4,4,1>F (abs)g3<4,4,1>F g3<4,4,1>F
+ */
+ csel_inst = ibld.CSEL(inst->dst,
+ inst->src[0],
+ scan_inst->src[0],
+ scan_inst->src[0],
+ cond);
+
+ csel_inst->src[1].abs = true;
+ }
+
+ if (csel_inst != NULL) {
+ progress = true;
+ inst->remove(block);
+ }
+
+ break;
+ }
+ }
+ }
+
+ return progress;
+}
+
bool
fs_visitor::compute_to_mrf()
{
write->mlen = 1;
} else {
assume(key->nr_color_regions > 0);
+
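+ /* Copy g0-g1 into the message header; the render target index in DWord 2
+ * is rewritten below for each color region after the first.
+ */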
+ struct brw_reg header =
+ retype(brw_message_reg(base_mrf), BRW_REGISTER_TYPE_UD);
+ bld.exec_all().group(16, 0)
+ .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+
for (int i = 0; i < key->nr_color_regions; ++i) {
+ if (i > 0) {
+ bld.exec_all().group(1, 0)
+ .MOV(component(header, 2), brw_imm_ud(i));
+ }
+
write = bld.emit(FS_OPCODE_REP_FB_WRITE);
write->saturate = key->clamp_fragment_color;
write->base_mrf = base_mrf;
}
}
write->eot = true;
+ write->last_rt = true;
calculate_cfg();
int header_size = 2, payload_header_size;
unsigned length = 0;
- /* From the Sandy Bridge PRM, volume 4, page 198:
- *
- * "Dispatched Pixel Enables. One bit per pixel indicating
- * which pixels were originally enabled when the thread was
- * dispatched. This field is only required for the end-of-
- * thread message and on all dual-source messages."
- */
- if (devinfo->gen >= 6 &&
- (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
- color1.file == BAD_FILE &&
- key->nr_color_regions == 1) {
- header_size = 0;
- }
+ if (devinfo->gen < 6) {
+ /* TODO: Support SIMD32 on gen4-5 */
+ assert(bld.group() < 16);
+
+ /* For gen4-5, we always have a header consisting of g0 and g1. We have
+ * an implied MOV from g0,g1 to the start of the message. The MOV from
+ * g0 is handled by the hardware and the MOV from g1 is provided by the
+ * generator. This is required because, on gen4-5, the generator may
+ * generate two write messages with different message lengths in order
+ * to handle AA data properly.
+ *
+ * Also, since the pixel mask goes in the g0 portion of the message and
+ * since render target writes are the last thing in the shader, we write
+ * the pixel mask directly into g0 and it will get copied as part of the
+ * implied write.
+ */
+ if (prog_data->uses_kill) {
+ bld.exec_all().group(1, 0)
+ .MOV(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW),
+ brw_flag_reg(0, 1));
+ }
+
+ assert(length == 0);
+ length = 2;
+ } else if ((devinfo->gen <= 7 && !devinfo->is_haswell &&
+ prog_data->uses_kill) ||
+ color1.file != BAD_FILE ||
+ key->nr_color_regions > 1) {
+ /* From the Sandy Bridge PRM, volume 4, page 198:
+ *
+ * "Dispatched Pixel Enables. One bit per pixel indicating
+ * which pixels were originally enabled when the thread was
+ * dispatched. This field is only required for the end-of-
+ * thread message and on all dual-source messages."
+ */
+ const fs_builder ubld = bld.exec_all().group(8, 0);
+
+ fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
+ if (bld.group() < 16) {
+ /* The header starts off as g0 and g1 for the first half */
+ ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UD));
+ } else {
+ /* The header starts off as g0 and g2 for the second half */
+ assert(bld.group() < 32);
+ const fs_reg header_sources[2] = {
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
+ retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD),
+ };
+ ubld.LOAD_PAYLOAD(header, header_sources, 2, 0);
+ }
- if (header_size != 0) {
- assert(header_size == 2);
- /* Allocate 2 registers for a header */
- length += 2;
+ uint32_t g00_bits = 0;
+
+ /* Set "Source0 Alpha Present to RenderTarget" bit in message
+ * header.
+ */
+ if (inst->target > 0 && key->replicate_alpha)
+ g00_bits |= 1 << 11;
+
+ /* Indicate that the shader writes a computed stencil value to the
+ * render target.
+ */
+ if (prog_data->computed_stencil)
+ g00_bits |= 1 << 14;
+
+ if (g00_bits) {
+ /* OR extra bits into g0.0 */
+ ubld.group(1, 0).OR(component(header, 0),
+ retype(brw_vec1_grf(0, 0),
+ BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(g00_bits));
+ }
+
+ /* Set the render target index for choosing BLEND_STATE. */
+ if (inst->target > 0) {
+ ubld.group(1, 0).MOV(component(header, 2), brw_imm_ud(inst->target));
+ }
+
+ if (prog_data->uses_kill) {
+ assert(bld.group() < 16);
+ ubld.group(1, 0).MOV(retype(component(header, 15),
+ BRW_REGISTER_TYPE_UW),
+ brw_flag_reg(0, 1));
+ }
+
+ assert(length == 0);
+ sources[0] = header;
+ sources[1] = horiz_offset(header, 8);
+ length = 2;
}
+ assert(length == 0 || length == 2);
+ header_size = length;
- if (payload.aa_dest_stencil_reg) {
+ if (payload.aa_dest_stencil_reg[0]) {
+ assert(inst->group < 16);
sources[length] = fs_reg(VGRF, bld.shader->alloc.allocate(1));
bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
.MOV(sources[length],
- fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
+ fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg[0], 0)));
length++;
}
bld.exec_all().annotate("FB write oMask")
.MOV(horiz_offset(retype(sources[length], BRW_REGISTER_TYPE_UW),
- inst->group),
+ inst->group % 16),
sample_mask);
length++;
}
if (src_stencil.file != BAD_FILE) {
assert(devinfo->gen >= 9);
- assert(bld.dispatch_width() != 16);
+ assert(bld.dispatch_width() == 8);
/* XXX: src_stencil is only available on gen9+. dst_depth is never
* available on gen9+. As such it's impossible to have both enabled at the
if (devinfo->gen < 6 && bld.dispatch_width() == 16)
load->dst.nr |= BRW_MRF_COMPR4;
- inst->resize_sources(0);
+ if (devinfo->gen < 6) {
+ /* Set up src[0] for the implied MOV from grf0-1 */
+ inst->resize_sources(1);
+ inst->src[0] = brw_vec8_grf(0, 0);
+ } else {
+ inst->resize_sources(0);
+ }
inst->base_mrf = 1;
}
static void
lower_fb_read_logical_send(const fs_builder &bld, fs_inst *inst)
{
- const fs_builder &ubld = bld.exec_all();
+ const fs_builder &ubld = bld.exec_all().group(8, 0);
const unsigned length = 2;
- const fs_reg header = ubld.group(8, 0).vgrf(BRW_REGISTER_TYPE_UD, length);
+ const fs_reg header = ubld.vgrf(BRW_REGISTER_TYPE_UD, length);
- ubld.group(16, 0)
- .MOV(header, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ if (bld.group() < 16) {
+ ubld.group(16, 0).MOV(header, retype(brw_vec8_grf(0, 0),
+ BRW_REGISTER_TYPE_UD));
+ } else {
+ assert(bld.group() < 32);
+ const fs_reg header_sources[] = {
+ retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
+ retype(brw_vec8_grf(2, 0), BRW_REGISTER_TYPE_UD)
+ };
+ ubld.LOAD_PAYLOAD(header, header_sources, ARRAY_SIZE(header_sources), 0);
+ }
inst->resize_sources(1);
inst->src[0] = header;
case BRW_OPCODE_MAD:
case BRW_OPCODE_LRP:
case FS_OPCODE_PACK:
+ case SHADER_OPCODE_SEL_EXEC:
+ case SHADER_OPCODE_CLUSTER_BROADCAST:
return get_fpu_lowered_simd_width(devinfo, inst);
case BRW_OPCODE_CMP: {
case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
return MIN2(8, inst->exec_size);
+ case SHADER_OPCODE_QUAD_SWIZZLE:
+ return 8;
+
case SHADER_OPCODE_MOV_INDIRECT: {
/* From IVB and HSW PRMs:
*
* after \p inst, inst->next is a moving target and we need to save
* it off here so that we insert the zip instructions in the right
* place.
+ *
+ * Since we're inserting split instructions after after_inst, the
+ * instructions will end up in the reverse order that we insert them.
+ * However, certain render target writes require that the low group
+ * instructions come before the high group. From the Ivy Bridge PRM
+ * Vol. 4, Pt. 1, Section 3.9.11:
+ *
+ * "If multiple SIMD8 Dual Source messages are delivered by the
+ * pixel shader thread, each SIMD8_DUALSRC_LO message must be
+ * issued before the SIMD8_DUALSRC_HI message with the same Slot
+ * Group Select setting."
+ *
+ * And, from Section 3.9.11.1 of the same PRM:
+ *
+ * "When SIMD32 or SIMD16 PS threads send render target writes
+ * with multiple SIMD8 and SIMD16 messages, the following must
+ * hold:
+ *
+ * All the slots (as described above) must have a corresponding
+ * render target write irrespective of the slot's validity. A slot
+ * is considered valid when at least one sample is enabled. For
+ * example, a SIMD16 PS thread must send two SIMD8 render target
+ * writes to cover all the slots.
+ *
+ * PS thread must send SIMD render target write messages with
+ * increasing slot numbers. For example, SIMD16 thread has
+ * Slot[15:0] and if two SIMD8 render target writes are used, the
+ * first SIMD8 render target write must send Slot[7:0] and the
+ * next one must send Slot[15:8]."
+ *
+ * In order to make low group instructions come before high group
+ * instructions (this is required for some render target writes), we
+ * split from the highest group to lowest.
*/
exec_node *const after_inst = inst->next;
- for (unsigned i = 0; i < n; i++) {
+ for (int i = n - 1; i >= 0; i--) {
/* Emit a copy of the original instruction with the lowered width.
* If the EOT flag was set throw it away except for the last
* instruction to avoid killing the thread prematurely.
*/
fs_inst split_inst = *inst;
split_inst.exec_size = lower_width;
- split_inst.eot = inst->eot && i == 0;
+ split_inst.eot = inst->eot && i == n - 1;
/* Select the correct channel enables for the i-th group, then
* transform the sources and destination and emit the lowered
fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
if (!inst->predicate &&
(devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
+ inst->opcode != BRW_OPCODE_CSEL &&
inst->opcode != BRW_OPCODE_IF &&
inst->opcode != BRW_OPCODE_WHILE))) {
fprintf(file, ".f%d.%d", inst->flag_subreg / 2,
*/
for (int i = 0; i < BRW_BARYCENTRIC_MODE_COUNT; ++i) {
if (prog_data->barycentric_interp_modes & (1 << i)) {
- payload.barycentric_coord_reg[i] = payload.num_regs;
+ payload.barycentric_coord_reg[i][0] = payload.num_regs;
payload.num_regs += 2;
if (dispatch_width == 16) {
payload.num_regs += 2;
prog_data->uses_src_depth =
(nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_depth) {
- payload.source_depth_reg = payload.num_regs;
+ payload.source_depth_reg[0] = payload.num_regs;
payload.num_regs++;
if (dispatch_width == 16) {
/* R28: interpolated depth if not SIMD8. */
prog_data->uses_src_w =
(nir->info.inputs_read & (1 << VARYING_SLOT_POS)) != 0;
if (prog_data->uses_src_w) {
- payload.source_w_reg = payload.num_regs;
+ payload.source_w_reg[0] = payload.num_regs;
payload.num_regs++;
if (dispatch_width == 16) {
/* R30: interpolated W if not SIMD8. */
* persample dispatch, we hard-code it to 0.5.
*/
prog_data->uses_pos_offset = true;
- payload.sample_pos_reg = payload.num_regs;
+ payload.sample_pos_reg[0] = payload.num_regs;
payload.num_regs++;
}
(nir->info.system_values_read & SYSTEM_BIT_SAMPLE_MASK_IN) != 0;
if (prog_data->uses_sample_mask) {
assert(devinfo->gen >= 7);
- payload.sample_mask_in_reg = payload.num_regs;
+ payload.sample_mask_in_reg[0] = payload.num_regs;
payload.num_regs++;
if (dispatch_width == 16) {
/* R33: input coverage mask if not SIMD8. */
OPT(compact_virtual_grfs);
} while (progress);
+ /* Do this after cmod propagation has had every possible opportunity to
+ * propagate results into SEL instructions.
+ */
+ if (OPT(opt_peephole_csel))
+ OPT(dead_code_eliminate);
+
progress = false;
pass_num = 0;
const nir_shader *src_shader,
struct gl_program *prog,
int shader_time_index8, int shader_time_index16,
- bool allow_spilling,
+ int shader_time_index32, bool allow_spilling,
bool use_rep_send, struct brw_vue_map *vue_map,
char **error_str)
{
brw_compute_barycentric_interp_modes(compiler->devinfo, shader);
cfg_t *simd8_cfg = NULL, *simd16_cfg = NULL;
- uint8_t simd8_grf_start = 0, simd16_grf_start = 0;
- unsigned simd8_grf_used = 0, simd16_grf_used = 0;
fs_visitor v8(compiler, log_data, mem_ctx, key,
&prog_data->base, prog, shader, 8,
return NULL;
} else if (likely(!(INTEL_DEBUG & DEBUG_NO8))) {
simd8_cfg = v8.cfg;
- simd8_grf_start = v8.payload.num_regs;
- simd8_grf_used = v8.grf_used;
+ prog_data->base.dispatch_grf_start_reg = v8.payload.num_regs;
+ prog_data->reg_blocks_8 = brw_register_blocks(v8.grf_used);
}
if (v8.max_dispatch_width >= 16 &&
v16.fail_msg);
} else {
simd16_cfg = v16.cfg;
- simd16_grf_start = v16.payload.num_regs;
- simd16_grf_used = v16.grf_used;
+ prog_data->dispatch_grf_start_reg_16 = v16.payload.num_regs;
+ prog_data->reg_blocks_16 = brw_register_blocks(v16.grf_used);
}
}
if (compiler->devinfo->gen < 5 && simd16_cfg)
simd8_cfg = NULL;
+ if (compiler->devinfo->gen <= 5 && !simd8_cfg) {
+ /* Iron lake and earlier only have one Dispatch GRF start field. Make
+ * the data available in the base prog data struct for convenience.
+ */
+ if (simd16_cfg) {
+ prog_data->base.dispatch_grf_start_reg =
+ prog_data->dispatch_grf_start_reg_16;
+ }
+ }
+
if (prog_data->persample_dispatch) {
/* Starting with SandyBridge (where we first get MSAA), the different
* pixel dispatch combinations are grouped into classifications A
*/
brw_compute_flat_inputs(prog_data, shader);
- fs_generator g(compiler, log_data, mem_ctx, (void *) key, &prog_data->base,
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
v8.promoted_constants, v8.runtime_check_aads_emit,
MESA_SHADER_FRAGMENT);
if (simd8_cfg) {
prog_data->dispatch_8 = true;
g.generate_code(simd8_cfg, 8);
- prog_data->base.dispatch_grf_start_reg = simd8_grf_start;
- prog_data->reg_blocks_0 = brw_register_blocks(simd8_grf_used);
+ }
- if (simd16_cfg) {
- prog_data->dispatch_16 = true;
- prog_data->prog_offset_2 = g.generate_code(simd16_cfg, 16);
- prog_data->dispatch_grf_start_reg_2 = simd16_grf_start;
- prog_data->reg_blocks_2 = brw_register_blocks(simd16_grf_used);
- }
- } else if (simd16_cfg) {
+ if (simd16_cfg) {
prog_data->dispatch_16 = true;
- g.generate_code(simd16_cfg, 16);
- prog_data->base.dispatch_grf_start_reg = simd16_grf_start;
- prog_data->reg_blocks_0 = brw_register_blocks(simd16_grf_used);
+ prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
}
- return g.get_assembly(&prog_data->base.program_size);
+ return g.get_assembly();
}
fs_reg *
compile_cs_to_nir(const struct brw_compiler *compiler,
void *mem_ctx,
const struct brw_cs_prog_key *key,
- struct brw_cs_prog_data *prog_data,
const nir_shader *src_shader,
unsigned dispatch_width)
{
fs_visitor *v8 = NULL, *v16 = NULL, *v32 = NULL;
cfg_t *cfg = NULL;
const char *fail_msg = NULL;
- unsigned promoted_constants;
+ unsigned promoted_constants = 0;
/* Now the main event: Visit the shader IR and generate our CS IR for it.
*/
if (min_dispatch_width <= 8) {
nir_shader *nir8 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 8);
+ src_shader, 8);
v8 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir8, 8, shader_time_index);
!fail_msg && min_dispatch_width <= 16) {
/* Try a SIMD16 compile */
nir_shader *nir16 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 16);
+ src_shader, 16);
v16 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir16, 16, shader_time_index);
if (!fail_msg && (min_dispatch_width > 16 || (INTEL_DEBUG & DEBUG_DO32))) {
/* Try a SIMD32 compile */
nir_shader *nir32 = compile_cs_to_nir(compiler, mem_ctx, key,
- prog_data, src_shader, 32);
+ src_shader, 32);
v32 = new fs_visitor(compiler, log_data, mem_ctx, key, &prog_data->base,
NULL, /* Never used in core profile */
nir32, 32, shader_time_index);
if (error_str)
*error_str = ralloc_strdup(mem_ctx, fail_msg);
} else {
- fs_generator g(compiler, log_data, mem_ctx, (void*) key, &prog_data->base,
+ fs_generator g(compiler, log_data, mem_ctx, &prog_data->base,
promoted_constants, false, MESA_SHADER_COMPUTE);
if (INTEL_DEBUG & DEBUG_CS) {
char *name = ralloc_asprintf(mem_ctx, "%s compute shader %s",
g.generate_code(cfg, prog_data->simd_size);
- ret = g.get_assembly(&prog_data->base.program_size);
+ ret = g.get_assembly();
}
delete v8;