*
*/
+#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
-#include "glsl/nir/glsl_types.h"
-#include "glsl/ir_optimization.h"
+#include "util/register_allocate.h"
using namespace brw;
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
if (reg->file == VGRF) {
- reg->nr = reg_hw_locations[reg->nr] + reg->reg_offset;
- reg->reg_offset = 0;
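+ /* Fold the whole-register part of the byte offset into the hardware
+ * register number and keep only the sub-register remainder, e.g.
+ * (illustrative numbers) offset 40 with a 32B REG_SIZE becomes one
+ * extra register plus a residual 8B offset.
+ */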
+ reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
+ reg->offset %= REG_SIZE;
}
}
static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
- const struct brw_device_info *devinfo = compiler->devinfo;
+ const struct gen_device_info *devinfo = compiler->devinfo;
int base_reg_count = BRW_MAX_GRF;
- int index = (dispatch_width / 8) - 1;
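+ /* fs_reg_sets[] is indexed by the log2 of the SIMD group width, so the
+ * SIMD8/16/32 sets land at indices 0, 1 and 2; the previous linear
+ * (dispatch_width / 8) - 1 scheme would have put SIMD32 at index 3,
+ * leaving a hole at index 2.
+ */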
+ const int index = _mesa_logbase2(dispatch_width / 8);
if (dispatch_width > 8 && devinfo->gen >= 7) {
/* For IVB+, we don't need the PLN hacks or the even-reg alignment in
* Additionally, on gen5 we need aligned pairs of registers for the PLN
* instruction, and on gen4 we need 8 contiguous regs for the SIMD16
* texturing workaround.
- *
- * So we have a need for classes for 1, 2, 4, and 8 registers currently,
- * and we add in '3' to make indexing the array easier for the common case
- * (since we'll probably want it for texturing later).
- *
- * And, on gen7 and newer, we do texturing SEND messages from GRFs, which
- * means that we may need any size up to the sampler message size limit (11
- * regs).
*/
- int class_count;
+ const int class_count = MAX_VGRF_SIZE;
int class_sizes[MAX_VGRF_SIZE];
-
- if (devinfo->gen >= 7) {
- for (class_count = 0; class_count < MAX_VGRF_SIZE; class_count++)
- class_sizes[class_count] = class_count + 1;
- } else {
- for (class_count = 0; class_count < 4; class_count++)
- class_sizes[class_count] = class_count + 1;
- class_sizes[class_count++] = 8;
- }
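+ /* Every gen now gets one register class per allocation size, from a
+ * single GRF up to MAX_VGRF_SIZE GRFs, rather than the gen-dependent
+ * subset of classes used before.
+ */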
+ for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
+ class_sizes[i] = i + 1;
memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
/* Compute the total number of registers across all classes. */
int ra_reg_count = 0;
for (int i = 0; i < class_count; i++) {
- if (devinfo->gen <= 5 && dispatch_width == 16) {
+ if (devinfo->gen <= 5 && dispatch_width >= 16) {
/* From the G45 PRM:
*
* In order to reduce the hardware complexity, the following
int pairs_reg_count = 0;
for (int i = 0; i < class_count; i++) {
int class_reg_count;
- if (devinfo->gen <= 5 && dispatch_width == 16) {
+ if (devinfo->gen <= 5 && dispatch_width >= 16) {
class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;
/* See comment below. The only difference here is that we are
pairs_reg_count = class_reg_count;
}
- if (devinfo->gen <= 5 && dispatch_width == 16) {
+ if (devinfo->gen <= 5 && dispatch_width >= 16) {
for (int j = 0; j < class_reg_count; j++) {
ra_class_add_reg(regs, classes[i], reg);
{
brw_alloc_reg_set(compiler, 8);
brw_alloc_reg_set(compiler, 16);
+ brw_alloc_reg_set(compiler, 32);
}
static int
if (node_nr >= payload_node_count)
continue;
- for (int j = 0; j < inst->regs_read(i); j++) {
+ for (unsigned j = 0; j < regs_read(inst, i); j++) {
payload_last_use_ip[node_nr + j] = use_ip;
- assert(node_nr + j < payload_node_count);
+ assert(node_nr + j < unsigned(payload_node_count));
}
}
}
* The alternative would be to have per-physical-register classes, which
* would just be silly.
*/
- if (devinfo->gen <= 5 && dispatch_width == 16) {
+ if (devinfo->gen <= 5 && dispatch_width >= 16) {
/* We have to divide by 2 here because we only have even numbered
* registers. Some of the payload registers will be odd, but
* that's ok because their physical register numbers have already
}
bool
-fs_visitor::assign_regs(bool allow_spilling)
+fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
/* Most of this allocation was written for a reg_width of 1
* (dispatch_width == 8). In extending to SIMD16, the code was
int reg_width = dispatch_width / 8;
unsigned hw_reg_mapping[this->alloc.count];
int payload_node_count = ALIGN(this->first_non_payload_grf, reg_width);
- int rsi = reg_width - 1; /* Which compiler->fs_reg_sets[] to use */
+ int rsi = _mesa_logbase2(reg_width); /* Which compiler->fs_reg_sets[] to use */
calculate_live_intervals();
int node_count = this->alloc.count;
* second operand of a PLN instruction needs to be an
* even-numbered register, so we have a special register class
* wm_aligned_pairs_class to handle this case. pre-GEN6 always
- * uses this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] as the
+ * uses this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] as the
* second operand of a PLN instruction (since it doesn't support
* any other interpolation modes). So all we need to do is find
* that register and set it to the appropriate class.
*/
if (compiler->fs_reg_sets[rsi].aligned_pairs_class >= 0 &&
- this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].file == VGRF &&
- this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].nr == i) {
+ this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].file == VGRF &&
+ this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].nr == i) {
c = compiler->fs_reg_sets[rsi].aligned_pairs_class;
}
}
}
+ /* Certain instructions can't safely use the same register for their
+ * sources and destination. Add interference.
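+ * (The hazard: the instruction may write part of its destination
+ * before it has read every channel of a source, so letting them share
+ * a register would corrupt the not-yet-read source channels.)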
+ */
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
+ if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
+ for (unsigned i = 0; i < 3; i++) {
+ if (inst->src[i].file == VGRF) {
+ ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
+ }
+ }
+ }
+ }
+
setup_payload_interference(g, payload_node_count, first_payload_node);
if (devinfo->gen >= 7) {
int first_used_mrf = BRW_MAX_MRF(devinfo->gen);
}
/* Debug of register spilling: Go spill everything. */
- if (unlikely(INTEL_DEBUG & DEBUG_SPILL_FS)) {
+ if (unlikely(spill_all)) {
int reg = choose_spill_reg(g);
if (reg != -1) {
return true;
}
-void
-fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
- uint32_t spill_offset, int count)
+namespace {
+ /**
+ * Maximum spill block size we expect to encounter in 32B units.
+ *
+ * This is somewhat arbitrary and doesn't necessarily limit the maximum
+ * variable size that can be spilled -- a higher value will allow a
+ * variable of a given size to be spilled more efficiently with a smaller
+ * number of scratch messages, but will increase the likelihood of a
+ * collision between the MRFs reserved for spilling and other MRFs used by
+ * the program (and possibly increase GRF register pressure on platforms
+ * without hardware MRFs), which could cause register allocation to fail.
+ *
+ * For the moment reserve just enough space so a register of 32 bit
+ * component type and natural region width can be spilled without splitting
+ * into multiple (force_writemask_all) scratch messages.
+ */
+ unsigned
+ spill_max_size(const backend_shader *s)
+ {
+ /* FINISHME - On Gen7+ it should be possible to avoid this limit
+ * altogether by spilling directly from the temporary GRF
+ * allocated to hold the result of the instruction (and the
+ * scratch write header).
+ */
+ /* FINISHME - The shader's dispatch width probably belongs in
+ * backend_shader (or some nonexistent fs_shader class?)
+ * rather than in the visitor class.
+ */
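+ /* That is one GRF per SIMD8 group: 2 GRFs at SIMD16 and 4 at SIMD32,
+ * exactly one natural-width 32-bit register worth of data.
+ */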
+ return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
+ }
+
+ /**
+ * First MRF register available for spilling.
+ */
+ unsigned
+ spill_base_mrf(const backend_shader *s)
+ {
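+ /* E.g. at SIMD16 this reserves the top three MRFs: spill_max_size(s)
+ * == 2 payload registers plus one more for the message header the
+ * scratch send writes.
+ */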
+ return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
+ }
+}
+
+static void
+emit_unspill(const fs_builder &bld, fs_reg dst,
+ uint32_t spill_offset, unsigned count)
{
- int reg_size = 1;
- if (dispatch_width == 16 && count % 2 == 0)
- reg_size = 2;
-
- const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
- .group(reg_size * 8, 0)
- .at(block, inst);
-
- for (int i = 0; i < count / reg_size; i++) {
- /* The gen7 descriptor-based offset is 12 bits of HWORD units. */
- bool gen7_read = devinfo->gen >= 7 && spill_offset < (1 << 12) * REG_SIZE;
- fs_inst *unspill_inst = ibld.emit(gen7_read ?
- SHADER_OPCODE_GEN7_SCRATCH_READ :
- SHADER_OPCODE_GEN4_SCRATCH_READ,
- dst);
+ const gen_device_info *devinfo = bld.shader->devinfo;
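+ /* GRFs per component of the unspill destination at the builder's
+ * dispatch width -- e.g. two for a 32-bit component read back at
+ * SIMD16 -- so each loop iteration below fetches one full component.
+ */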
+ const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
+ REG_SIZE;
+ assert(count % reg_size == 0);
+
+ for (unsigned i = 0; i < count / reg_size; i++) {
+ /* The Gen7 descriptor-based offset is 12 bits of HWORD units. Because
+ * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
+ * it would cause the DC to do an IA-coherent read, which largely
+ * outweighs the slight advantage from not having to provide the address
+ * as part of the message header, so we're better off using plain old
+ * oword block reads.
+ */
+ bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
+ spill_offset < (1 << 12) * REG_SIZE);
+ fs_inst *unspill_inst = bld.emit(gen7_read ?
+ SHADER_OPCODE_GEN7_SCRATCH_READ :
+ SHADER_OPCODE_GEN4_SCRATCH_READ,
+ dst);
unspill_inst->offset = spill_offset;
- unspill_inst->regs_written = reg_size;
if (!gen7_read) {
- unspill_inst->base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
+ unspill_inst->base_mrf = spill_base_mrf(bld.shader);
unspill_inst->mlen = 1; /* header contains offset */
}
- dst.reg_offset += reg_size;
+ dst.offset += reg_size * REG_SIZE;
spill_offset += reg_size * REG_SIZE;
}
}
-void
-fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
- uint32_t spill_offset, int count)
+static void
+emit_spill(const fs_builder &bld, fs_reg src,
+ uint32_t spill_offset, unsigned count)
{
- int reg_size = 1;
- int spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen) + 1;
- if (dispatch_width == 16 && count % 2 == 0) {
- spill_base_mrf = FIRST_SPILL_MRF(devinfo->gen);
- reg_size = 2;
- }
-
- const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
- .group(reg_size * 8, 0)
- .at(block, inst->next);
+ const unsigned reg_size = src.component_size(bld.dispatch_width()) /
+ REG_SIZE;
+ assert(count % reg_size == 0);
- for (int i = 0; i < count / reg_size; i++) {
+ for (unsigned i = 0; i < count / reg_size; i++) {
fs_inst *spill_inst =
- ibld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, ibld.null_reg_f(), src);
- src.reg_offset += reg_size;
+ bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
+ src.offset += reg_size * REG_SIZE;
spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
spill_inst->mlen = 1 + reg_size; /* header, value */
- spill_inst->base_mrf = spill_base_mrf;
+ spill_inst->base_mrf = spill_base_mrf(bld.shader);
}
}
*/
foreach_block_and_inst(block, fs_inst, inst, cfg) {
for (unsigned int i = 0; i < inst->sources; i++) {
- if (inst->src[i].file == VGRF) {
+ if (inst->src[i].file == VGRF)
spill_costs[inst->src[i].nr] += loop_scale;
-
- /* Register spilling logic assumes full-width registers; smeared
- * registers have a width of 1 so if we try to spill them we'll
- * generate invalid assembly. This shouldn't be a problem because
- * smeared registers are only used as short-term temporaries when
- * loading pull constants, so spilling them is unlikely to reduce
- * register pressure anyhow.
- */
- if (!inst->src[i].is_contiguous()) {
- no_spill[inst->src[i].nr] = true;
- }
- }
}
- if (inst->dst.file == VGRF) {
- spill_costs[inst->dst.nr] += inst->regs_written * loop_scale;
-
- if (!inst->dst.is_contiguous()) {
- no_spill[inst->dst.nr] = true;
- }
- }
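+ /* Spill costs are accumulated in whole GRFs: DIV_ROUND_UP means a
+ * write covering only part of a register still counts as a full one,
+ * which matches what the scratch messages would actually move.
+ */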
+ if (inst->dst.file == VGRF)
+ spill_costs[inst->dst.nr] +=
+ DIV_ROUND_UP(inst->size_written, REG_SIZE) * loop_scale;
switch (inst->opcode) {
int size = alloc.sizes[spill_reg];
unsigned int spill_offset = last_scratch;
assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
- int spill_base_mrf = dispatch_width > 8 ? FIRST_SPILL_MRF(devinfo->gen) :
- FIRST_SPILL_MRF(devinfo->gen) + 1;
/* Spills may use MRFs 13-15 in the SIMD16 case. Our texturing is done
* using up to 11 MRFs starting from either m1 or m2, and fb writes can use
bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
get_used_mrfs(this, mrf_used);
- for (int i = spill_base_mrf; i < BRW_MAX_MRF(devinfo->gen); i++) {
+ for (int i = spill_base_mrf(this); i < BRW_MAX_MRF(devinfo->gen); i++) {
if (mrf_used[i]) {
fail("Register spilling not supported with m%d used", i);
return;
* could just spill/unspill the GRF being accessed.
*/
foreach_block_and_inst (block, fs_inst, inst, cfg) {
+ const fs_builder ibld = fs_builder(this, block, inst);
+
for (unsigned int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == VGRF &&
inst->src[i].nr == spill_reg) {
- int regs_read = inst->regs_read(i);
- int subset_spill_offset = (spill_offset +
- REG_SIZE * inst->src[i].reg_offset);
- fs_reg unspill_dst(VGRF, alloc.allocate(regs_read));
+ int count = regs_read(inst, i);
+ int subset_spill_offset = spill_offset +
+ ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
+ fs_reg unspill_dst(VGRF, alloc.allocate(count));
inst->src[i].nr = unspill_dst.nr;
- inst->src[i].reg_offset = 0;
+ inst->src[i].offset %= REG_SIZE;
- emit_unspill(block, inst, unspill_dst, subset_spill_offset,
- regs_read);
+ /* We read the largest power-of-two divisor of the register count
+ * (because only POT scratch read blocks are allowed by the
+ * hardware) up to the maximum supported block size.
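+ *
+ * Worked example: a 3-GRF source is 24 32-bit channels, whose largest
+ * power-of-two divisor is 8, so it unspills as three SIMD8 reads,
+ * while a 4-GRF source becomes a single SIMD32 read.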
+ */
+ const unsigned width =
+ MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
+
+ /* Set exec_all() on unspill messages under the (rather
+ * pessimistic) assumption that there is no one-to-one
+ * correspondence between channels of the spilled variable in
+ * scratch space and the scratch read message, which operates on
+ * 32 bit channels. It shouldn't hurt in any case because the
+ * unspill destination is a block-local temporary.
+ */
+ emit_unspill(ibld.exec_all().group(width, 0),
+ unspill_dst, subset_spill_offset, count);
}
}
if (inst->dst.file == VGRF &&
inst->dst.nr == spill_reg) {
- int subset_spill_offset = (spill_offset +
- REG_SIZE * inst->dst.reg_offset);
- fs_reg spill_src(VGRF, alloc.allocate(inst->regs_written));
+ int subset_spill_offset = spill_offset +
+ ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
+ fs_reg spill_src(VGRF, alloc.allocate(regs_written(inst)));
inst->dst.nr = spill_src.nr;
- inst->dst.reg_offset = 0;
+ inst->dst.offset %= REG_SIZE;
/* If we're immediately spilling the register, we should not use
destination dependency hints. Doing so will cause the GPU to
inst->no_dd_clear = false;
inst->no_dd_check = false;
+ /* Calculate the execution width of the scratch messages (which work
+ * in terms of 32 bit components, so we have a fixed number of eight
+ * channels per spilled register). We attempt to write one
+ * exec_size-wide component of the variable at a time without
+ * exceeding the maximum number of (fake) MRF registers reserved for
+ * spills.
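+ *
+ * E.g. a 32-bit destination written at SIMD16 has a 64B (2-GRF)
+ * component, so with spill_max_size(this) == 2 the scratch writes go
+ * out 16 channels at a time.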
+ */
+ const unsigned width = 8 * MIN2(
+ DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
+ spill_max_size(this));
+
+ /* Spills should only write data initialized by the instruction for
+ * whichever channels are enabled in the execution mask. If that's
+ * not possible we'll have to emit a matching unspill before the
+ * instruction and set force_writemask_all on the spill.
+ */
+ const bool per_channel =
+ inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
+ inst->exec_size == width;
+
+ /* Builder used to emit the scratch messages. */
+ const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);
+
/* If our write is going to affect just part of the
- * inst->regs_written(), then we need to unspill the destination
- * since we write back out all of the regs_written().
+ * regs_written(inst), then we need to unspill the destination since
+ * we write back out all of the regs_written(). If the original
+ * instruction has force_writemask_all set and is not a partial
+ * write, there should be no need for the unspill since the
+ * instruction will be overwriting the whole destination in any case.
*/
- if (inst->is_partial_write())
- emit_unspill(block, inst, spill_src, subset_spill_offset,
- inst->regs_written);
+ if (inst->is_partial_write() ||
+ (!inst->force_writemask_all && !per_channel))
+ emit_unspill(ubld, spill_src, subset_spill_offset,
+ regs_written(inst));
- emit_spill(block, inst, spill_src, subset_spill_offset,
- inst->regs_written);
+ emit_spill(ubld.at(block, inst->next), spill_src,
+ subset_spill_offset, regs_written(inst));
}
}