* IN THE SOFTWARE.
*/
-extern "C" {
-#include "main/macros.h"
-#include "program/register_allocate.h"
-} /* extern "C" */
-
+#include "util/register_allocate.h"
#include "brw_vec4.h"
-#include "brw_vs.h"
+#include "brw_cfg.h"
using namespace brw;
namespace brw {
static void
-assign(unsigned int *reg_hw_locations, reg *reg)
+assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
- if (reg->file == GRF) {
- reg->reg = reg_hw_locations[reg->reg];
+ if (reg->file == VGRF) {
+ reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
+ reg->offset %= REG_SIZE;
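+      /* For illustration (assuming REG_SIZE is one 32-byte GRF, as elsewhere
+       * in this backend): a byte offset of 48 lands the access in hardware
+       * register nr + 1 with a remaining sub-register offset of 16 bytes.
+       */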
}
}
bool
vec4_visitor::reg_allocate_trivial()
{
- unsigned int hw_reg_mapping[this->virtual_grf_count];
- bool virtual_grf_used[this->virtual_grf_count];
- int i;
+ unsigned int hw_reg_mapping[this->alloc.count];
+ bool virtual_grf_used[this->alloc.count];
int next;
/* Calculate which virtual GRFs are actually in use after whatever
* optimization passes have occurred.
*/
- for (int i = 0; i < this->virtual_grf_count; i++) {
+ for (unsigned i = 0; i < this->alloc.count; i++) {
virtual_grf_used[i] = false;
}
- foreach_iter(exec_list_iterator, iter, this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)iter.get();
-
- if (inst->dst.file == GRF)
- virtual_grf_used[inst->dst.reg] = true;
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
+ if (inst->dst.file == VGRF)
+ virtual_grf_used[inst->dst.nr] = true;
- for (int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF)
- virtual_grf_used[inst->src[i].reg] = true;
+ for (unsigned i = 0; i < 3; i++) {
+ if (inst->src[i].file == VGRF)
+ virtual_grf_used[inst->src[i].nr] = true;
}
}
hw_reg_mapping[0] = this->first_non_payload_grf;
- next = hw_reg_mapping[0] + this->virtual_grf_sizes[0];
- for (i = 1; i < this->virtual_grf_count; i++) {
+ next = hw_reg_mapping[0] + this->alloc.sizes[0];
+ for (unsigned i = 1; i < this->alloc.count; i++) {
if (virtual_grf_used[i]) {
hw_reg_mapping[i] = next;
- next += this->virtual_grf_sizes[i];
+ next += this->alloc.sizes[i];
}
}
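   /* For illustration (hypothetical sizes, not from the original code): with
    * first_non_payload_grf == 2 and used VGRF sizes {2, 1, 3}, the bases
    * above become 2, 4 and 5, and total_grf below ends up as 8.
    */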
prog_data->total_grf = next;
- foreach_iter(exec_list_iterator, iter, this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)iter.get();
-
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
assign(hw_reg_mapping, &inst->dst);
assign(hw_reg_mapping, &inst->src[0]);
assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   return true;
}
-static void
-brw_alloc_reg_set_for_classes(struct brw_context *brw,
- int *class_sizes,
- int class_count,
- int base_reg_count)
+extern "C" void
+brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
+ int base_reg_count =
+ compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
+
+ /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
+ * SEND-from-GRF sources cannot be split, so we also need classes for each
+ * potential message length.
+ */
+ const int class_count = MAX_VGRF_SIZE;
+ int class_sizes[MAX_VGRF_SIZE];
+
+ for (int i = 0; i < class_count; i++)
+ class_sizes[i] = i + 1;
+
/* Compute the total number of registers across all classes. */
int ra_reg_count = 0;
for (int i = 0; i < class_count; i++) {
ra_reg_count += base_reg_count - (class_sizes[i] - 1);
}
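   /* For illustration: with base_reg_count == 112 (GEN7_MRF_HACK_START),
    * size-1 registers contribute 112 ra registers, size-2 registers 111,
    * and so on down to the largest class.
    */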
- ralloc_free(brw->vs.ra_reg_to_grf);
- brw->vs.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
- ralloc_free(brw->vs.regs);
- brw->vs.regs = ra_alloc_reg_set(brw, ra_reg_count);
- if (brw->gen >= 6)
- ra_set_allocate_round_robin(brw->vs.regs);
- ralloc_free(brw->vs.classes);
- brw->vs.classes = ralloc_array(brw, int, class_count + 1);
+ ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
+ compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
+ ralloc_free(compiler->vec4_reg_set.regs);
+ compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
+ if (compiler->devinfo->gen >= 6)
+ ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
+ ralloc_free(compiler->vec4_reg_set.classes);
+ compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);
/* Now, add the registers to their classes, and add the conflicts
* between them and the base GRF registers (and also each other).
*/
int reg = 0;
+ unsigned *q_values[MAX_VGRF_SIZE];
for (int i = 0; i < class_count; i++) {
int class_reg_count = base_reg_count - (class_sizes[i] - 1);
- brw->vs.classes[i] = ra_alloc_reg_class(brw->vs.regs);
+ compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs);
+
+ q_values[i] = new unsigned[MAX_VGRF_SIZE];
for (int j = 0; j < class_reg_count; j++) {
- ra_class_add_reg(brw->vs.regs, brw->vs.classes[i], reg);
+ ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg);
- brw->vs.ra_reg_to_grf[reg] = j;
+ compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;
for (int base_reg = j;
base_reg < j + class_sizes[i];
base_reg++) {
- ra_add_transitive_reg_conflict(brw->vs.regs, base_reg, reg);
+ ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
}
reg++;
}
+
+ for (int j = 0; j < class_count; j++) {
+ /* Calculate the q values manually because the algorithm used by
+ * ra_set_finalize() to do it has higher complexity affecting the
+ * start-up time of some applications. q(i, j) is just the maximum
+ * number of registers from class i a register from class j can
+ * conflict with.
+ */
+ q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
+ }
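+      /* For illustration: with class_sizes[i] == 2 and class_sizes[j] == 3,
+       * a 3-GRF register occupying GRFs b..b+2 overlaps the 2-GRF registers
+       * starting at b-1, b, b+1 and b+2, i.e. 2 + 3 - 1 == 4 of them.
+       */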
}
assert(reg == ra_reg_count);
- ra_set_finalize(brw->vs.regs, NULL);
+ for (int reg = 0; reg < base_reg_count; reg++)
+ ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg);
+
+ ra_set_finalize(compiler->vec4_reg_set.regs, q_values);
+
+ for (int i = 0; i < MAX_VGRF_SIZE; i++)
+ delete[] q_values[i];
+}
+
+void
+vec4_visitor::setup_payload_interference(struct ra_graph *g,
+ int first_payload_node,
+ int reg_node_count)
+{
+ int payload_node_count = this->first_non_payload_grf;
+
+ for (int i = 0; i < payload_node_count; i++) {
+ /* Mark each payload reg node as being allocated to its physical register.
+ *
+ * The alternative would be to have per-physical register classes, which
+ * would just be silly.
+ */
+ ra_set_node_reg(g, first_payload_node + i, i);
+
+ /* For now, just mark each payload node as interfering with every other
+ * node to be allocated.
+ */
+ for (int j = 0; j < reg_node_count; j++) {
+ ra_add_node_interference(g, first_payload_node + i, j);
+ }
+ }
}
bool
vec4_visitor::reg_allocate()
{
- unsigned int hw_reg_mapping[virtual_grf_count];
- int first_assigned_grf = this->first_non_payload_grf;
- int base_reg_count = max_grf - first_assigned_grf;
- int class_sizes[base_reg_count];
- int class_count = 0;
+ unsigned int hw_reg_mapping[alloc.count];
+ int payload_reg_count = this->first_non_payload_grf;
   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();
- /* Set up the register classes.
- *
- * The base registers store a vec4. However, we'll need larger
- * storage for arrays, structures, and matrices, which will be sets
- * of contiguous registers.
- */
- class_sizes[class_count++] = 1;
+ int node_count = alloc.count;
+ int first_payload_node = node_count;
+ node_count += payload_reg_count;
+ struct ra_graph *g =
+ ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);
- for (int r = 0; r < virtual_grf_count; r++) {
- int i;
+ for (unsigned i = 0; i < alloc.count; i++) {
+ int size = this->alloc.sizes[i];
+ assert(size >= 1 && size <= MAX_VGRF_SIZE);
+ ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);
- for (i = 0; i < class_count; i++) {
- if (class_sizes[i] == this->virtual_grf_sizes[r])
- break;
- }
- if (i == class_count) {
- if (this->virtual_grf_sizes[r] >= base_reg_count) {
- fail("Object too large to register allocate.\n");
+ for (unsigned j = 0; j < i; j++) {
+ if (virtual_grf_interferes(i, j)) {
+ ra_add_node_interference(g, i, j);
}
-
- class_sizes[class_count++] = this->virtual_grf_sizes[r];
}
}
- brw_alloc_reg_set_for_classes(brw, class_sizes, class_count, base_reg_count);
-
- struct ra_graph *g = ra_alloc_interference_graph(brw->vs.regs,
- virtual_grf_count);
-
- for (int i = 0; i < virtual_grf_count; i++) {
- for (int c = 0; c < class_count; c++) {
- if (class_sizes[c] == this->virtual_grf_sizes[i]) {
- ra_set_node_class(g, i, brw->vs.classes[c]);
- break;
- }
- }
-
- for (int j = 0; j < i; j++) {
- if (virtual_grf_interferes(i, j)) {
- ra_add_node_interference(g, i, j);
- }
+ /* Certain instructions can't safely use the same register for their
+ * sources and destination. Add interference.
+ */
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
+ if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
+ for (unsigned i = 0; i < 3; i++) {
+ if (inst->src[i].file == VGRF) {
+ ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
+ }
+ }
}
}
- if (!ra_allocate_no_spills(g)) {
+ setup_payload_interference(g, first_payload_node, node_count);
+
+ if (!ra_allocate(g)) {
/* Failed to allocate registers. Spill a reg, and the caller will
* loop back into here to try again.
*/
int reg = choose_spill_reg(g);
- if (reg == -1) {
+ if (this->no_spills) {
+ fail("Failure to register allocate. Reduce number of live "
+ "values to avoid this.");
+ } else if (reg == -1) {
fail("no register to spill\n");
} else {
spill_reg(reg);
      }

      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
- prog_data->total_grf = first_assigned_grf;
- for (int i = 0; i < virtual_grf_count; i++) {
+ prog_data->total_grf = payload_reg_count;
+ for (unsigned i = 0; i < alloc.count; i++) {
int reg = ra_get_node_reg(g, i);
- hw_reg_mapping[i] = first_assigned_grf + brw->vs.ra_reg_to_grf[reg];
+ hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
prog_data->total_grf = MAX2(prog_data->total_grf,
- hw_reg_mapping[i] + virtual_grf_sizes[i]);
+ hw_reg_mapping[i] + alloc.sizes[i]);
}
- foreach_list(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *)node;
-
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
assign(hw_reg_mapping, &inst->dst);
assign(hw_reg_mapping, &inst->src[0]);
assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   return true;
}
+/**
+ * When we decide to spill a register, instead of blindly spilling every use,
+ * we avoid redundant unspills when the spilled register is read by
+ * consecutive instructions. This can potentially save a bunch of unspills
+ * that would have very little impact on register allocation anyway.
+ *
+ * Notice that we need to account for this behavior when spilling a register
+ * and when evaluating spilling costs. This function is designed so it can
+ * be called from both places and avoid repeating the logic.
+ *
+ * - When we call this function from spill_reg(), we pass in scratch_reg the
+ * actual unspill/spill register that we want to reuse in the current
+ * instruction.
+ *
+ * - When we call this from evaluate_spill_costs(), we pass the register for
+ * which we are evaluating spilling costs.
+ *
+ * In either case, we check if the previous instructions read scratch_reg until
+ * we find one that writes to it with a compatible mask or does not read/write
+ * scratch_reg at all.
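+ *
+ * For illustration (a hypothetical sequence, not from the original code):
+ *
+ *    mov scratch_reg.xyzw, ...    <- unconditional write of every channel
+ *    mul ..., scratch_reg.xzxz    <- reusable: the write covers the read mask
+ *    add ..., scratch_reg.yyww    <- still reusable, no extra unspill needed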
+ */
+static bool
+can_use_scratch_for_source(const vec4_instruction *inst, unsigned i,
+ unsigned scratch_reg)
+{
+ assert(inst->src[i].file == VGRF);
+ bool prev_inst_read_scratch_reg = false;
+
+   /* See if any previous source in the same instruction reads scratch_reg */
+ for (unsigned n = 0; n < i; n++) {
+ if (inst->src[n].file == VGRF && inst->src[n].nr == scratch_reg)
+ prev_inst_read_scratch_reg = true;
+ }
+
+ /* Now check if previous instructions read/write scratch_reg */
+ for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev;
+ !prev_inst->is_head_sentinel();
+ prev_inst = (vec4_instruction *) prev_inst->prev) {
+
+ /* If the previous instruction writes to scratch_reg then we can reuse
+ * it if the write is not conditional and the channels we write are
+       * compatible with our read mask.
+ */
+ if (prev_inst->dst.file == VGRF && prev_inst->dst.nr == scratch_reg) {
+ return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) &&
+ (brw_mask_for_swizzle(inst->src[i].swizzle) &
+ ~prev_inst->dst.writemask) == 0;
+ }
+
+ /* Skip scratch read/writes so that instructions generated by spilling
+ * other registers (that won't read/write scratch_reg) do not stop us from
+ * reusing scratch_reg for this instruction.
+ */
+ if (prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE ||
+ prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ)
+ continue;
+
+ /* If the previous instruction does not write to scratch_reg, then check
+       * if it reads it.
+ */
+ int n;
+ for (n = 0; n < 3; n++) {
+ if (prev_inst->src[n].file == VGRF &&
+ prev_inst->src[n].nr == scratch_reg) {
+ prev_inst_read_scratch_reg = true;
+ break;
+ }
+ }
+ if (n == 3) {
+ /* The previous instruction does not read scratch_reg. At this point,
+ * if no previous instruction has read scratch_reg it means that we
+ * will need to unspill it here and we can't reuse it (so we return
+ * false). Otherwise, if we found at least one consecutive instruction
+ * that read scratch_reg, then we know that we got here from
+ * evaluate_spill_costs (since for the spill_reg path any block of
+ * consecutive instructions using scratch_reg must start with a write
+ * to that register, so we would've exited the loop in the check for
+ * the write that we have at the start of this loop), and in that case
+ * it means that we found the point at which the scratch_reg would be
+ * unspilled. Since we always unspill a full vec4, it means that we
+ * have all the channels available and we can just return true to
+ * signal that we can reuse the register in the current instruction
+ * too.
+ */
+ return prev_inst_read_scratch_reg;
+ }
+ }
+
+ return prev_inst_read_scratch_reg;
+}
+
void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
float loop_scale = 1.0;
- for (int i = 0; i < this->virtual_grf_count; i++) {
+ for (unsigned i = 0; i < this->alloc.count; i++) {
spill_costs[i] = 0.0;
- no_spill[i] = virtual_grf_sizes[i] != 1;
+ no_spill[i] = alloc.sizes[i] != 1 && alloc.sizes[i] != 2;
}
/* Calculate costs for spilling nodes. Call it a cost of 1 per
* spill/unspill we'll have to do, and guess that the insides of
* loops run 10 times.
*/
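   /* For example, loop_scale is 1.0 in straight-line code, 10.0 inside a
    * single loop and 100.0 inside doubly nested loops, since each DO below
    * multiplies it by 10 and each WHILE divides it back out.
    */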
- foreach_list(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *) node;
-
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
for (unsigned int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF) {
- spill_costs[inst->src[i].reg] += loop_scale;
- if (inst->src[i].reladdr)
- no_spill[inst->src[i].reg] = true;
- }
+ if (inst->src[i].file == VGRF && !no_spill[inst->src[i].nr]) {
+         /* We will only unspill src[i] if it wasn't unspilled for the
+ * previous instruction, in which case we'll just reuse the scratch
+ * reg for this instruction.
+ */
+ if (!can_use_scratch_for_source(inst, i, inst->src[i].nr)) {
+ spill_costs[inst->src[i].nr] += loop_scale;
+ if (inst->src[i].reladdr ||
+ inst->src[i].offset >= REG_SIZE)
+ no_spill[inst->src[i].nr] = true;
+
+ /* We don't support unspills of partial DF reads.
+ *
+ * Our 64-bit unspills are implemented with two 32-bit scratch
+                * messages, each one reading data for both SIMD4x2 threads,
+                * which we then need to shuffle into correct 64-bit data.
+                * Ensure that we are reading data for both threads.
+ */
+ if (type_sz(inst->src[i].type) == 8 && inst->exec_size != 8)
+ no_spill[inst->src[i].nr] = true;
+ }
+ }
}
- if (inst->dst.file == GRF) {
- spill_costs[inst->dst.reg] += loop_scale;
- if (inst->dst.reladdr)
- no_spill[inst->dst.reg] = true;
+ if (inst->dst.file == VGRF && !no_spill[inst->dst.nr]) {
+ spill_costs[inst->dst.nr] += loop_scale;
+ if (inst->dst.reladdr || inst->dst.offset >= REG_SIZE)
+ no_spill[inst->dst.nr] = true;
+
+ /* We don't support spills of partial DF writes.
+ *
+ * Our 64-bit spills are implemented with two 32-bit scratch messages,
+          * each one writing data for both SIMD4x2 threads. Ensure that we
+ * are writing data for both threads.
+ */
+ if (type_sz(inst->dst.type) == 8 && inst->exec_size != 8)
+ no_spill[inst->dst.nr] = true;
}
switch (inst->opcode) {
case BRW_OPCODE_DO:
- loop_scale *= 10;
- break;
+ loop_scale *= 10;
+ break;
case BRW_OPCODE_WHILE:
- loop_scale /= 10;
- break;
+ loop_scale /= 10;
+ break;
- case VS_OPCODE_SCRATCH_READ:
- case VS_OPCODE_SCRATCH_WRITE:
+ case SHADER_OPCODE_GEN4_SCRATCH_READ:
+ case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
for (int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF)
- no_spill[inst->src[i].reg] = true;
+ if (inst->src[i].file == VGRF)
+ no_spill[inst->src[i].nr] = true;
}
- if (inst->dst.file == GRF)
- no_spill[inst->dst.reg] = true;
- break;
+ if (inst->dst.file == VGRF)
+ no_spill[inst->dst.nr] = true;
+ break;
default:
- break;
+ break;
}
}
}
int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
- float spill_costs[this->virtual_grf_count];
- bool no_spill[this->virtual_grf_count];
+ float spill_costs[this->alloc.count];
+ bool no_spill[this->alloc.count];
evaluate_spill_costs(spill_costs, no_spill);
- for (int i = 0; i < this->virtual_grf_count; i++) {
+ for (unsigned i = 0; i < this->alloc.count; i++) {
if (!no_spill[i])
ra_set_node_spill_cost(g, i, spill_costs[i]);
}
void
vec4_visitor::spill_reg(int spill_reg_nr)
{
- assert(virtual_grf_sizes[spill_reg_nr] == 1);
- unsigned int spill_offset = c->last_scratch++;
+ assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
+ unsigned int spill_offset = last_scratch;
+ last_scratch += alloc.sizes[spill_reg_nr];
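+   /* Note: last_scratch counts whole GRF-sized scratch slots, so a size-2
+    * VGRF (e.g. a 64-bit dvec4 in SIMD4x2) reserves two consecutive slots
+    * starting at spill_offset.
+    */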
/* Generate spill/unspill instructions for the objects being spilled. */
- foreach_list(node, &this->instructions) {
- vec4_instruction *inst = (vec4_instruction *) node;
-
+ int scratch_reg = -1;
+ foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
for (unsigned int i = 0; i < 3; i++) {
- if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
- src_reg spill_reg = inst->src[i];
- inst->src[i].reg = virtual_grf_alloc(1);
- dst_reg temp = dst_reg(inst->src[i]);
-
- /* Only read the necessary channels, to avoid overwriting the rest
- * with data that may not have been written to scratch.
- */
- temp.writemask = 0;
- for (int c = 0; c < 4; c++)
- temp.writemask |= (1 << BRW_GET_SWZ(inst->src[i].swizzle, c));
- assert(temp.writemask != 0);
-
- emit_scratch_read(inst, temp, spill_reg, spill_offset);
+ if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
+ if (scratch_reg == -1 ||
+ !can_use_scratch_for_source(inst, i, scratch_reg)) {
+ /* We need to unspill anyway so make sure we read the full vec4
+ * in any case. This way, the cached register can be reused
+ * for consecutive instructions that read different channels of
+ * the same vec4.
+ */
+ scratch_reg = alloc.allocate(alloc.sizes[spill_reg_nr]);
+ src_reg temp = inst->src[i];
+ temp.nr = scratch_reg;
+ temp.offset = 0;
+ temp.swizzle = BRW_SWIZZLE_XYZW;
+ emit_scratch_read(block, inst,
+ dst_reg(temp), inst->src[i], spill_offset);
+ temp.offset = inst->src[i].offset;
+ }
+ assert(scratch_reg != -1);
+ inst->src[i].nr = scratch_reg;
}
}
- if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
- emit_scratch_write(inst, spill_offset);
+ if (inst->dst.file == VGRF && inst->dst.nr == spill_reg_nr) {
+ emit_scratch_write(block, inst, spill_offset);
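+         /* emit_scratch_write() redirects inst->dst to a freshly allocated
+          * temporary, so recording inst->dst.nr below lets subsequent reads
+          * reuse that temporary instead of unspilling again (see
+          * can_use_scratch_for_source()).
+          */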
+ scratch_reg = inst->dst.nr;
}
}
- this->live_intervals_valid = false;
+ invalidate_live_intervals();
}
} /* namespace brw */