X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_vec4_reg_allocate.cpp;h=a49eca56118304b23e7d80f2c8c564bfc09a7080;hb=094877f9d23169b1d209fb0c97f9b6d4679842d9;hp=57b05192b357e3a2d3538c9a61141f595a0d9d52;hpb=9195191e50429d9cf25e6498f9fb108758ac2be6;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp b/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp
index 57b05192b35..a49eca56118 100644
--- a/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp
+++ b/src/mesa/drivers/dri/i965/brw_vec4_reg_allocate.cpp
@@ -21,66 +21,60 @@
  * IN THE SOFTWARE.
  */
 
-extern "C" {
 #include "main/macros.h"
-#include "program/register_allocate.h"
-} /* extern "C" */
-
+#include "util/register_allocate.h"
 #include "brw_vec4.h"
-#include "glsl/ir_print_visitor.h"
+#include "brw_vs.h"
+#include "brw_cfg.h"
 
 using namespace brw;
 
 namespace brw {
 
 static void
-assign(int *reg_hw_locations, reg *reg)
+assign(unsigned int *reg_hw_locations, backend_reg *reg)
 {
    if (reg->file == GRF) {
-      reg->reg = reg_hw_locations[reg->reg];
+      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
+      reg->reg_offset = 0;
    }
 }
 
-void
+bool
 vec4_visitor::reg_allocate_trivial()
 {
-   int hw_reg_mapping[this->virtual_grf_count];
-   bool virtual_grf_used[this->virtual_grf_count];
-   int i;
+   unsigned int hw_reg_mapping[this->alloc.count];
+   bool virtual_grf_used[this->alloc.count];
    int next;
 
    /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
-   for (int i = 0; i < this->virtual_grf_count; i++) {
+   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }
 
-   foreach_iter(exec_list_iterator, iter, this->instructions) {
-      vec4_instruction *inst = (vec4_instruction *)iter.get();
-
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF)
         virtual_grf_used[inst->dst.reg] = true;
 
-      for (int i = 0; i < 3; i++) {
+      for (unsigned i = 0; i < 3; i++) {
        if (inst->src[i].file == GRF)
           virtual_grf_used[inst->src[i].reg] = true;
      }
   }
 
   hw_reg_mapping[0] = this->first_non_payload_grf;
-   next = hw_reg_mapping[0] + this->virtual_grf_sizes[0];
-   for (i = 1; i < this->virtual_grf_count; i++) {
+   next = hw_reg_mapping[0] + this->alloc.sizes[0];
+   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
-         next += this->virtual_grf_sizes[i];
+         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;
 
-   foreach_iter(exec_list_iterator, iter, this->instructions) {
-      vec4_instruction *inst = (vec4_instruction *)iter.get();
-
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
@@ -90,140 +84,178 @@ vec4_visitor::reg_allocate_trivial()
    if (prog_data->total_grf > max_grf) {
       fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
+      return false;
    }
+
+   return true;
 }
 
-static void
-brw_alloc_reg_set_for_classes(struct brw_context *brw,
-                              int *class_sizes,
-                              int class_count,
-                              int base_reg_count)
+extern "C" void
+brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
 {
+   int base_reg_count =
+      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
+
+   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
+    * SEND-from-GRF sources cannot be split, so we also need classes for each
+    * potential message length.
+    */
+   const int class_count = MAX_VGRF_SIZE;
+   int class_sizes[MAX_VGRF_SIZE];
+
+   for (int i = 0; i < class_count; i++)
+      class_sizes[i] = i + 1;
+
    /* Compute the total number of registers across all classes. */
    int ra_reg_count = 0;
    for (int i = 0; i < class_count; i++) {
       ra_reg_count += base_reg_count - (class_sizes[i] - 1);
    }
 
-   ralloc_free(brw->vs.ra_reg_to_grf);
-   brw->vs.ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
-   ralloc_free(brw->vs.regs);
-   brw->vs.regs = ra_alloc_reg_set(brw, ra_reg_count);
-   ralloc_free(brw->vs.classes);
-   brw->vs.classes = ralloc_array(brw, int, class_count + 1);
+   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
+   compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
+   ralloc_free(compiler->vec4_reg_set.regs);
+   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
+   if (compiler->devinfo->gen >= 6)
+      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
+   ralloc_free(compiler->vec4_reg_set.classes);
+   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);
 
    /* Now, add the registers to their classes, and add the conflicts
     * between them and the base GRF registers (and also each other).
     */
    int reg = 0;
+   unsigned *q_values[MAX_VGRF_SIZE];
    for (int i = 0; i < class_count; i++) {
       int class_reg_count = base_reg_count - (class_sizes[i] - 1);
-      brw->vs.classes[i] = ra_alloc_reg_class(brw->vs.regs);
+      compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs);
+
+      q_values[i] = new unsigned[MAX_VGRF_SIZE];
 
       for (int j = 0; j < class_reg_count; j++) {
-        ra_class_add_reg(brw->vs.regs, brw->vs.classes[i], reg);
+        ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg);
 
-        brw->vs.ra_reg_to_grf[reg] = j;
+        compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;
 
         for (int base_reg = j;
             base_reg < j + class_sizes[i];
             base_reg++) {
-           ra_add_transitive_reg_conflict(brw->vs.regs, base_reg, reg);
+           ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }
 
         reg++;
      }
+
+      for (int j = 0; j < class_count; j++) {
+         /* Calculate the q values manually because the algorithm used by
+          * ra_set_finalize() to do it has higher complexity affecting the
+          * start-up time of some applications. q(i, j) is just the maximum
+          * number of registers from class i a register from class j can
+          * conflict with.
+          */
+         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
+      }
    }
    assert(reg == ra_reg_count);
 
-   ra_set_finalize(brw->vs.regs);
+   for (int reg = 0; reg < base_reg_count; reg++)
+      ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg);
+
+   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);
+
+   for (int i = 0; i < MAX_VGRF_SIZE; i++)
+      delete[] q_values[i];
 }
 
 void
+vec4_visitor::setup_payload_interference(struct ra_graph *g,
+                                         int first_payload_node,
+                                         int reg_node_count)
+{
+   int payload_node_count = this->first_non_payload_grf;
+
+   for (int i = 0; i < payload_node_count; i++) {
+      /* Mark each payload reg node as being allocated to its physical register.
+       *
+       * The alternative would be to have per-physical register classes, which
+       * would just be silly.
+       */
+      ra_set_node_reg(g, first_payload_node + i, i);
+
+      /* For now, just mark each payload node as interfering with every other
+       * node to be allocated.
+       */
+      for (int j = 0; j < reg_node_count; j++) {
+         ra_add_node_interference(g, first_payload_node + i, j);
+      }
+   }
+}
+
+bool
 vec4_visitor::reg_allocate()
 {
-   int hw_reg_mapping[virtual_grf_count];
-   int first_assigned_grf = this->first_non_payload_grf;
-   int base_reg_count = max_grf - first_assigned_grf;
-   int class_sizes[base_reg_count];
-   int class_count = 0;
+   unsigned int hw_reg_mapping[alloc.count];
+   int payload_reg_count = this->first_non_payload_grf;
 
    /* Using the trivial allocator can be useful in debugging undefined
     * register access as a result of broken optimization passes.
     */
-   if (0) {
-      reg_allocate_trivial();
-      return;
-   }
+   if (0)
+      return reg_allocate_trivial();
 
    calculate_live_intervals();
 
-   /* Set up the register classes.
-    *
-    * The base registers store a vec4. However, we'll need larger
-    * storage for arrays, structures, and matrices, which will be sets
-    * of contiguous registers.
-    */
-   class_sizes[class_count++] = 1;
-
-   for (int r = 0; r < virtual_grf_count; r++) {
-      int i;
-
-      for (i = 0; i < class_count; i++) {
-        if (class_sizes[i] == this->virtual_grf_sizes[r])
-           break;
-      }
-      if (i == class_count) {
-        if (this->virtual_grf_sizes[r] >= base_reg_count) {
-           fail("Object too large to register allocate.\n");
-        }
+   int node_count = alloc.count;
+   int first_payload_node = node_count;
+   node_count += payload_reg_count;
+   struct ra_graph *g =
+      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);
 
-        class_sizes[class_count++] = this->virtual_grf_sizes[r];
-      }
-   }
-
-   brw_alloc_reg_set_for_classes(brw, class_sizes, class_count, base_reg_count);
-
-   struct ra_graph *g = ra_alloc_interference_graph(brw->vs.regs,
-                                                   virtual_grf_count);
-
-   for (int i = 0; i < virtual_grf_count; i++) {
-      for (int c = 0; c < class_count; c++) {
-        if (class_sizes[c] == this->virtual_grf_sizes[i]) {
-           ra_set_node_class(g, i, brw->vs.classes[c]);
-           break;
-        }
-      }
+   for (unsigned i = 0; i < alloc.count; i++) {
+      int size = this->alloc.sizes[i];
+      assert(size >= 1 && size <= MAX_VGRF_SIZE);
+      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);
 
-      for (int j = 0; j < i; j++) {
+      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }
 
-   if (!ra_allocate_no_spills(g)) {
+   setup_payload_interference(g, first_payload_node, node_count);
+
+   if (!ra_allocate(g)) {
+      /* Failed to allocate registers. Spill a reg, and the caller will
+       * loop back into here to try again.
+       */
+      int reg = choose_spill_reg(g);
+      if (this->no_spills) {
+         fail("Failure to register allocate. Reduce number of live "
+              "values to avoid this.");
+      } else if (reg == -1) {
+         fail("no register to spill\n");
+      } else {
+         spill_reg(reg);
+      }
       ralloc_free(g);
-      fail("No register spilling support yet\n");
-      return;
+      return false;
   }
 
   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
-   prog_data->total_grf = first_assigned_grf;
-   for (int i = 0; i < virtual_grf_count; i++) {
+   prog_data->total_grf = payload_reg_count;
+   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);
 
-      hw_reg_mapping[i] = first_assigned_grf + brw->vs.ra_reg_to_grf[reg];
+      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
-                                 hw_reg_mapping[i] + virtual_grf_sizes[i]);
+                                 hw_reg_mapping[i] + alloc.sizes[i]);
   }
 
-   foreach_list(node, &this->instructions) {
-      vec4_instruction *inst = (vec4_instruction *)node;
-
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
@@ -231,6 +263,215 @@ vec4_visitor::reg_allocate()
   }
 
   ralloc_free(g);
+
+   return true;
+}
+
+/**
+ * When we decide to spill a register, instead of blindly spilling every use,
+ * save unspills when the spill register is used (read) in consecutive
+ * instructions. This can potentially save a bunch of unspills that would
+ * have very little impact in register allocation anyway.
+ *
+ * Notice that we need to account for this behavior when spilling a register
+ * and when evaluating spilling costs. This function is designed so it can
+ * be called from both places and avoid repeating the logic.
+ *
+ *  - When we call this function from spill_reg(), we pass in scratch_reg the
+ *    actual unspill/spill register that we want to reuse in the current
+ *    instruction.
+ *
+ *  - When we call this from evaluate_spill_costs(), we pass the register for
+ *    which we are evaluating spilling costs.
+ *
+ * In either case, we check if the previous instructions read scratch_reg until
+ * we find one that writes to it with a compatible mask or does not read/write
+ * scratch_reg at all.
+ */
+static bool
+can_use_scratch_for_source(const vec4_instruction *inst, unsigned i,
+                           unsigned scratch_reg)
+{
+   assert(inst->src[i].file == GRF);
+   bool prev_inst_read_scratch_reg = false;
+
+   /* See if any previous source in the same instruction reads scratch_reg */
+   for (unsigned n = 0; n < i; n++) {
+      if (inst->src[n].file == GRF && inst->src[n].reg == scratch_reg)
+         prev_inst_read_scratch_reg = true;
+   }
+
+   /* Now check if previous instructions read/write scratch_reg */
+   for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev;
+        !prev_inst->is_head_sentinel();
+        prev_inst = (vec4_instruction *) prev_inst->prev) {
+
+      /* If the previous instruction writes to scratch_reg then we can reuse
+       * it if the write is not conditional and the channels we write are
+       * compatible with our read mask
+       */
+      if (prev_inst->dst.file == GRF && prev_inst->dst.reg == scratch_reg) {
+         return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) &&
+                (brw_mask_for_swizzle(inst->src[i].swizzle) &
+                 ~prev_inst->dst.writemask) == 0;
+      }
+
+      /* Skip scratch read/writes so that instructions generated by spilling
+       * other registers (that won't read/write scratch_reg) do not stop us from
+       * reusing scratch_reg for this instruction.
+       */
+      if (prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE ||
+          prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ)
+         continue;
+
+      /* If the previous instruction does not write to scratch_reg, then check
+       * if it reads it
+       */
+      int n;
+      for (n = 0; n < 3; n++) {
+         if (prev_inst->src[n].file == GRF &&
+             prev_inst->src[n].reg == scratch_reg) {
+            prev_inst_read_scratch_reg = true;
+            break;
+         }
+      }
+      if (n == 3) {
+         /* The previous instruction does not read scratch_reg. At this point,
+          * if no previous instruction has read scratch_reg it means that we
+          * will need to unspill it here and we can't reuse it (so we return
+          * false). Otherwise, if we found at least one consecutive instruction
+          * that read scratch_reg, then we know that we got here from
+          * evaluate_spill_costs (since for the spill_reg path any block of
+          * consecutive instructions using scratch_reg must start with a write
+          * to that register, so we would've exited the loop in the check for
+          * the write that we have at the start of this loop), and in that case
+          * it means that we found the point at which the scratch_reg would be
+          * unspilled. Since we always unspill a full vec4, it means that we
+          * have all the channels available and we can just return true to
+          * signal that we can reuse the register in the current instruction
+          * too.
+          */
+         return prev_inst_read_scratch_reg;
+      }
+   }
+
+   return prev_inst_read_scratch_reg;
+}
+
+void
+vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
+{
+   float loop_scale = 1.0;
+
+   for (unsigned i = 0; i < this->alloc.count; i++) {
+      spill_costs[i] = 0.0;
+      no_spill[i] = alloc.sizes[i] != 1;
+   }
+
+   /* Calculate costs for spilling nodes. Call it a cost of 1 per
+    * spill/unspill we'll have to do, and guess that the insides of
+    * loops run 10 times.
+    */
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
+      for (unsigned int i = 0; i < 3; i++) {
+         if (inst->src[i].file == GRF) {
+            /* We will only unspill src[i] if it wasn't unspilled for the
+             * previous instruction, in which case we'll just reuse the scratch
+             * reg for this instruction.
+             */
+            if (!can_use_scratch_for_source(inst, i, inst->src[i].reg)) {
+               spill_costs[inst->src[i].reg] += loop_scale;
+               if (inst->src[i].reladdr)
+                  no_spill[inst->src[i].reg] = true;
+            }
+         }
+      }
+
+      if (inst->dst.file == GRF) {
+         spill_costs[inst->dst.reg] += loop_scale;
+         if (inst->dst.reladdr)
+            no_spill[inst->dst.reg] = true;
+      }
+
+      switch (inst->opcode) {
+
+      case BRW_OPCODE_DO:
+         loop_scale *= 10;
+         break;
+
+      case BRW_OPCODE_WHILE:
+         loop_scale /= 10;
+         break;
+
+      case SHADER_OPCODE_GEN4_SCRATCH_READ:
+      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
+         for (int i = 0; i < 3; i++) {
+            if (inst->src[i].file == GRF)
+               no_spill[inst->src[i].reg] = true;
+         }
+         if (inst->dst.file == GRF)
+            no_spill[inst->dst.reg] = true;
+         break;
+
+      default:
+         break;
+      }
+   }
+}
+
+int
+vec4_visitor::choose_spill_reg(struct ra_graph *g)
+{
+   float spill_costs[this->alloc.count];
+   bool no_spill[this->alloc.count];
+
+   evaluate_spill_costs(spill_costs, no_spill);
+
+   for (unsigned i = 0; i < this->alloc.count; i++) {
+      if (!no_spill[i])
+         ra_set_node_spill_cost(g, i, spill_costs[i]);
+   }
+
+   return ra_get_best_spill_node(g);
+}
+
+void
+vec4_visitor::spill_reg(int spill_reg_nr)
+{
+   assert(alloc.sizes[spill_reg_nr] == 1);
+   unsigned int spill_offset = last_scratch++;
+
+   /* Generate spill/unspill instructions for the objects being spilled. */
+   int scratch_reg = -1;
+   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
+      for (unsigned int i = 0; i < 3; i++) {
+         if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
+            if (scratch_reg == -1 ||
+                !can_use_scratch_for_source(inst, i, scratch_reg)) {
+               /* We need to unspill anyway so make sure we read the full vec4
+                * in any case. This way, the cached register can be reused
+                * for consecutive instructions that read different channels of
+                * the same vec4.
+                */
+               scratch_reg = alloc.allocate(1);
+               src_reg temp = inst->src[i];
+               temp.reg = scratch_reg;
+               temp.swizzle = BRW_SWIZZLE_XYZW;
+               emit_scratch_read(block, inst,
+                                 dst_reg(temp), inst->src[i], spill_offset);
+            }
+            assert(scratch_reg != -1);
+            inst->src[i].reg = scratch_reg;
+         }
+      }
+
+      if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
+         emit_scratch_write(block, inst, spill_offset);
+         scratch_reg = inst->dst.reg;
+      }
+   }
+
+   invalidate_live_intervals();
 }
 
 } /* namespace brw */
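
The q values hard-coded in brw_vec4_alloc_reg_set() above follow from the geometry of
contiguous register classes: on a linear register file, a class-j register of size s_j
occupies s_j consecutive base registers, and a class-i register of size s_i can overlap
that span from exactly s_i + s_j - 1 distinct start positions. That is the maximum number
of class-i registers a single class-j register can conflict with, which is all
ra_set_finalize() needs, so handing it the precomputed table skips the library's own
conflict scan (the start-up cost the comment mentions). Below is a minimal standalone
sketch that brute-force checks this identity; it is illustrative only, and max_size and
base_reg_count are stand-ins for MAX_VGRF_SIZE and the GRF count, not driver code:

#include <cassert>
#include <cstdio>

/* A register of size 'size' starting at base register 'start' occupies
 * [start, start + size). Two registers conflict iff their spans overlap.
 */
static bool
overlaps(int a_start, int a_size, int b_start, int b_size)
{
   return a_start < b_start + b_size && b_start < a_start + a_size;
}

int
main()
{
   const int max_size = 4;        /* stand-in for MAX_VGRF_SIZE */
   const int base_reg_count = 32; /* stand-in for the GRF count */

   for (int i = 0; i < max_size; i++) {
      for (int j = 0; j < max_size; j++) {
         const int s_i = i + 1;
         const int s_j = j + 1;

         /* Fix one class-j register away from the file boundaries and
          * count the class-i registers that conflict with it.
          */
         const int j_start = base_reg_count / 2;
         int conflicts = 0;
         for (int b = 0; b + s_i <= base_reg_count; b++) {
            if (overlaps(b, s_i, j_start, s_j))
               conflicts++;
         }

         /* Matches q_values[i][j] = class_sizes[i] + class_sizes[j] - 1. */
         assert(conflicts == s_i + s_j - 1);
         printf("q(%d, %d) = %d\n", i, j, conflicts);
      }
   }

   return 0;
}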