diff --git a/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp b/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp
index f0f4ad9a928..eba2fdd0816 100644
--- a/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs_reg_allocate.cpp
@@ -26,15 +26,16 @@
  */
 
 #include "brw_fs.h"
+#include "brw_cfg.h"
 #include "glsl/glsl_types.h"
 #include "glsl/ir_optimization.h"
 
 static void
-assign_reg(int *reg_hw_locations, fs_reg *reg, int reg_width)
+assign_reg(int *reg_hw_locations, fs_reg *reg)
 {
    if (reg->file == GRF) {
       assert(reg->reg_offset >= 0);
-      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset * reg_width;
+      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
       reg->reg_offset = 0;
    }
 }
@@ -50,35 +51,36 @@ fs_visitor::assign_regs_trivial()
    hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
    for (i = 1; i <= this->virtual_grf_count; i++) {
       hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
-                           this->virtual_grf_sizes[i - 1] * reg_width);
+                           this->virtual_grf_sizes[i - 1]);
    }
    this->grf_used = hw_reg_mapping[this->virtual_grf_count];
 
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
-
-      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
+   foreach_block_and_inst(block, fs_inst, inst, cfg) {
+      assign_reg(hw_reg_mapping, &inst->dst);
+      for (i = 0; i < inst->sources; i++) {
+         assign_reg(hw_reg_mapping, &inst->src[i]);
+      }
    }
 
    if (this->grf_used >= max_grf) {
       fail("Ran out of regs on trivial allocator (%d/%d)\n",
            this->grf_used, max_grf);
+   } else {
+      this->virtual_grf_count = this->grf_used;
    }
 }
 
 static void
-brw_alloc_reg_set(struct brw_context *brw, int reg_width)
+brw_alloc_reg_set(struct intel_screen *screen, int reg_width)
 {
-   int base_reg_count = BRW_MAX_GRF / reg_width;
+   const struct brw_device_info *devinfo = screen->devinfo;
+   int base_reg_count = BRW_MAX_GRF;
    int index = reg_width - 1;
 
    /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
-    * case of 16-wide, which is handled by dividing base_reg_count by 2 and
+    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
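
A minimal standalone sketch of what the trivial allocator in the hunk above computes: a prefix sum over the virtual-GRF sizes that packs every VGRF end-to-end after the payload (modulo the ALIGN of the first slot to reg_width). The names here are illustrative, not mesa's:

   #include <vector>

   /* hw_mapping[v] is the first hardware GRF of virtual GRF v; the final
    * entry is the total number of GRFs used (grf_used above). */
   static std::vector<int>
   layout_vgrfs_trivially(int first_non_payload_grf,
                          const std::vector<int> &vgrf_sizes)
   {
      std::vector<int> hw_mapping(vgrf_sizes.size() + 1);
      hw_mapping[0] = first_non_payload_grf;
      for (size_t i = 1; i < hw_mapping.size(); i++)
         hw_mapping[i] = hw_mapping[i - 1] + vgrf_sizes[i - 1];
      return hw_mapping;
   }
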
@@ -100,10 +102,10 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
    * regs).
    */
    int class_count;
-   int class_sizes[BRW_MAX_MRF];
+   int class_sizes[MAX_VGRF_SIZE];
 
-   if (brw->gen >= 7) {
-      for (class_count = 0; class_count < 11; class_count++)
+   if (devinfo->gen >= 7) {
+      for (class_count = 0; class_count < MAX_VGRF_SIZE; class_count++)
          class_sizes[class_count] = class_count + 1;
    } else {
       for (class_count = 0; class_count < 4; class_count++)
@@ -111,19 +113,52 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
       class_sizes[class_count++] = 8;
    }
 
+   memset(screen->wm_reg_sets[index].class_to_ra_reg_range, 0,
+          sizeof(screen->wm_reg_sets[index].class_to_ra_reg_range));
+   int *class_to_ra_reg_range = screen->wm_reg_sets[index].class_to_ra_reg_range;
+
    /* Compute the total number of registers across all classes. */
    int ra_reg_count = 0;
    for (int i = 0; i < class_count; i++) {
-      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
+      if (devinfo->gen <= 5 && reg_width == 2) {
+         /* From the G45 PRM:
+          *
+          * In order to reduce the hardware complexity, the following
+          * rules and restrictions apply to the compressed instruction:
+          * ...
+          * * Operand Alignment Rule: With the exceptions listed below, a
+          *   source/destination operand in general should be aligned to
+          *   even 256-bit physical register with a region size equal to
+          *   two 256-bit physical registers.
+          */
+         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
+      } else {
+         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
+      }
+      /* Mark the last register.  We'll fill in the beginnings later. */
+      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
    }
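
A worked example of the range markers being built here, assuming BRW_MAX_GRF == 128 and the gen7+/SIMD8 case (class sizes 1, 2, 3, ...):

   size 1: 128 registers  ->  class_to_ra_reg_range[1] = 128
   size 2: 127 registers  ->  class_to_ra_reg_range[2] = 255
   size 3: 126 registers  ->  class_to_ra_reg_range[3] = 381

The loop above records only where each class's range ends; the loop just below fills in the gaps. The result is that RA registers in [class_to_ra_reg_range[size - 1], class_to_ra_reg_range[size]) belong to the class of that size, and class_to_ra_reg_range[size] - 1 is the highest-numbered RA register of the class, which is how the last send gets pinned to a high GRF further down.
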
 
+   /* Fill out the rest of the range markers */
+   for (int i = 1; i < 17; ++i) {
+      if (class_to_ra_reg_range[i] == 0)
+         class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
+   }
+
-   uint8_t *ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
-   struct ra_regs *regs = ra_alloc_reg_set(brw, ra_reg_count);
-   if (brw->gen >= 6)
+   uint8_t *ra_reg_to_grf = ralloc_array(screen, uint8_t, ra_reg_count);
+   struct ra_regs *regs = ra_alloc_reg_set(screen, ra_reg_count);
+   if (devinfo->gen >= 6)
       ra_set_allocate_round_robin(regs);
-   int *classes = ralloc_array(brw, int, class_count);
+   int *classes = ralloc_array(screen, int, class_count);
    int aligned_pairs_class = -1;
 
+   /* Allocate space for q values.  We allocate class_count + 1 because we
+    * want to leave room for the aligned pairs class if we have it. */
+   unsigned int **q_values = ralloc_array(screen, unsigned int *,
+                                          class_count + 1);
+   for (int i = 0; i < class_count + 1; ++i)
+      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);
+
    /* Now, add the registers to their classes, and add the conflicts
     * between them and the base GRF registers (and also each other).
     */
@@ -131,27 +166,81 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
    int pairs_base_reg = 0;
    int pairs_reg_count = 0;
    for (int i = 0; i < class_count; i++) {
-      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
+      int class_reg_count;
+      if (devinfo->gen <= 5 && reg_width == 2) {
+         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;
+
+         /* See comment below.  The only difference here is that we are
+          * dealing with pairs of registers instead of single registers.
+          * Registers of odd sizes simply get rounded up.
+          */
+         for (int j = 0; j < class_count; j++)
+            q_values[i][j] = (class_sizes[i] + 1) / 2 +
+                             (class_sizes[j] + 1) / 2 - 1;
+      } else {
+         class_reg_count = base_reg_count - (class_sizes[i] - 1);
+
+         /* From register_allocate.c:
+          *
+          * q(B,C) (indexed by C, B is this register class) in
+          * Runeson/Nyström paper.  This is "how many registers of B could
+          * the worst choice register from C conflict with".
+          *
+          * If we just let the register allocation algorithm compute these
+          * values, it is extremely expensive.  However, since all of our
+          * registers are laid out, we can very easily compute them
+          * ourselves.  View the register from C as fixed starting at GRF n
+          * somewhere in the middle, and the register from B as sliding back
+          * and forth.  Then the first register to conflict from B is the
+          * one starting at n - class_size[B] + 1 and the last register to
+          * conflict will start at n + class_size[B] - 1.  Therefore, the
+          * number of conflicts from B is class_size[B] + class_size[C] - 1.
+          *
+          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
+          * B | | | | | |n| --> | | | | | | |
+          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
+          *             +-+-+-+-+-+
+          * C           |n| | | | |
+          *             +-+-+-+-+-+
+          */
+         for (int j = 0; j < class_count; j++)
+            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
+      }
       classes[i] = ra_alloc_reg_class(regs);
 
       /* Save this off for the aligned pair class at the end. */
       if (class_sizes[i] == 2) {
-         pairs_base_reg = reg;
-         pairs_reg_count = class_reg_count;
+         pairs_base_reg = reg;
+         pairs_reg_count = class_reg_count;
       }
 
-      for (int j = 0; j < class_reg_count; j++) {
-         ra_class_add_reg(regs, classes[i], reg);
+      if (devinfo->gen <= 5 && reg_width == 2) {
+         for (int j = 0; j < class_reg_count; j++) {
+            ra_class_add_reg(regs, classes[i], reg);
 
-         ra_reg_to_grf[reg] = j;
+            ra_reg_to_grf[reg] = j * 2;
 
-         for (int base_reg = j;
-              base_reg < j + class_sizes[i];
-              base_reg++) {
-            ra_add_transitive_reg_conflict(regs, base_reg, reg);
-         }
+            for (int base_reg = j;
+                 base_reg < j + (class_sizes[i] + 1) / 2;
+                 base_reg++) {
+               ra_add_transitive_reg_conflict(regs, base_reg, reg);
+            }
+
+            reg++;
+         }
+      } else {
+         for (int j = 0; j < class_reg_count; j++) {
+            ra_class_add_reg(regs, classes[i], reg);
+
+            ra_reg_to_grf[reg] = j;
+
+            for (int base_reg = j;
+                 base_reg < j + class_sizes[i];
+                 base_reg++) {
+               ra_add_transitive_reg_conflict(regs, base_reg, reg);
+            }
 
-         reg++;
+            reg++;
+         }
       }
    }
    assert(reg == ra_reg_count);
@@ -159,7 +248,7 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
    /* Add a special class for aligned pairs, which we'll put delta_x/y
     * in on gen5 so that we can do PLN.
     */
-   if (brw->has_pln && reg_width == 1 && brw->gen < 6) {
+   if (devinfo->has_pln && reg_width == 1 && devinfo->gen < 6) {
       aligned_pairs_class = ra_alloc_reg_class(regs);
 
       for (int i = 0; i < pairs_reg_count; i++) {
@@ -167,47 +256,62 @@ brw_alloc_reg_set(struct brw_context *brw, int reg_width)
          ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
       }
    }
+
+   for (int i = 0; i < class_count; i++) {
+      /* These are a little counter-intuitive because the pair registers
+       * are required to be aligned while the registers they are
+       * potentially interfering with are not.  In the case where the
+       * size is even, the worst-case is that the register is
+       * odd-aligned.  In the odd-size case, it doesn't matter.
+       */
+      q_values[class_count][i] = class_sizes[i] / 2 + 1;
+      q_values[i][class_count] = class_sizes[i] + 1;
+   }
+   q_values[class_count][class_count] = 1;
    }
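
A brute-force sanity check of the q formula in the comment above, as a self-contained sketch: slide a size_b register across a fixed size_c register and count overlapping start positions. For sizes 2 and 4 this counts 2 + 4 - 1 = 5 conflicts:

   static bool
   regs_overlap(int b_start, int size_b, int c_start, int size_c)
   {
      return b_start < c_start + size_c && c_start < b_start + size_b;
   }

   static int
   count_conflicts(int size_b, int size_c)
   {
      const int c_start = 64;   /* the fixed "GRF n" from the comment */
      int conflicts = 0;
      for (int b_start = 0; b_start < 128; b_start++)
         if (regs_overlap(b_start, size_b, c_start, size_c))
            conflicts++;
      return conflicts;         /* == size_b + size_c - 1 */
   }

Restricting b_start to even values models the aligned-pairs class: among the size_c + 1 conflicting start positions, at most size_c / 2 + 1 are even (worst case when the fixed register is odd-aligned), which is where the class_sizes[i] / 2 + 1 entries just above come from.
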
 
-   ra_set_finalize(regs, NULL);
+   ra_set_finalize(regs, q_values);
 
-   brw->wm.reg_sets[index].regs = regs;
-   for (unsigned i = 0; i < ARRAY_SIZE(brw->wm.reg_sets[index].classes); i++)
-      brw->wm.reg_sets[index].classes[i] = -1;
+   ralloc_free(q_values);
+
+   screen->wm_reg_sets[index].regs = regs;
+   for (unsigned i = 0; i < ARRAY_SIZE(screen->wm_reg_sets[index].classes); i++)
+      screen->wm_reg_sets[index].classes[i] = -1;
    for (int i = 0; i < class_count; i++)
-      brw->wm.reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
-   brw->wm.reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
-   brw->wm.reg_sets[index].aligned_pairs_class = aligned_pairs_class;
+      screen->wm_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
+   screen->wm_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
+   screen->wm_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
 }
 
 void
-brw_fs_alloc_reg_sets(struct brw_context *brw)
+brw_fs_alloc_reg_sets(struct intel_screen *screen)
 {
-   brw_alloc_reg_set(brw, 1);
-   brw_alloc_reg_set(brw, 2);
+   brw_alloc_reg_set(screen, 1);
+   brw_alloc_reg_set(screen, 2);
 }
 
-int
-count_to_loop_end(fs_inst *do_inst)
+static int
+count_to_loop_end(const bblock_t *block)
 {
+   if (block->end()->opcode == BRW_OPCODE_WHILE)
+      return block->end_ip;
+
    int depth = 1;
-   int ip = 1;
-   for (fs_inst *inst = (fs_inst *)do_inst->next;
+   /* Skip the first block, since we don't want to count the DO that the
+    * calling function found.
+    */
+   for (block = block->next();
         depth > 0;
-        inst = (fs_inst *)inst->next) {
-      switch (inst->opcode) {
-      case BRW_OPCODE_DO:
+        block = block->next()) {
+      if (block->start()->opcode == BRW_OPCODE_DO)
          depth++;
-         break;
-      case BRW_OPCODE_WHILE:
+      if (block->end()->opcode == BRW_OPCODE_WHILE) {
          depth--;
-         break;
-      default:
-         break;
+         if (depth == 0)
+            return block->end_ip;
       }
-      ip++;
    }
-   return ip;
+   unreachable("not reached");
 }
 
 /**
@@ -220,28 +324,25 @@ count_to_loop_end(fs_inst *do_inst)
  *
  * The layout of the payload registers is:
  *
- * 0..nr_payload_regs-1: fixed function setup (including bary coordinates).
- * nr_payload_regs..nr_payload_regs+curb_read_lengh-1: uniform data
- * nr_payload_regs+curb_read_lengh..first_non_payload_grf-1: setup coefficients.
+ * 0..payload.num_regs-1: fixed function setup (including bary coordinates).
+ * payload.num_regs..payload.num_regs+curbe_read_length-1: uniform data
+ * payload.num_regs+curbe_read_length..first_non_payload_grf-1: setup coefficients.
  *
  * And we have payload_node_count nodes covering these registers in order
- * (note that in 16-wide, a node is two registers).
+ * (note that in SIMD16, a node is two registers).
  */
 void
 fs_visitor::setup_payload_interference(struct ra_graph *g,
                                        int payload_node_count,
                                        int first_payload_node)
 {
-   int reg_width = dispatch_width / 8;
    int loop_depth = 0;
    int loop_end_ip = 0;
 
    int payload_last_use_ip[payload_node_count];
    memset(payload_last_use_ip, 0, sizeof(payload_last_use_ip));
    int ip = 0;
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
-
+   foreach_block_and_inst(block, fs_inst, inst, cfg) {
       switch (inst->opcode) {
       case BRW_OPCODE_DO:
          loop_depth++;
@@ -252,7 +353,7 @@ fs_visitor::setup_payload_interference(struct ra_graph *g,
           * the end now.
           */
          if (loop_depth == 1)
-            loop_end_ip = ip + count_to_loop_end(inst);
+            loop_end_ip = count_to_loop_end(block);
          break;
 
       case BRW_OPCODE_WHILE:
         loop_depth--;
@@ -271,10 +372,10 @@ fs_visitor::setup_payload_interference(struct ra_graph *g,
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
-      for (int i = 0; i < 3; i++) {
+      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == HW_REG &&
             inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
-            int node_nr = inst->src[i].fixed_hw_reg.nr / reg_width;
+            int node_nr = inst->src[i].fixed_hw_reg.nr;
            if (node_nr >= payload_node_count)
               continue;
 
@@ -284,31 +385,33 @@ fs_visitor::setup_payload_interference(struct ra_graph *g,
       /* Special case instructions which have extra implied registers used.
        */
       switch (inst->opcode) {
+      case SHADER_OPCODE_URB_WRITE_SIMD8:
       case FS_OPCODE_FB_WRITE:
         /* We could omit this for the !inst->header_present case, except that
          * the simulator apparently incorrectly reads from g0/g1 instead of
          * sideband.  It also really freaks out driver developers to see g0
          * used in unusual places, so just always reserve it.
          */
-         payload_last_use_ip[0 / reg_width] = use_ip;
-         payload_last_use_ip[1 / reg_width] = use_ip;
+         payload_last_use_ip[0] = use_ip;
+         payload_last_use_ip[1] = use_ip;
         break;
 
       case FS_OPCODE_LINTERP:
-         /* On gen6+ in 16-wide, there are 4 adjacent registers (so 2 nodes)
-          * used by PLN's sourcing of the deltas, while we list only the first
-          * two in the arguments (1 node).  Pre-gen6, the deltas are computed
-          * in normal VGRFs.
+         /* On gen6+ in SIMD16, there are 4 adjacent registers used by
+          * PLN's sourcing of the deltas, while we list only the first one
+          * in the arguments.  Pre-gen6, the deltas are computed in normal
+          * VGRFs.
          */
         if (brw->gen >= 6) {
            int delta_x_arg = 0;
            if (inst->src[delta_x_arg].file == HW_REG &&
                inst->src[delta_x_arg].fixed_hw_reg.file ==
                BRW_GENERAL_REGISTER_FILE) {
-               int sechalf_node = (inst->src[delta_x_arg].fixed_hw_reg.nr /
-                                   reg_width) + 1;
-               assert(sechalf_node < payload_node_count);
-               payload_last_use_ip[sechalf_node] = use_ip;
+               for (int i = 1; i < 4; ++i) {
+                  int node = inst->src[delta_x_arg].fixed_hw_reg.nr + i;
+                  assert(node < payload_node_count);
+                  payload_last_use_ip[node] = use_ip;
+               }
            }
         }
         break;
@@ -342,26 +445,35 @@ fs_visitor::setup_payload_interference(struct ra_graph *g,
        * The alternative would be to have per-physical-register classes, which
        * would just be silly.
        */
-      ra_set_node_reg(g, first_payload_node + i, i);
+      if (brw->intelScreen->devinfo->gen <= 5 && dispatch_width == 16) {
+         /* We have to divide by 2 here because we only have even numbered
+          * registers.  Some of the payload registers will be odd, but
+          * that's ok because their physical register numbers have already
+          * been assigned.  The only thing this is used for is interference.
+          */
+         ra_set_node_reg(g, first_payload_node + i, i / 2);
+      } else {
+         ra_set_node_reg(g, first_payload_node + i, i);
+      }
    }
 }
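
A worked note on the pinning above for the gen <= 5 SIMD16 case, where the RA register file was built out of aligned register pairs: payload node i gets pinned to pair i / 2, so payload GRFs g2 and g3 both land on pair 1, g4 and g5 on pair 2, and so on. An odd payload GRF sharing a pair with its even neighbor is harmless here because the payload's physical register numbers were assigned long before allocation; as the comment says, this pinning exists only so that live payload registers interfere with virtual GRFs.
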
 
 /**
- * Sets interference between virtual GRFs and usage of the high GRFs for SEND
- * messages (treated as MRFs in code generation).
+ * Sets the mrf_used array to indicate which MRFs are used by the shader IR
+ *
+ * This is used in assign_regs() to decide which of the GRFs that we use as
+ * MRFs on gen7 get normally register allocated, and in register spilling to
+ * see if we can actually use MRFs to do spills without overwriting normal MRF
+ * contents.
  */
 void
-fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
+fs_visitor::get_used_mrfs(bool *mrf_used)
 {
-   int mrf_count = BRW_MAX_GRF - GEN7_MRF_HACK_START;
    int reg_width = dispatch_width / 8;
 
-   /* Identify all the MRFs used in the program. */
-   bool mrf_used[mrf_count];
-   memset(mrf_used, 0, sizeof(mrf_used));
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
+   memset(mrf_used, 0, BRW_MAX_MRF * sizeof(bool));
+   foreach_block_and_inst(block, fs_inst, inst, cfg) {
       if (inst->dst.file == MRF) {
          int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
          mrf_used[reg] = true;
@@ -380,15 +492,25 @@ fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
         }
      }
   }
+}
+
+/**
+ * Sets interference between virtual GRFs and usage of the high GRFs for SEND
+ * messages (treated as MRFs in code generation).
+ */
+void
+fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
+{
+   bool mrf_used[BRW_MAX_MRF];
+   get_used_mrfs(mrf_used);
 
-   for (int i = 0; i < mrf_count; i++) {
-      /* Mark each payload reg node as being allocated to its physical register.
+   for (int i = 0; i < BRW_MAX_MRF; i++) {
+      /* Mark each MRF reg node as being allocated to its physical register.
        *
        * The alternative would be to have per-physical-register classes, which
        * would just be silly.
        */
-      ra_set_node_reg(g, first_mrf_node + i,
-                      (GEN7_MRF_HACK_START + i) / reg_width);
+      ra_set_node_reg(g, first_mrf_node + i, GEN7_MRF_HACK_START + i);
 
       /* Since we don't have any live/dead analysis on the MRFs, just mark all
        * that are used as conflicting with all virtual GRFs.
       */
@@ -401,20 +523,33 @@ fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
   }
 }
 
+static bool
+is_last_send(fs_inst *inst)
+{
+   switch (inst->opcode) {
+   case SHADER_OPCODE_URB_WRITE_SIMD8:
+   case FS_OPCODE_FB_WRITE:
+      return inst->eot;
+   default:
+      assert(!inst->eot);
+      return false;
+   }
+}
+
 bool
-fs_visitor::assign_regs()
+fs_visitor::assign_regs(bool allow_spilling)
 {
+   struct intel_screen *screen = brw->intelScreen;
    /* Most of this allocation was written for a reg_width of 1
-    * (dispatch_width == 8).  In extending to 16-wide, the code was
+    * (dispatch_width == 8).  In extending to SIMD16, the code was
     * left in place and it was converted to have the hardware
     * registers it's allocating be contiguous physical pairs of regs
     * for reg_width == 2.
    */
   int reg_width = dispatch_width / 8;
   int hw_reg_mapping[this->virtual_grf_count];
-   int payload_node_count = (ALIGN(this->first_non_payload_grf, reg_width) /
-                             reg_width);
-   int rsi = reg_width - 1; /* Which brw->wm.reg_sets[] to use */
+   int payload_node_count = ALIGN(this->first_non_payload_grf, reg_width);
+   int rsi = reg_width - 1; /* Which screen->wm_reg_sets[] to use */
   calculate_live_intervals();
 
   int node_count = this->virtual_grf_count;
@@ -423,16 +558,16 @@ fs_visitor::assign_regs()
   int first_mrf_hack_node = node_count;
   if (brw->gen >= 7)
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
-   struct ra_graph *g = ra_alloc_interference_graph(brw->wm.reg_sets[rsi].regs,
-                                                    node_count);
+   struct ra_graph *g =
+      ra_alloc_interference_graph(screen->wm_reg_sets[rsi].regs, node_count);
 
   for (int i = 0; i < this->virtual_grf_count; i++) {
      unsigned size = this->virtual_grf_sizes[i];
      int c;
 
-      assert(size <= ARRAY_SIZE(brw->wm.reg_sets[rsi].classes) &&
+      assert(size <= ARRAY_SIZE(screen->wm_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
-      c = brw->wm.reg_sets[rsi].classes[size - 1];
+      c = screen->wm_reg_sets[rsi].classes[size - 1];
 
      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
@@ -443,9 +578,10 @@ fs_visitor::assign_regs()
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
-      if (brw->wm.reg_sets[rsi].aligned_pairs_class >= 0 &&
+      if (screen->wm_reg_sets[rsi].aligned_pairs_class >= 0 &&
+          this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].file == GRF &&
          this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg == i) {
-         c = brw->wm.reg_sets[rsi].aligned_pairs_class;
+         c = screen->wm_reg_sets[rsi].aligned_pairs_class;
      }
 
      ra_set_node_class(g, i, c);
@@ -458,10 +594,65 @@ fs_visitor::assign_regs()
   }
 
   setup_payload_interference(g, payload_node_count, first_payload_node);
-   if (brw->gen >= 7)
+   if (brw->gen >= 7) {
      setup_mrf_hack_interference(g, first_mrf_hack_node);
 
-   if (!ra_allocate_no_spills(g)) {
+      foreach_block_and_inst(block, fs_inst, inst, cfg) {
+         /* When we do send-from-GRF for FB writes, we need to ensure that
+          * the last write instruction sends from a high register.  This is
+          * because the vertex fetcher wants to start filling the low
+          * payload registers while the pixel data port is still working on
+          * writing out the memory.  If we don't do this, we get rendering
+          * artifacts.
+          *
+          * We could just do "something high".  Instead, we just pick the
+          * highest register that works.
+          */
+         if (is_last_send(inst)) {
+            int size = virtual_grf_sizes[inst->src[0].reg];
+            int reg = screen->wm_reg_sets[rsi].class_to_ra_reg_range[size] - 1;
+            ra_set_node_reg(g, inst->src[0].reg, reg);
+            break;
+         }
+      }
+   }
+
+   if (dispatch_width > 8) {
+      /* In 16-wide dispatch we have an issue where a compressed
+       * instruction is actually two instructions executed simultaneously.
+       * It's actually ok to have the source and destination registers be
+       * the same.  In this case, each instruction over-writes its own
+       * source and there's no problem.  The real problem here is if the
+       * source and destination registers are off by one.  Then you can end
+       * up in a scenario where the first instruction over-writes the
+       * source of the second instruction.  Since the compiler doesn't know
+       * about this level of granularity, we simply make the source and
+       * destination interfere.
+       */
+      foreach_block_and_inst(block, fs_inst, inst, cfg) {
+         if (inst->dst.file != GRF)
+            continue;
+
+         for (int i = 0; i < inst->sources; ++i) {
+            if (inst->src[i].file == GRF) {
+               ra_add_node_interference(g, inst->dst.reg, inst->src[i].reg);
+            }
+         }
+      }
+   }
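
A concrete instance of the off-by-one hazard described above, with hypothetical register numbers. A compressed SIMD16 instruction executes as two SIMD8 halves, each advancing one GRF:

   add(16)  dst=g13  src=g12   executes as:
      add(8)  g13 <- g12       first half writes g13
      add(8)  g14 <- g13       second half reads g13, which was just clobbered

With dst == src (say g12 <- g12) each half overwrites only its own input, which is why exact reuse is fine and only the offset-by-one layouts have to be ruled out by the interference added above.
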
+
+   /* Debug of register spilling: Go spill everything. */
+   if (0) {
+      int reg = choose_spill_reg(g);
+
+      if (reg != -1) {
+         spill_reg(reg);
+         ralloc_free(g);
+         return false;
+      }
+   }
+
+   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
@@ -469,15 +660,11 @@ fs_visitor::assign_regs()
      if (reg == -1) {
         fail("no register to spill:\n");
-         dump_instructions();
-      } else if (dispatch_width == 16) {
-         fail("Failure to register allocate.  Reduce number of live scalar "
-              "values to avoid this.");
-      } else {
-         spill_reg(reg);
+         dump_instructions(NULL);
+      } else if (allow_spilling) {
+         spill_reg(reg);
      }
 
-
      ralloc_free(g);
 
      return false;
@@ -487,44 +674,87 @@ fs_visitor::assign_regs()
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
-   this->grf_used = payload_node_count * reg_width;
+   this->grf_used = payload_node_count;
   for (int i = 0; i < this->virtual_grf_count; i++) {
      int reg = ra_get_node_reg(g, i);
 
-      hw_reg_mapping[i] = brw->wm.reg_sets[rsi].ra_reg_to_grf[reg] * reg_width;
+      hw_reg_mapping[i] = screen->wm_reg_sets[rsi].ra_reg_to_grf[reg];
      this->grf_used = MAX2(this->grf_used,
-                            hw_reg_mapping[i] + this->virtual_grf_sizes[i] *
-                            reg_width);
+                            hw_reg_mapping[i] + this->virtual_grf_sizes[i]);
   }
 
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
-
-      assign_reg(hw_reg_mapping, &inst->dst, reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[0], reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[1], reg_width);
-      assign_reg(hw_reg_mapping, &inst->src[2], reg_width);
+   foreach_block_and_inst(block, fs_inst, inst, cfg) {
+      assign_reg(hw_reg_mapping, &inst->dst);
+      for (int i = 0; i < inst->sources; i++) {
+         assign_reg(hw_reg_mapping, &inst->src[i]);
+      }
   }
 
+   this->virtual_grf_count = this->grf_used;
+
   ralloc_free(g);
 
   return true;
 }
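
A sketch of the retry contract that the return-false path above assumes; this is hypothetical caller code in the spirit of fs_visitor::run(), not copied from it. spill_reg() has already rewritten the program, so the caller just re-runs allocation until it succeeds or fail() has been recorded:

   while (!assign_regs(allow_spilling)) {
      if (failed)
         break;   /* no spillable register, or spilling not allowed */
   }
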
 
 void
-fs_visitor::emit_unspill(fs_inst *inst, fs_reg dst, uint32_t spill_offset)
+fs_visitor::emit_unspill(bblock_t *block, fs_inst *inst, fs_reg dst,
+                         uint32_t spill_offset, int count)
 {
-   fs_inst *unspill_inst = new(mem_ctx) fs_inst(FS_OPCODE_UNSPILL, dst);
-   unspill_inst->offset = spill_offset;
-   unspill_inst->ir = inst->ir;
-   unspill_inst->annotation = inst->annotation;
+   int reg_size = 1;
+   if (dispatch_width == 16 && count % 2 == 0) {
+      reg_size = 2;
+      dst.width = 16;
+   }
 
-   /* Choose a MRF that won't conflict with an MRF that's live across the
-    * spill.  Nothing else will make it up to MRF 14/15.
-    */
-   unspill_inst->base_mrf = 14;
-   unspill_inst->mlen = 1; /* header contains offset */
-   inst->insert_before(unspill_inst);
+   for (int i = 0; i < count / reg_size; i++) {
+      /* The gen7 descriptor-based offset is 12 bits of HWORD units. */
+      bool gen7_read = brw->gen >= 7 && spill_offset < (1 << 12) * REG_SIZE;
+
+      fs_inst *unspill_inst =
+         new(mem_ctx) fs_inst(gen7_read ?
+                              SHADER_OPCODE_GEN7_SCRATCH_READ :
+                              SHADER_OPCODE_GEN4_SCRATCH_READ,
+                              dst);
+      unspill_inst->offset = spill_offset;
+      unspill_inst->ir = inst->ir;
+      unspill_inst->annotation = inst->annotation;
+      unspill_inst->regs_written = reg_size;
+
+      if (!gen7_read) {
+         unspill_inst->base_mrf = 14;
+         unspill_inst->mlen = 1; /* header contains offset */
+      }
+      inst->insert_before(block, unspill_inst);
+
+      dst.reg_offset += reg_size;
+      spill_offset += reg_size * REG_SIZE;
+   }
+}
+
+void
+fs_visitor::emit_spill(bblock_t *block, fs_inst *inst, fs_reg src,
+                       uint32_t spill_offset, int count)
+{
+   int reg_size = 1;
+   int spill_base_mrf = 14;
+   if (dispatch_width == 16 && count % 2 == 0) {
+      spill_base_mrf = 13;
+      reg_size = 2;
+   }
+
+   for (int i = 0; i < count / reg_size; i++) {
+      fs_inst *spill_inst =
+         new(mem_ctx) fs_inst(SHADER_OPCODE_GEN4_SCRATCH_WRITE,
+                              reg_size * 8, reg_null_f, src);
+      src.reg_offset += reg_size;
+      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
+      spill_inst->ir = inst->ir;
+      spill_inst->annotation = inst->annotation;
+      spill_inst->mlen = 1 + reg_size; /* header, value */
+      spill_inst->base_mrf = spill_base_mrf;
+      inst->insert_after(block, spill_inst);
+   }
 }
 
 int
@@ -543,10 +773,8 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
-
-      for (unsigned int i = 0; i < 3; i++) {
+   foreach_block_and_inst(block, fs_inst, inst, cfg) {
+      for (unsigned int i = 0; i < inst->sources; i++) {
        if (inst->src[i].file == GRF) {
           spill_costs[inst->src[i].reg] += loop_scale;
 
@@ -557,7 +785,7 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
            * loading pull constants, so spilling them is unlikely to reduce
            * register pressure anyhow.
            */
-           if (inst->src[i].smear >= 0) {
+           if (!inst->src[i].is_contiguous()) {
              no_spill[inst->src[i].reg] = true;
           }
        }
@@ -566,7 +794,7 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
      if (inst->dst.file == GRF) {
        spill_costs[inst->dst.reg] += inst->regs_written * loop_scale;
 
-        if (inst->dst.smear >= 0) {
+        if (!inst->dst.is_contiguous()) {
           no_spill[inst->dst.reg] = true;
        }
      }
@@ -581,12 +809,13 @@ fs_visitor::choose_spill_reg(struct ra_graph *g)
         loop_scale /= 10;
         break;
 
-      case FS_OPCODE_SPILL:
+      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == GRF)
            no_spill[inst->src[0].reg] = true;
         break;
 
-      case FS_OPCODE_UNSPILL:
+      case SHADER_OPCODE_GEN4_SCRATCH_READ:
+      case SHADER_OPCODE_GEN7_SCRATCH_READ:
        if (inst->dst.file == GRF)
           no_spill[inst->dst.reg] = true;
        break;
@@ -608,24 +837,52 @@ void
 fs_visitor::spill_reg(int spill_reg)
 {
   int size = virtual_grf_sizes[spill_reg];
-   unsigned int spill_offset = c->last_scratch;
+   unsigned int spill_offset = last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
-   c->last_scratch += size * REG_SIZE;
+   int spill_base_mrf = dispatch_width > 8 ? 13 : 14;
+
+   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
+    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
+    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
+    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
+    * depth), starting from m1.  In summary: We may not be able to spill in
+    * SIMD16 mode, because we'd stomp the FB writes.
+    */
+   if (!spilled_any_registers) {
+      bool mrf_used[BRW_MAX_MRF];
+      get_used_mrfs(mrf_used);
+
+      for (int i = spill_base_mrf; i < BRW_MAX_MRF; i++) {
+         if (mrf_used[i]) {
+            fail("Register spilling not supported with m%d used", i);
+            return;
+         }
+      }
+
+      spilled_any_registers = true;
+   }
+
+   last_scratch += size * REG_SIZE;
 
   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
-   foreach_list(node, &this->instructions) {
-      fs_inst *inst = (fs_inst *)node;
-
-      for (unsigned int i = 0; i < 3; i++) {
+   foreach_block_and_inst (block, fs_inst, inst, cfg) {
+      for (unsigned int i = 0; i < inst->sources; i++) {
        if (inst->src[i].file == GRF &&
            inst->src[i].reg == spill_reg) {
-            inst->src[i].reg = virtual_grf_alloc(1);
-            emit_unspill(inst, inst->src[i],
-                         spill_offset + REG_SIZE * inst->src[i].reg_offset);
+            int regs_read = inst->regs_read(this, i);
+            int subset_spill_offset = (spill_offset +
+                                       REG_SIZE * inst->src[i].reg_offset);
+            fs_reg unspill_dst(GRF, virtual_grf_alloc(regs_read));
+
+            inst->src[i].reg = unspill_dst.reg;
+            inst->src[i].reg_offset = 0;
+
+            emit_unspill(block, inst, unspill_dst, subset_spill_offset,
+                         regs_read);
        }
      }
 
@@ -633,39 +890,29 @@ fs_visitor::spill_reg(int spill_reg)
         inst->dst.reg == spill_reg) {
         int subset_spill_offset = (spill_offset +
                                    REG_SIZE * inst->dst.reg_offset);
-         inst->dst.reg = virtual_grf_alloc(inst->regs_written);
+         fs_reg spill_src(GRF, virtual_grf_alloc(inst->regs_written));
+
+         inst->dst.reg = spill_src.reg;
         inst->dst.reg_offset = 0;
 
+         /* If we're immediately spilling the register, we should not use
+          * destination dependency hints.  Doing so will cause the GPU to
+          * try to read and write the register at the same time and may
+          * hang the GPU.
+          */
+         inst->no_dd_clear = false;
+         inst->no_dd_check = false;
+
         /* If our write is going to affect just part of the
         * inst->regs_written(), then we need to unspill the destination
         * since we write back out all of the regs_written().
         */
-         if (inst->predicate || inst->force_uncompressed || inst->force_sechalf) {
-            fs_reg unspill_reg = inst->dst;
-            for (int chan = 0; chan < inst->regs_written; chan++) {
-               emit_unspill(inst, unspill_reg,
-                            subset_spill_offset + REG_SIZE * chan);
-               unspill_reg.reg_offset++;
-            }
-         }
+         if (inst->is_partial_write())
+            emit_unspill(block, inst, spill_src, subset_spill_offset,
+                         inst->regs_written);
 
-         fs_reg spill_src = inst->dst;
-         spill_src.reg_offset = 0;
-         spill_src.abs = false;
-         spill_src.negate = false;
-         spill_src.smear = -1;
-
-         for (int chan = 0; chan < inst->regs_written; chan++) {
-            fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
-                                                       reg_null_f, spill_src);
-            spill_src.reg_offset++;
-            spill_inst->offset = subset_spill_offset + chan * REG_SIZE;
-            spill_inst->ir = inst->ir;
-            spill_inst->annotation = inst->annotation;
-            spill_inst->base_mrf = 14;
-            spill_inst->mlen = 2; /* header, value */
-            inst->insert_after(spill_inst);
-         }
+         emit_spill(block, inst, spill_src, subset_spill_offset,
+                    inst->regs_written);
      }
   }
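
A worked example of the scratch messages emit_spill() builds, using the values visible above and REG_SIZE == 32 bytes: spilling a size-2 VGRF in SIMD16 has count == 2 with count % 2 == 0, so reg_size == 2, spill_base_mrf == 13, and a single SHADER_OPCODE_GEN4_SCRATCH_WRITE covers both registers:

   mlen   = 1 + reg_size = 3               /* m13 header, m14/m15 payload */
   offset = spill_offset + 0 * reg_size * REG_SIZE

In SIMD8 the same VGRF would instead take two messages with reg_size == 1, mlen == 2 and base_mrf == 14, at offsets spill_offset and spill_offset + 32, which is why the MRF-usage check at the top of spill_reg() starts scanning at spill_base_mrf.
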