#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
-#include "glsl/ir_print_visitor.h"
static void
assign_reg(int *reg_hw_locations, fs_reg *reg, int reg_width)
{
int hw_reg_mapping[this->virtual_grf_count + 1];
int i;
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
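+ /* reg_width is 1 for SIMD8 and 2 for SIMD16 (compressed) dispatch. */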
/* Note that compressed instructions require alignment to 2 registers. */
hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
static void
brw_alloc_reg_set(struct brw_context *brw, int reg_width)
{
- struct intel_context *intel = &brw->intel;
int base_reg_count = BRW_MAX_GRF / reg_width;
int index = reg_width - 1;
* less some day.
*
* Additionally, on gen5 we need aligned pairs of registers for the PLN
- * instruction.
+ * instruction, and on gen4 we need 8 contiguous regs for the SIMD16
+ * texturing workaround.
*
- * So we have a need for classes for 1, 2, and 4 registers currently, and
- * we add in '3' to make indexing the array easier (since we'll probably
- * want it for texturing later).
+ * So we have a need for classes for 1, 2, 4, and 8 registers currently,
+ * and we add in '3' to make indexing the array easier (since we'll
+ * probably want it for texturing later).
*/
- const int class_sizes[4] = {1, 2, 3, 4};
- const int class_count = 4;
+ const int class_count = 5;
+ const int class_sizes[class_count] = {1, 2, 3, 4, 8};
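+ /* class_sizes[i] is the width, in hardware registers, of the VGRFs that
+ * will go in classes[i]; the new 8-wide entry exists only for the gen4
+ * SIMD16 texturing workaround mentioned above.
+ */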
/* Compute the total number of registers across all classes. */
int ra_reg_count = 0;
uint8_t *ra_reg_to_grf = ralloc_array(brw, uint8_t, ra_reg_count);
struct ra_regs *regs = ra_alloc_reg_set(brw, ra_reg_count);
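+ /* On gen6+, hand registers out round-robin across the file rather than
+ * always reusing the lowest-numbered free register.
+ */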
+ if (brw->gen >= 6)
+ ra_set_allocate_round_robin(regs);
int *classes = ralloc_array(brw, int, class_count);
int aligned_pairs_class = -1;
/* Add a special class for aligned pairs, which we'll put delta_x/y
* in on gen5 so that we can do PLN.
*/
- if (brw->has_pln && reg_width == 1 && intel->gen < 6) {
+ if (brw->has_pln && reg_width == 1 && brw->gen < 6) {
aligned_pairs_class = ra_alloc_reg_class(regs);
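+ /* Only pairs that start on an even GRF can be PLN's delta source, so
+ * only those get added to this class.
+ */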
for (int i = 0; i < pairs_reg_count; i++) {
int payload_node_count,
int first_payload_node)
{
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
int loop_depth = 0;
int loop_end_ip = 0;
* the start (see interp_reg()).
*/
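+ /* Mark any payload GRF referenced directly by this instruction's sources
+ * as live until at least this IP.
+ */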
for (int i = 0; i < 3; i++) {
- if (inst->src[i].file == FIXED_HW_REG &&
+ if (inst->src[i].file == HW_REG &&
inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
int node_nr = inst->src[i].fixed_hw_reg.nr / reg_width;
if (node_nr >= payload_node_count)
payload_last_use_ip[0 / reg_width] = use_ip;
payload_last_use_ip[1 / reg_width] = use_ip;
break;
- case FS_OPCODE_DISCARD:
- payload_last_use_ip[1 / reg_width] = use_ip;
- break;
case FS_OPCODE_LINTERP:
/* On gen6+ in 16-wide, there are 4 adjacent registers (so 2 nodes)
* used by PLN's sourcing of the deltas, while we list only the first
* two in the arguments (1 node). Pre-gen6, the deltas are computed
* in normal VGRFs.
*/
- if (intel->gen >= 6) {
+ if (brw->gen >= 6) {
int delta_x_arg = 0;
- if (inst->src[delta_x_arg].file == FIXED_HW_REG &&
+ if (inst->src[delta_x_arg].file == HW_REG &&
inst->src[delta_x_arg].fixed_hw_reg.file ==
BRW_GENERAL_REGISTER_FILE) {
int sechalf_node = (inst->src[delta_x_arg].fixed_hw_reg.nr /
* node.
*/
for (int j = 0; j < this->virtual_grf_count; j++) {
- if (this->virtual_grf_def[j] <= payload_last_use_ip[i] ||
- this->virtual_grf_use[j] <= payload_last_use_ip[i]) {
+ /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
+ * in order to not have to worry about the uniform issue described in
+ * calculate_live_intervals().
+ */
+ if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
ra_add_node_interference(g, first_payload_node + i, j);
}
}
fs_visitor::setup_mrf_hack_interference(struct ra_graph *g, int first_mrf_node)
{
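+ /* Gen7 has no separate MRF file; message payloads are built in the top
+ * GRFs starting at GEN7_MRF_HACK_START, so any of those registers the
+ * program uses must be kept out of the allocator's hands.
+ */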
int mrf_count = BRW_MAX_GRF - GEN7_MRF_HACK_START;
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
/* Identify all the MRFs used in the program. */
bool mrf_used[mrf_count];
* registers it's allocating be contiguous physical pairs of regs
* for reg_width == 2.
*/
- int reg_width = c->dispatch_width / 8;
+ int reg_width = dispatch_width / 8;
int hw_reg_mapping[this->virtual_grf_count];
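+ /* The fixed payload registers live at the bottom of the register file
+ * and are pre-assigned; each payload node stands for reg_width hardware
+ * registers.
+ */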
int payload_node_count = (ALIGN(this->first_non_payload_grf, reg_width) /
reg_width);
int first_payload_node = node_count;
node_count += payload_node_count;
int first_mrf_hack_node = node_count;
- if (intel->gen >= 7)
+ if (brw->gen >= 7)
node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
struct ra_graph *g = ra_alloc_interference_graph(brw->wm.reg_sets[rsi].regs,
node_count);
for (int i = 0; i < this->virtual_grf_count; i++) {
- assert(this->virtual_grf_sizes[i] >= 1 &&
- this->virtual_grf_sizes[i] <= 4 &&
- "Register allocation relies on split_virtual_grfs()");
- int c = brw->wm.reg_sets[rsi].classes[this->virtual_grf_sizes[i] - 1];
+ int size = this->virtual_grf_sizes[i];
+ int c;
+
+ if (size == 8) {
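+ /* Size-8 VGRFs come from the gen4 SIMD16 texturing workaround and go
+ * in the 8-wide class that brw_alloc_reg_set() sets up last
+ * (class_sizes[] = {1, 2, 3, 4, 8}), which ends up as ra class 4.
+ */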
+ c = 4;
+ } else {
+ assert(size >= 1 &&
+ size <= 4 &&
+ "Register allocation relies on split_virtual_grfs()");
+ c = brw->wm.reg_sets[rsi].classes[size - 1];
+ }
/* Special case: on pre-GEN6 hardware that supports PLN, the
* second operand of a PLN instruction needs to be an
}
setup_payload_interference(g, payload_node_count, first_payload_node);
- if (intel->gen >= 7)
+ if (brw->gen >= 7)
setup_mrf_hack_interference(g, first_mrf_hack_node);
if (!ra_allocate_no_spills(g)) {
int reg = choose_spill_reg(g);
if (reg == -1) {
- fail("no register to spill\n");
- } else if (c->dispatch_width == 16) {
+ fail("no register to spill:\n");
+ dump_instructions();
+ } else if (dispatch_width == 16) {
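+ /* We don't support spilling in SIMD16 mode; failing here just drops
+ * back to the SIMD8 program.
+ */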
fail("Failure to register allocate. Reduce number of live scalar "
"values to avoid this.");
} else {
}
if (inst->dst.file == GRF) {
- spill_costs[inst->dst.reg] += inst->regs_written() * loop_scale;
+ spill_costs[inst->dst.reg] += inst->regs_written * loop_scale;
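+ /* Sub-register ("smear") destinations can't be handled by the spill
+ * code, which moves whole registers, so never spill them.
+ */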
if (inst->dst.smear >= 0) {
no_spill[inst->dst.reg] = true;
inst->dst.reg == spill_reg) {
int subset_spill_offset = (spill_offset +
REG_SIZE * inst->dst.reg_offset);
- inst->dst.reg = virtual_grf_alloc(inst->regs_written());
+ inst->dst.reg = virtual_grf_alloc(inst->regs_written);
inst->dst.reg_offset = 0;
/* If our write is going to affect just part of the
* destination's registers, then we need to unspill the destination
* first, since we write back out all of its registers below.
*/
if (inst->predicate || inst->force_uncompressed || inst->force_sechalf) {
fs_reg unspill_reg = inst->dst;
- for (int chan = 0; chan < inst->regs_written(); chan++) {
+ for (int chan = 0; chan < inst->regs_written; chan++) {
emit_unspill(inst, unspill_reg,
subset_spill_offset + REG_SIZE * chan);
unspill_reg.reg_offset++;
spill_src.negate = false;
spill_src.smear = -1;
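+ /* Emit one scratch write per hardware register the instruction wrote. */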
- for (int chan = 0; chan < inst->regs_written(); chan++) {
+ for (int chan = 0; chan < inst->regs_written; chan++) {
fs_inst *spill_inst = new(mem_ctx) fs_inst(FS_OPCODE_SPILL,
reg_null_f, spill_src);
spill_src.reg_offset++;