bool progress = false;
int depth = 0;
- int remap[alloc.count];
- memset(remap, -1, sizeof(int) * alloc.count);
+ unsigned remap[alloc.count];
+ memset(remap, ~0u, sizeof(unsigned) * alloc.count);
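+ /* memset only uses the low byte of its value argument (0xff here), so
+  * each unsigned element of remap ends up as ~0u, the "not remapped"
+  * sentinel that replaces the old -1.
+  */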
foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
/* Rewrite instruction sources. */
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == VGRF &&
- remap[inst->src[i].nr] != -1 &&
+ remap[inst->src[i].nr] != ~0u &&
remap[inst->src[i].nr] != inst->src[i].nr) {
inst->src[i].nr = remap[inst->src[i].nr];
progress = true;
}
}
- const int dst = inst->dst.nr;
+ const unsigned dst = inst->dst.nr;
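/* A complete, unpredicated write at the top level starts a new
 * definition: the first one keeps its number, and any later full
 * redefinition is renamed to a fresh VGRF.
 */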
if (depth == 0 &&
inst->dst.file == VGRF &&
alloc.sizes[inst->dst.nr] * REG_SIZE == inst->size_written &&
!inst->is_partial_write()) {
- if (remap[dst] == -1) {
+ if (remap[dst] == ~0u) {
remap[dst] = dst;
} else {
remap[dst] = alloc.allocate(regs_written(inst));
progress = true;
}
} else if (inst->dst.file == VGRF &&
- remap[dst] != -1 &&
+ remap[dst] != ~0u &&
remap[dst] != dst) {
inst->dst.nr = remap[dst];
progress = true;
invalidate_live_intervals();
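/* The delta_xy registers used for pixel interpolation are read
 * implicitly, so they have to be renamed along with explicit sources.
 */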
for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
- if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != -1) {
+ if (delta_xy[i].file == VGRF && remap[delta_xy[i].nr] != ~0u) {
delta_xy[i].nr = remap[delta_xy[i].nr];
}
}
void
fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
{
int write_len = regs_written(inst);
- int first_write_grf = inst->dst.nr;
+ unsigned first_write_grf = inst->dst.nr;
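/* needs_dep[i] tracks whether GRF first_write_grf + i still needs an
 * artificial dependency inserted after the send.
 */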
bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
assert(write_len < (int)sizeof(needs_dep) - 1);
bld.MOV(sources[length++], min_lod);
}
- int mlen;
+ unsigned mlen;
if (reg_width == 2)
mlen = length * reg_width - header_size;
else
void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
int first_payload_node);
int choose_spill_reg(struct ra_graph *g);
- void spill_reg(int spill_reg);
+ void spill_reg(unsigned spill_reg);
void split_virtual_grfs();
bool compact_virtual_grfs();
void assign_constant_locations();
}
/* Store the control data bits in the message payload and send it. */
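/* The base message is two registers: presumably the header plus one
 * register of control data.
 */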
- int mlen = 2;
+ unsigned mlen = 2;
if (channel_mask.file != BAD_FILE)
mlen += 4; /* channel masks, plus 3 extra copies of the data */
if (per_slot_offset.file != BAD_FILE)
fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
- int i = 0;
+ unsigned i = 0;
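/* The first source is g1, which presumably holds the URB handles that
 * form the message header.
 */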
sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
if (per_slot_offset.file != BAD_FILE)
sources[i++] = per_slot_offset;
}
void
-fs_visitor::spill_reg(int spill_reg)
+fs_visitor::spill_reg(unsigned spill_reg)
{
int size = alloc.sizes[spill_reg];
unsigned int spill_offset = last_scratch;
int src_size = 0;
int channels_remaining = 0;
- int src_reg = -1, dst_reg = -1;
+ unsigned src_reg = ~0u, dst_reg = ~0u;
int dst_reg_offset[MAX_VGRF_SIZE];
fs_inst *mov[MAX_VGRF_SIZE];
int dst_var[MAX_VGRF_SIZE];
if (dst_reg_offset[i] != dst_reg_offset[0] + i) {
/* Registers are out-of-order. */
can_coalesce = false;
- src_reg = -1;
+ src_reg = ~0u;
break;
}
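/* Coalescing is only legal if the two live ranges do not interfere. */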
if (!can_coalesce_vars(live_intervals, cfg, inst,
dst_var[i], src_var[i])) {
can_coalesce = false;
- src_reg = -1;
+ src_reg = ~0u;
break;
}
}
MAX2(live_intervals->end[dst_var[i]],
live_intervals->end[src_var[i]]);
}
- src_reg = -1;
+ src_reg = ~0u;
}
if (progress) {
class instruction_scheduler {
public:
instruction_scheduler(backend_shader *s, int grf_count,
- int hw_reg_count, int block_count,
+ unsigned hw_reg_count, int block_count,
instruction_scheduler_mode mode)
{
this->bs = s;
bool post_reg_alloc;
int instructions_to_schedule;
int grf_count;
- int hw_reg_count;
+ unsigned hw_reg_count;
int reg_pressure;
int block_idx;
exec_list instructions;
int payload_last_use_ip[hw_reg_count];
v->calculate_payload_ranges(hw_reg_count, payload_last_use_ip);
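/* calculate_payload_ranges() reports -1 for payload registers that are
 * never read, so those can be skipped.
 */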
- for (int i = 0; i < hw_reg_count; i++) {
+ for (unsigned i = 0; i < hw_reg_count; i++) {
if (payload_last_use_ip[i] == -1)
continue;
bool progress = false;
foreach_block(block, cfg) {
- int last_reg = -1, last_offset = -1;
+ unsigned last_reg = ~0u, last_offset = ~0u;
enum brw_reg_file last_reg_file = BAD_FILE;
uint8_t imm[4] = { 0 };
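/* Track a run of scalar immediate MOVs into one register so it can be
 * collapsed into a single vector-float immediate MOV.
 */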
need_type = BRW_REGISTER_TYPE_F;
}
} else {
- last_reg = -1;
+ last_reg = ~0u;
}
/* If this wasn't a MOV, or the destination register doesn't match,
}
inst_count = 0;
- last_reg = -1;
+ last_reg = ~0u;
writemask = 0;
dest_type = BRW_REGISTER_TYPE_F;
* in the register instead.
*/
if (to_mrf && scan_inst->mlen > 0) {
- if (inst->dst.nr >= scan_inst->base_mrf &&
- inst->dst.nr < scan_inst->base_mrf + scan_inst->mlen) {
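+ /* The send reads MRFs in the half-open range [base_mrf, base_mrf + mlen). */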
+ unsigned start = scan_inst->base_mrf;
+ unsigned end = scan_inst->base_mrf + scan_inst->mlen;
+
+ if (inst->dst.nr >= start && inst->dst.nr < end) {
break;
}
} else {
bool reg_allocate();
void evaluate_spill_costs(float *spill_costs, bool *no_spill);
int choose_spill_reg(struct ra_graph *g);
- void spill_reg(int spill_reg);
+ void spill_reg(unsigned spill_reg);
void move_grf_array_access_to_scratch();
void move_uniform_array_access_to_pull_constants();
void move_push_constants_to_pull_constants();
}
void
-vec4_visitor::spill_reg(int spill_reg_nr)
+vec4_visitor::spill_reg(unsigned spill_reg_nr)
{
assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
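/* vec4 VGRFs are normally one register; a size of 2 likely covers 64-bit
 * types.
 */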
- unsigned int spill_offset = last_scratch;
+ unsigned spill_offset = last_scratch;
last_scratch += alloc.sizes[spill_reg_nr];
/* Generate spill/unspill instructions for the objects being spilled. */
- int scratch_reg = -1;
+ unsigned scratch_reg = ~0u;
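/* scratch_reg caches the most recently unspilled register so that later
 * reads of the spilled value can reuse it instead of unspilling again.
 */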
foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
- for (unsigned int i = 0; i < 3; i++) {
+ for (unsigned i = 0; i < 3; i++) {
if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
- if (scratch_reg == -1 ||
+ if (scratch_reg == ~0u ||
!can_use_scratch_for_source(inst, i, scratch_reg)) {
/* We need to unspill anyway so make sure we read the full vec4
* in any case. This way, the cached register can be reused
dst_reg(temp), inst->src[i], spill_offset);
temp.offset = inst->src[i].offset;
}
- assert(scratch_reg != -1);
+ assert(scratch_reg != ~0u);
inst->src[i].nr = scratch_reg;
}
}
}
}
-static int
-align_interleaved_urb_mlen(const struct gen_device_info *devinfo, int mlen)
+static unsigned
+align_interleaved_urb_mlen(const struct gen_device_info *devinfo, unsigned mlen)
{
if (devinfo->gen >= 6) {
/* URB data written (does not include the message header reg) must
emit(GS_OPCODE_SET_DWORD_2, dst_reg(MRF, mrf), flags_data);
}
-static int
-align_interleaved_urb_mlen(int mlen)
+static unsigned
+align_interleaved_urb_mlen(unsigned mlen)
{
/* URB data written (does not include the message header reg) must
* be a multiple of 256 bits, or 2 VS registers. See vol5c.5,