prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;
/* Map the offsets in the UNIFORM file to fixed HW regs. */
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
for (unsigned int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == UNIFORM) {
int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
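Every hunk in this patch makes the same substitution: instead of walking fs_visitor's flat instruction list, each pass walks the basic blocks of the control-flow graph and the instructions inside each block. As a rough sketch (illustrative only; the real macros live in brw_cfg.h and build on the existing exec_list iterators, so the exact spelling may differ), the new iterator can be composed like this:

   /* Walk every basic block of the CFG. */
   #define foreach_block(__block, __cfg) \
      foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

   /* Walk every instruction within one basic block. */
   #define foreach_inst_in_block(__type, __inst, __block) \
      foreach_in_list (__type, __inst, &(__block)->instructions)

   /* Walk every instruction in the program, block by block. */
   #define foreach_block_and_inst(__block, __type, __inst, __cfg) \
      foreach_block (__block, __cfg) \
         foreach_inst_in_block (__type, __inst, __block)

The conversions are intended to be mechanical, visiting the same instructions in program order; the places that need care are early exits, since the iterator is now two nested loops (see the can_coalesce_vars() hunk below).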
/* Offset all the urb_setup[] indices by the actual position of the
* setup regs, now that the location of the constants has been chosen.
*/
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->opcode == FS_OPCODE_LINTERP) {
assert(inst->src[2].file == HW_REG);
inst->src[2].fixed_hw_reg.nr += urb_start;
false;
}
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
/* If there's a SEND message that requires contiguous destination
* registers, no splitting is allowed.
*/
}
}
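The code that actually disallows splitting falls outside the context shown here; a minimal sketch of what such a check can look like (hypothetical, reusing the split_grf[] array from the surrounding hunks):

   /* An instruction that writes several registers (e.g. a SEND with a wide
    * response) needs its destination to stay contiguous, so never split it.
    */
   if (inst->regs_written > 1)
      split_grf[inst->dst.reg] = false;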
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->dst.file == GRF &&
split_grf[inst->dst.reg] &&
inst->dst.reg_offset != 0) {
int remap_table[this->virtual_grf_count];
memset(remap_table, -1, sizeof(remap_table));
- foreach_in_list(const fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, const fs_inst, inst, cfg) {
if (inst->dst.file == GRF)
remap_table[inst->dst.reg] = 0;
this->virtual_grf_count = new_index;
/* Patch all the instructions to use the newly renumbered registers */
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->dst.file == GRF)
inst->dst.reg = remap_table[inst->dst.reg];
* Note that we don't move constant-indexed accesses to arrays. No
* testing has been done of the performance impact of this choice.
*/
- foreach_in_list_safe(fs_inst, inst, &instructions) {
+ foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
continue;
is_live[i] = false;
}
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
for (int i = 0; i < inst->sources; i++) {
if (inst->src[i].file != UNIFORM)
continue;
{
bool progress = false;
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
switch (inst->opcode) {
case BRW_OPCODE_MUL:
if (inst->src[1].file != IMM)
int remap[virtual_grf_count];
memset(remap, -1, sizeof(int) * virtual_grf_count);
- foreach_in_list(fs_inst, inst, &this->instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
depth++;
} else if (inst->opcode == BRW_OPCODE_ENDIF ||
}
int ip = 0, max_pressure = 0;
- foreach_in_list(backend_instruction, inst, &instructions) {
+ foreach_block_and_inst(block, backend_instruction, inst, cfg) {
max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
dump_instruction(inst, file);
}
this->grf_used = hw_reg_mapping[this->virtual_grf_count];
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
assign_reg(hw_reg_mapping, &inst->dst, reg_width);
for (i = 0; i < inst->sources; i++) {
assign_reg(hw_reg_mapping, &inst->src[i], reg_width);
int payload_last_use_ip[payload_node_count];
memset(payload_last_use_ip, 0, sizeof(payload_last_use_ip));
int ip = 0;
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
switch (inst->opcode) {
case BRW_OPCODE_DO:
loop_depth++;
memset(mrf_used, 0, BRW_MAX_MRF * sizeof(bool));
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (inst->dst.file == MRF) {
int reg = inst->dst.reg & ~BRW_MRF_COMPR4;
mrf_used[reg] = true;
reg_width);
}
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
assign_reg(hw_reg_mapping, &inst->dst, reg_width);
for (int i = 0; i < inst->sources; i++) {
assign_reg(hw_reg_mapping, &inst->src[i], reg_width);
* spill/unspill we'll have to do, and guess that the insides of
* loops run 10 times.
*/
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
for (unsigned int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == GRF) {
spill_costs[inst->src[i].reg] += loop_scale;
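The "loops run 10 times" guess is applied through the loop_scale factor used in the hunk above; a sketch of the bookkeeping (assumed, not shown in the patch: scale the cost up on entering a loop and back down on leaving it):

   float loop_scale = 1.0f;

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      /* Weight register uses inside a loop as if the body executed ~10 times. */
      if (inst->opcode == BRW_OPCODE_DO)
         loop_scale *= 10.0f;
      else if (inst->opcode == BRW_OPCODE_WHILE)
         loop_scale /= 10.0f;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF)
            spill_costs[inst->src[i].reg] += loop_scale;
      }
   }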
static bool
can_coalesce_vars(brw::fs_live_variables *live_intervals,
- const exec_list *instructions, const fs_inst *inst,
+ const cfg_t *cfg, const fs_inst *inst,
int var_to, int var_from)
{
if (!live_intervals->vars_interfere(var_from, var_to))
int start_ip = MIN2(start_to, start_from);
int scan_ip = -1;
- foreach_in_list(fs_inst, scan_inst, instructions) {
+ foreach_block_and_inst(block, fs_inst, scan_inst, cfg) {
scan_ip++;
if (scan_ip < start_ip)
continue;
if (scan_ip > live_intervals->end[var_to])
- break;
+ return true;
if (scan_inst->dst.equals(inst->dst) ||
scan_inst->dst.equals(inst->src[0]))
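Note the break above becomes return true: because foreach_block_and_inst expands to two nested loops, a bare break would only leave the inner per-block loop. Returning here is presumably equivalent, since once scan_ip has passed the end of var_to's live range the old code would have fallen out of the loop and reported that coalescing is safe.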
int var_to[MAX_SAMPLER_MESSAGE_SIZE];
int var_from[MAX_SAMPLER_MESSAGE_SIZE];
- foreach_in_list(fs_inst, inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, inst, cfg) {
if (!is_coalesce_candidate(inst, virtual_grf_sizes))
continue;
var_to[i] = live_intervals->var_from_vgrf[reg_to] + reg_to_offset[i];
var_from[i] = live_intervals->var_from_vgrf[reg_from] + i;
- if (!can_coalesce_vars(live_intervals, &instructions, inst,
+ if (!can_coalesce_vars(live_intervals, cfg, inst,
var_to[i], var_from[i])) {
can_coalesce = false;
reg_from = -1;
}
}
- foreach_in_list(fs_inst, scan_inst, &instructions) {
+ foreach_block_and_inst(block, fs_inst, scan_inst, cfg) {
for (int i = 0; i < src_size; i++) {
if (mov[i] || was_load_payload) {
if (scan_inst->dst.file == GRF &&