#include "brw_fs.h"
#include "brw_vec4.h"
+#include "brw_cfg.h"
+#include "brw_shader.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
this->remaining_grf_uses = NULL;
this->grf_active = NULL;
}
+ v->calculate_cfg();
}
~instruction_scheduler()
void add_dep(schedule_node *before, schedule_node *after, int latency);
void add_dep(schedule_node *before, schedule_node *after);
- void run(exec_list *instructions);
- void add_inst(backend_instruction *inst);
+ void run(cfg_t *cfg);
+ void add_insts_from_block(bblock_t *block);
void compute_delay(schedule_node *node);
virtual void calculate_deps() = 0;
virtual schedule_node *choose_instruction_to_schedule() = 0;
virtual void update_register_pressure(backend_instruction *inst) = 0;
virtual int get_register_pressure_benefit(backend_instruction *inst) = 0;
- void schedule_instructions(backend_instruction *next_block_header);
+ void schedule_instructions(bblock_t *block);
void *mem_ctx;
}
void
-instruction_scheduler::add_inst(backend_instruction *inst)
+instruction_scheduler::add_insts_from_block(bblock_t *block)
{
- schedule_node *n = new(mem_ctx) schedule_node(inst, this);
+ /* Removing the last instruction from a basic block removes the block as
+ * well, so put a NOP at the end to keep it alive.
+ */
+ if (!block->end->is_control_flow()) {
+ backend_instruction *nop = new(mem_ctx) backend_instruction();
+ nop->opcode = BRW_OPCODE_NOP;
+ block->end->insert_after(block, nop);
+ }
- assert(!inst->is_head_sentinel());
- assert(!inst->is_tail_sentinel());
+ foreach_inst_in_block_safe(backend_instruction, inst, block) {
+ if (inst->opcode == BRW_OPCODE_NOP || inst->is_control_flow())
+ continue;
- this->instructions_to_schedule++;
+ schedule_node *n = new(mem_ctx) schedule_node(inst, this);
- inst->remove();
- instructions.push_tail(n);
+ this->instructions_to_schedule++;
+
+ inst->remove(block);
+ instructions.push_tail(n);
+ }
}
/** Recursive computation of the delay member of a node. */
}
void
-instruction_scheduler::schedule_instructions(backend_instruction *next_block_header)
+instruction_scheduler::schedule_instructions(bblock_t *block)
{
struct brw_context *brw = bv->brw;
+ backend_instruction *inst = block->end;
time = 0;
/* Remove non-DAG heads from the list. */
/* Schedule this instruction. */
assert(chosen);
chosen->remove();
- next_block_header->insert_before(chosen->inst);
+ inst->insert_before(block, chosen->inst);
instructions_to_schedule--;
update_register_pressure(chosen->inst);
}
}
+ if (block->end->opcode == BRW_OPCODE_NOP)
+ block->end->remove(block);
assert(instructions_to_schedule == 0);
}
void
-instruction_scheduler::run(exec_list *all_instructions)
+instruction_scheduler::run(cfg_t *cfg)
{
- backend_instruction *next_block_header =
- (backend_instruction *)all_instructions->head;
-
if (debug) {
fprintf(stderr, "\nInstructions before scheduling (reg_alloc %d)\n",
post_reg_alloc);
* scheduling.
*/
if (remaining_grf_uses) {
- foreach_in_list(schedule_node, node, all_instructions) {
- count_remaining_grf_uses((backend_instruction *)node);
+ foreach_block_and_inst(block, backend_instruction, inst, cfg) {
+ count_remaining_grf_uses(inst);
}
}
- while (!next_block_header->is_tail_sentinel()) {
- /* Add things to be scheduled until we get to a new BB. */
- while (!next_block_header->is_tail_sentinel()) {
- backend_instruction *inst = next_block_header;
- next_block_header = (backend_instruction *)next_block_header->next;
+ foreach_block(block, cfg) {
+ if (block->end_ip - block->start_ip <= 1)
+ continue;
+
+ add_insts_from_block(block);
- add_inst(inst);
- if (inst->is_control_flow())
- break;
- }
calculate_deps();
foreach_in_list(schedule_node, n, &instructions) {
compute_delay(n);
}
- schedule_instructions(next_block_header);
+ schedule_instructions(block);
}
if (debug) {
grf_count = virtual_grf_count;
fs_instruction_scheduler sched(this, grf_count, mode);
- sched.run(&instructions);
+ sched.run(cfg);
if (unlikely(INTEL_DEBUG & DEBUG_WM) && mode == SCHEDULE_POST) {
fprintf(stderr, "fs%d estimated execution time: %d cycles\n",
dispatch_width, sched.time);
}
- invalidate_live_intervals();
+ invalidate_live_intervals(false);
}
void
vec4_visitor::opt_schedule_instructions()
{
   /* Run the vec4 instruction scheduler over the whole program, one basic
    * block at a time via the CFG.
    */
   vec4_instruction_scheduler sched(this, prog_data->total_grf);
   sched.run(cfg);

   if (unlikely(debug_flag)) {
      fprintf(stderr, "vec4 estimated execution time: %d cycles\n", sched.time);
   }

   /* Scheduling moved instructions around, so live intervals are stale.
    * The CFG itself was not modified (blocks were kept alive with NOPs),
    * so pass false to avoid rebuilding it.
    */
   invalidate_live_intervals(false);
}