#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"
+#include "util/dag.h"
static bool debug;
+struct schedule_node_child;
+
struct schedule_node {
        /* Node in the scheduler's dependency DAG.  Edges run from an
         * instruction to the instructions that depend on it (see add_dep()).
         */
        struct dag_node dag;

        /* Link in the temporary setup list used while the DAG is built
         * (see qpu_schedule_instructions_block()).
         */
        struct list_head link;

        /* The queued QPU instruction this node wraps. */
        struct queued_qpu_inst *inst;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * cycles between this instruction being scheduled and when its result
         * can be consumed.
         */
        uint32_t latency;

        /**
         * Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};
/* When walking the instructions in reverse, we need to swap before/after in
enum direction { F, R };
struct schedule_state {
        /* Dependency DAG being populated; owned by the caller. */
        struct dag *dag;

        /* Last instruction to write each accumulator r0-r5. */
        struct schedule_node *last_r[6];

        /* Last instruction to write each regfile A / regfile B register. */
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];

        /* Last instruction to update the condition flags (SF). */
        struct schedule_node *last_sf;

        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;

        /* Last TLB (scoreboard-locking) access. */
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;

        /* Last write to UNIFORMS_ADDRESS; uniform reads must be ordered
         * against it.
         */
        struct schedule_node *last_uniforms_reset;

        /* Walk direction: F(orward) or R(everse).  add_dep() flips edge
         * direction for the reverse pass.
         */
        enum direction dir;

        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
struct schedule_node *before,
- struct schedule_node *after)
+ struct schedule_node *after,
+ bool write)
{
+ bool write_after_read = !write && state->dir == R;
+ void *edge_data = (void *)(uintptr_t)write_after_read;
+
if (!before || !after)
return;
assert(before != after);
- if (state->dir == R) {
- struct schedule_node *t = before;
- before = after;
- after = t;
- }
-
- for (int i = 0; i < before->child_count; i++) {
- if (before->children[i] == after)
- return;
- }
-
- if (before->child_array_size <= before->child_count) {
- before->child_array_size = MAX2(before->child_array_size * 2, 16);
- before->children = reralloc(before, before->children,
- struct schedule_node *,
- before->child_array_size);
- }
+ if (state->dir == F)
+ dag_add_edge(&before->dag, &after->dag, edge_data);
+ else
+ dag_add_edge(&after->dag, &before->dag, edge_data);
+}
- before->children[before->child_count] = after;
- before->child_count++;
- after->parent_count++;
/* Records that "after" reads state last touched by "before" without
 * writing it itself (a read-after-write ordering).
 */
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
static void
struct schedule_node **before,
struct schedule_node *after)
{
- add_dep(state, *before, after);
+ add_dep(state, *before, after, true);
*before = after;
}
break;
case QPU_R_UNIF:
- add_write_dep(state, &state->last_unif_read, n);
+ add_read_dep(state, state->last_uniforms_reset, n);
break;
case QPU_R_NOP:
default:
if (raddr < 32) {
if (is_a)
- add_dep(state, state->last_ra[raddr], n);
+ add_read_dep(state, state->last_ra[raddr], n);
else
- add_dep(state, state->last_rb[raddr], n);
+ add_read_dep(state, state->last_rb[raddr], n);
} else {
fprintf(stderr, "unknown raddr %d\n", raddr);
abort();
}
}
+static bool
+reads_uniform(uint64_t inst)
+{
+ if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
+ return false;
+
+ return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
+ (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
+ QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
+ is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
+ is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
+}
+
/*
 * Adds a read dependency on the accumulator feeding an ALU input mux.
 * Muxes A and B come from the register files and are covered by the
 * raddr dependency processing instead.
 */
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux == QPU_MUX_A || mux == QPU_MUX_B)
                return;

        add_read_dep(state, state->last_r[mux], n);
}
-static bool
-is_direct_tmu_read(uint64_t inst)
-{
- /* If it's a direct read, we happen to structure the code such that
- * there's an explicit uniform read in the instruction (for kernel
- * texture reloc processing).
- */
- return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
- QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF);
-}
-
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
uint32_t waddr, bool is_add)
}
} else if (is_tmu_write(waddr)) {
add_write_dep(state, &state->last_tmu_write, n);
-
- /* There is an implicit uniform read in texture ops in
- * hardware, unless this is a direct-addressed uniform read,
- * so we need to keep it in the same order as the other
- * uniforms.
- */
- if (!is_direct_tmu_read(n->inst->inst))
- add_write_dep(state, &state->last_unif_read, n);
- } else if (qpu_waddr_is_tlb(waddr)) {
+ add_read_dep(state, state->last_uniforms_reset, n);
+ } else if (qpu_waddr_is_tlb(waddr) ||
+ waddr == QPU_W_MS_FLAGS) {
add_write_dep(state, &state->last_tlb, n);
} else {
switch (waddr) {
break;
case QPU_W_VPM:
- case QPU_W_VPMVCD_SETUP:
add_write_dep(state, &state->last_vpm, n);
break;
+ case QPU_W_VPMVCD_SETUP:
+ if (is_a)
+ add_write_dep(state, &state->last_vpm_read, n);
+ else
+ add_write_dep(state, &state->last_vpm, n);
+ break;
+
case QPU_W_SFU_RECIP:
case QPU_W_SFU_RECIPSQRT:
case QPU_W_SFU_EXP:
add_write_dep(state, &state->last_tlb, n);
break;
+ case QPU_W_MS_FLAGS:
+ add_write_dep(state, &state->last_tlb, n);
+ break;
+
+ case QPU_W_UNIFORMS_ADDRESS:
+ add_write_dep(state, &state->last_uniforms_reset, n);
+ break;
+
case QPU_W_NOP:
break;
case QPU_COND_ALWAYS:
break;
default:
- add_dep(state, state->last_sf, n);
+ add_read_dep(state, state->last_sf, n);
break;
}
}
uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
- process_raddr_deps(state, n, raddr_a, true);
- process_raddr_deps(state, n, raddr_b, false);
+ if (sig != QPU_SIG_LOAD_IMM) {
+ process_raddr_deps(state, n, raddr_a, true);
+ if (sig != QPU_SIG_SMALL_IMM &&
+ sig != QPU_SIG_BRANCH)
+ process_raddr_deps(state, n, raddr_b, false);
+ }
+
if (add_op != QPU_A_NOP) {
process_mux_deps(state, n, add_a);
process_mux_deps(state, n, add_b);
switch (sig) {
case QPU_SIG_SW_BREAKPOINT:
case QPU_SIG_NONE:
- case QPU_SIG_THREAD_SWITCH:
- case QPU_SIG_LAST_THREAD_SWITCH:
case QPU_SIG_SMALL_IMM:
case QPU_SIG_LOAD_IMM:
break;
+ case QPU_SIG_THREAD_SWITCH:
+ case QPU_SIG_LAST_THREAD_SWITCH:
+ /* All accumulator contents and flags are undefined after the
+ * switch.
+ */
+ for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
+ add_write_dep(state, &state->last_r[i], n);
+ add_write_dep(state, &state->last_sf, n);
+
+ /* Scoreboard-locking operations have to stay after the last
+ * thread switch.
+ */
+ add_write_dep(state, &state->last_tlb, n);
+
+ add_write_dep(state, &state->last_tmu_write, n);
+ break;
+
case QPU_SIG_LOAD_TMU0:
case QPU_SIG_LOAD_TMU1:
/* TMU loads are coming from a FIFO, so ordering is important.
break;
case QPU_SIG_COLOR_LOAD:
- add_dep(state, state->last_tlb, n);
+ add_read_dep(state, state->last_tlb, n);
+ break;
+
+ case QPU_SIG_BRANCH:
+ add_read_dep(state, state->last_sf, n);
break;
case QPU_SIG_PROG_END:
case QPU_SIG_COVERAGE_LOAD:
case QPU_SIG_COLOR_LOAD_END:
case QPU_SIG_ALPHA_MASK_LOAD:
- case QPU_SIG_BRANCH:
fprintf(stderr, "Unhandled signal bits %d\n", sig);
abort();
}
process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
- process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
- if (inst & QPU_SF)
+ process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
+ if ((inst & QPU_SF) && sig != QPU_SIG_BRANCH)
add_write_dep(state, &state->last_sf, n);
}
static void
-calculate_forward_deps(struct vc4_compile *c, struct simple_node *schedule_list)
+calculate_forward_deps(struct vc4_compile *c, struct dag *dag,
+ struct list_head *schedule_list)
{
- struct simple_node *node;
struct schedule_state state;
memset(&state, 0, sizeof(state));
+ state.dag = dag;
state.dir = F;
- foreach(node, schedule_list)
- calculate_deps(&state, (struct schedule_node *)node);
+ list_for_each_entry(struct schedule_node, node, schedule_list, link)
+ calculate_deps(&state, node);
}
static void
-calculate_reverse_deps(struct vc4_compile *c, struct simple_node *schedule_list)
+calculate_reverse_deps(struct vc4_compile *c, struct dag *dag,
+ struct list_head *schedule_list)
{
- struct simple_node *node;
struct schedule_state state;
memset(&state, 0, sizeof(state));
+ state.dag = dag;
state.dir = R;
- for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
+ list_for_each_entry_rev(struct schedule_node, node, schedule_list,
+ link) {
calculate_deps(&state, (struct schedule_node *)node);
}
}
struct choose_scoreboard {
        /* DAG of unscheduled instructions; its heads are ready to issue. */
        struct dag *dag;

        /* Instruction slots issued so far, used for hazard distances. */
        int tick;

        /* Tick of the most recent write to an SFU register. */
        int last_sfu_write_tick;

        /* Tick of the most recent UNIFORMS_ADDRESS write; uniform reads
         * too close behind it are disallowed.
         */
        int last_uniforms_reset_tick;

        /* Regfile A/B register written by the previous instruction (~0 if
         * none): the next instruction must not read it.
         */
        uint32_t last_waddr_a, last_waddr_b;

        /* Set once a TLB (scoreboard-locking) instruction has issued. */
        bool tlb_locked;
};
static bool
{
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ /* Full immediate loads don't read any registers. */
+ if (sig == QPU_SIG_LOAD_IMM)
+ return false;
+
uint32_t src_muxes[] = {
QPU_GET_FIELD(inst, QPU_ADD_A),
QPU_GET_FIELD(inst, QPU_ADD_B),
raddr_a < 32 &&
scoreboard->last_waddr_a == raddr_a) ||
(src_muxes[i] == QPU_MUX_B &&
+ sig != QPU_SIG_SMALL_IMM &&
raddr_b < 32 &&
scoreboard->last_waddr_b == raddr_b)) {
return true;
}
}
+ if (sig == QPU_SIG_SMALL_IMM &&
+ QPU_GET_FIELD(inst, QPU_SMALL_IMM) >= QPU_SMALL_IMM_MUL_ROT) {
+ uint32_t mux_a = QPU_GET_FIELD(inst, QPU_MUL_A);
+ uint32_t mux_b = QPU_GET_FIELD(inst, QPU_MUL_B);
+
+ if (scoreboard->last_waddr_a == mux_a + QPU_W_ACC0 ||
+ scoreboard->last_waddr_a == mux_b + QPU_W_ACC0 ||
+ scoreboard->last_waddr_b == mux_a + QPU_W_ACC0 ||
+ scoreboard->last_waddr_b == mux_b + QPU_W_ACC0) {
+ return true;
+ }
+ }
+
+ if (reads_uniform(inst) &&
+ scoreboard->tick - scoreboard->last_uniforms_reset_tick <= 2) {
+ return true;
+ }
+
return false;
}
uint32_t baseline_score;
uint32_t next_score = 0;
- /* Schedule texture read setup early to hide their latency better. */
- if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
+ /* Schedule TLB operations as late as possible, to get more
+ * parallelism between shaders.
+ */
+ if (qpu_inst_is_tlb(inst))
return next_score;
next_score++;
- /* Default score for things that aren't otherwise special. */
- baseline_score = next_score;
- next_score++;
-
/* Schedule texture read results collection late to hide latency. */
if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
return next_score;
next_score++;
- /* Schedule TLB operations as late as possible, to get more
- * parallelism between shaders.
- */
- if (qpu_inst_is_tlb(inst))
+ /* Default score for things that aren't otherwise special. */
+ baseline_score = next_score;
+ next_score++;
+
+ /* Schedule texture read setup early to hide their latency better. */
+ if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
return next_score;
next_score++;
static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
- struct simple_node *schedule_list)
+ struct list_head *schedule_list,
+ struct schedule_node *prev_inst)
{
struct schedule_node *chosen = NULL;
- struct simple_node *node;
int chosen_prio = 0;
- foreach(node, schedule_list) {
- struct schedule_node *n = (struct schedule_node *)node;
+ /* Don't pair up anything with a thread switch signal -- emit_thrsw()
+ * will handle pairing it along with filling the delay slots.
+ */
+ if (prev_inst) {
+ uint32_t prev_sig = QPU_GET_FIELD(prev_inst->inst->inst,
+ QPU_SIG);
+ if (prev_sig == QPU_SIG_THREAD_SWITCH ||
+ prev_sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ return NULL;
+ }
+ }
+
+ list_for_each_entry(struct schedule_node, n, &scoreboard->dag->heads,
+ dag.link) {
uint64_t inst = n->inst->inst;
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+
+ /* Don't choose the branch instruction until it's the last one
+ * left. XXX: We could potentially choose it before it's the
+ * last one, if the remaining instructions fit in the delay
+ * slots.
+ */
+ if (sig == QPU_SIG_BRANCH &&
+ !list_is_singular(&scoreboard->dag->heads)) {
+ continue;
+ }
/* "An instruction must not read from a location in physical
* regfile A or B that was written to by the previous
if (pixel_scoreboard_too_soon(scoreboard, inst))
continue;
+ /* If we're trying to pair with another instruction, check
+ * that they're compatible.
+ */
+ if (prev_inst) {
+ /* Don't pair up a thread switch signal -- we'll
+ * handle pairing it when we pick it on its own.
+ */
+ if (sig == QPU_SIG_THREAD_SWITCH ||
+ sig == QPU_SIG_LAST_THREAD_SWITCH) {
+ continue;
+ }
+
+ if (prev_inst->uniform != -1 && n->uniform != -1)
+ continue;
+
+ /* Don't merge in something that will lock the TLB.
+			 * Hopefully what we have in inst will release some
+ * other instructions, allowing us to delay the
+ * TLB-locking instruction until later.
+ */
+ if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
+ continue;
+
+ inst = qpu_merge_inst(prev_inst->inst->inst, inst);
+ if (!inst)
+ continue;
+ }
+
int prio = get_instruction_priority(inst);
/* Found a valid instruction. If nothing better comes along,
} else if (prio < chosen_prio) {
continue;
}
+
+ if (n->delay > chosen->delay) {
+ chosen = n;
+ chosen_prio = prio;
+ } else if (n->delay < chosen->delay) {
+ continue;
+ }
}
return chosen;
(waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
scoreboard->last_sfu_write_tick = scoreboard->tick;
}
+
+ if (waddr_add == QPU_W_UNIFORMS_ADDRESS ||
+ waddr_mul == QPU_W_UNIFORMS_ADDRESS) {
+ scoreboard->last_uniforms_reset_tick = scoreboard->tick;
+ }
+
+ if (qpu_inst_is_tlb(inst))
+ scoreboard->tlb_locked = true;
}
/* Debug dump of the DAG heads (instructions currently ready to issue)
 * and their outgoing dependency edges.
 */
static void
dump_state(struct dag *dag)
{
        list_for_each_entry(struct schedule_node, n, &dag->heads, dag.link) {
                fprintf(stderr, "  t=%4d: ", n->unblocked_time);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                        struct schedule_node *child =
                                (struct schedule_node *)edge->child;
                        /* Pruned edges leave a NULL child behind. */
                        if (!child)
                                continue;

                        fprintf(stderr, "   - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        /* 'w' marks a write-after-read edge (see add_dep()). */
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->dag.parent_count,
                                edge->data ? 'w' : 'r');
                }
        }
}
+static uint32_t waddr_latency(uint32_t waddr, uint64_t after)
+{
+ if (waddr < 32)
+ return 2;
+
+ /* Apply some huge latency between texture fetch requests and getting
+ * their results back.
+ *
+ * FIXME: This is actually pretty bogus. If we do:
+ *
+ * mov tmu0_s, a
+ * <a bit of math>
+ * mov tmu0_s, b
+ * load_tmu0
+ * <more math>
+ * load_tmu0
+ *
+ * we count that as worse than
+ *
+ * mov tmu0_s, a
+ * mov tmu0_s, b
+ * <lots of math>
+ * load_tmu0
+ * <more math>
+ * load_tmu0
+ *
+ * because we associate the first load_tmu0 with the *second* tmu0_s.
+ */
+ if (waddr == QPU_W_TMU0_S) {
+ if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU0)
+ return 100;
+ }
+ if (waddr == QPU_W_TMU1_S) {
+ if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU1)
+ return 100;
+ }
+
+ switch(waddr) {
+ case QPU_W_SFU_RECIP:
+ case QPU_W_SFU_RECIPSQRT:
+ case QPU_W_SFU_EXP:
+ case QPU_W_SFU_LOG:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static uint32_t
+instruction_latency(struct schedule_node *before, struct schedule_node *after)
+{
+ uint64_t before_inst = before->inst->inst;
+ uint64_t after_inst = after->inst->inst;
+
+ return MAX2(waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_ADD),
+ after_inst),
+ waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_MUL),
+ after_inst));
+}
+
/** Computation of the delay member of a node, as a bottom-up DAG
 * traversal callback (children are visited first, so child->delay is
 * final by the time this runs).
 */
static void
compute_delay(struct dag_node *node, void *state)
{
        struct schedule_node *n = (struct schedule_node *)node;

        /* Even a leaf instruction takes one cycle. */
        n->delay = 1;

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                /* Slowest dependency chain through any child, including
                 * the latency before that child can consume our result.
                 */
                n->delay = MAX2(n->delay, (child->delay +
                                           instruction_latency(n, child)));
        }
}
/* Removes a DAG head, but removing only the WAR edges. (dag_prune_head()
 * should be called on it later to finish pruning the other edges).
 *
 * WAR edges are the ones tagged with nonzero edge data by add_dep();
 * dropping them now means mark_instruction_scheduled() won't apply result
 * latency to a writer that was only waiting on this read.
 */
static void
pre_remove_head(struct dag *dag, struct schedule_node *n)
{
        list_delinit(&n->dag.link);

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                if (edge->data)
                        dag_remove_edge(dag, edge);
        }
}
- if (debug) {
- fprintf(stderr, "initial deps:\n");
- dump_state(schedule_list);
- fprintf(stderr, "\n");
/* Marks an instruction as issued at "time": pushes each child's
 * unblocked_time out past this instruction's result latency, then prunes
 * the node from the DAG so newly unblocked children become heads.
 */
static void
mark_instruction_scheduled(struct dag *dag,
                           uint32_t time,
                           struct schedule_node *node)
{
        /* Called with NULL when no instruction (or no pairing partner)
         * was chosen this cycle.
         */
        if (!node)
                return;

        util_dynarray_foreach(&node->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                /* Edges already removed by pre_remove_head() have a NULL
                 * child.
                 */
                if (!child)
                        continue;

                uint32_t latency = instruction_latency(node, child);

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
        }

        dag_prune_head(dag, &node->dag);
}
+
/**
 * Emits a THRSW/LTHRSW signal in the stream, trying to move it up to pair
 * with another instruction.
 */
static void
emit_thrsw(struct vc4_compile *c,
           struct choose_scoreboard *scoreboard,
           uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        /* There should be nothing in a thrsw inst being scheduled other than
         * the signal bits.
         */
        assert(QPU_GET_FIELD(inst, QPU_OP_ADD) == QPU_A_NOP);
        assert(QPU_GET_FIELD(inst, QPU_OP_MUL) == QPU_M_NOP);

        /* Try to find an earlier scheduled instruction that we can merge the
         * thrsw into.  Only instructions with no signal already set can
         * carry it; the scan keeps the earliest such slot within the last
         * three instructions (matching the delay-slot fill below).
         */
        int thrsw_ip = c->qpu_inst_count;
        for (int i = 1; i <= MIN2(c->qpu_inst_count, 3); i++) {
                uint64_t prev_instr = c->qpu_insts[c->qpu_inst_count - i];
                uint32_t prev_sig = QPU_GET_FIELD(prev_instr, QPU_SIG);

                if (prev_sig == QPU_SIG_NONE)
                        thrsw_ip = c->qpu_inst_count - i;
        }

        if (thrsw_ip != c->qpu_inst_count) {
                /* Merge the thrsw into the existing instruction. */
                c->qpu_insts[thrsw_ip] =
                        QPU_UPDATE_FIELD(c->qpu_insts[thrsw_ip], sig, QPU_SIG);
        } else {
                /* No candidate found: emit the bare thrsw instruction. */
                qpu_serialize_one_inst(c, inst);
                update_scoreboard_for_chosen(scoreboard, inst);
        }

        /* Fill the delay slots with NOPs. */
        while (c->qpu_inst_count < thrsw_ip + 3) {
                update_scoreboard_for_chosen(scoreboard, qpu_NOP());
                qpu_serialize_one_inst(c, qpu_NOP());
        }
}
+
+static uint32_t
+schedule_instructions(struct vc4_compile *c,
+ struct choose_scoreboard *scoreboard,
+ struct qblock *block,
+ struct list_head *schedule_list,
+ enum quniform_contents *orig_uniform_contents,
+ uint32_t *orig_uniform_data,
+ uint32_t *next_uniform)
+{
+ uint32_t time = 0;
+
+ while (!list_empty(&scoreboard->dag->heads)) {
struct schedule_node *chosen =
- choose_instruction_to_schedule(&scoreboard,
- schedule_list);
+ choose_instruction_to_schedule(scoreboard,
+ schedule_list,
+ NULL);
+ struct schedule_node *merge = NULL;
/* If there are no valid instructions to schedule, drop a NOP
* in.
uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();
if (debug) {
- fprintf(stderr, "current list:\n");
- dump_state(schedule_list);
- fprintf(stderr, "chose: ");
+ fprintf(stderr, "t=%4d: current list:\n",
+ time);
+ dump_state(scoreboard->dag);
+ fprintf(stderr, "t=%4d: chose: ", time);
vc4_qpu_disasm(&inst, 1);
- fprintf(stderr, "\n\n");
+ fprintf(stderr, "\n");
}
- /* Schedule this instruction onto the QPU list. */
- if (chosen)
- remove_from_list(&chosen->link);
- qpu_serialize_one_inst(c, inst);
+ /* Schedule this instruction onto the QPU list. Also try to
+ * find an instruction to pair with it.
+ */
+ if (chosen) {
+ time = MAX2(chosen->unblocked_time, time);
+ pre_remove_head(scoreboard->dag, chosen);
+ if (chosen->uniform != -1) {
+ c->uniform_data[*next_uniform] =
+ orig_uniform_data[chosen->uniform];
+ c->uniform_contents[*next_uniform] =
+ orig_uniform_contents[chosen->uniform];
+ (*next_uniform)++;
+ }
- update_scoreboard_for_chosen(&scoreboard, inst);
+ merge = choose_instruction_to_schedule(scoreboard,
+ schedule_list,
+ chosen);
+ if (merge) {
+ time = MAX2(merge->unblocked_time, time);
+ inst = qpu_merge_inst(inst, merge->inst->inst);
+ assert(inst != 0);
+ if (merge->uniform != -1) {
+ c->uniform_data[*next_uniform] =
+ orig_uniform_data[merge->uniform];
+ c->uniform_contents[*next_uniform] =
+ orig_uniform_contents[merge->uniform];
+ (*next_uniform)++;
+ }
+
+ if (debug) {
+ fprintf(stderr, "t=%4d: merging: ",
+ time);
+ vc4_qpu_disasm(&merge->inst->inst, 1);
+ fprintf(stderr, "\n");
+ fprintf(stderr, " resulting in: ");
+ vc4_qpu_disasm(&inst, 1);
+ fprintf(stderr, "\n");
+ }
+ }
+ }
+
+ if (debug) {
+ fprintf(stderr, "\n");
+ }
/* Now that we've scheduled a new instruction, some of its
* children can be promoted to the list of instructions ready to
* be scheduled. Update the children's unblocked time for this
* DAG edge as we do so.
*/
- if (chosen) {
- for (int i = chosen->child_count - 1; i >= 0; i--) {
- struct schedule_node *child =
- chosen->children[i];
-
- child->parent_count--;
- if (child->parent_count == 0) {
- insert_at_head(schedule_list,
- &child->link);
- }
- }
+ mark_instruction_scheduled(scoreboard->dag, time, chosen);
+ mark_instruction_scheduled(scoreboard->dag, time, merge);
+
+ if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_THREAD_SWITCH ||
+ QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LAST_THREAD_SWITCH) {
+ emit_thrsw(c, scoreboard, inst);
+ } else {
+ qpu_serialize_one_inst(c, inst);
+ update_scoreboard_for_chosen(scoreboard, inst);
}
- scoreboard.tick++;
+ scoreboard->tick++;
+ time++;
+
+ if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_BRANCH) {
+ block->branch_qpu_ip = c->qpu_inst_count - 1;
+ /* Fill the delay slots.
+ *
+ * We should fill these with actual instructions,
+ * instead, but that will probably need to be done
+ * after this, once we know what the leading
+ * instructions of the successors are (so we can
+ * handle A/B register file write latency)
+ */
+ inst = qpu_NOP();
+ update_scoreboard_for_chosen(scoreboard, inst);
+ qpu_serialize_one_inst(c, inst);
+ qpu_serialize_one_inst(c, inst);
+ qpu_serialize_one_inst(c, inst);
+ }
}
+
+ return time;
}
-void
/* Schedules a single basic block: wraps its instructions in schedule
 * nodes, builds the dependency DAG, and list-schedules them into
 * c->qpu_insts.  Returns the estimated cycle count of the block.
 */
static uint32_t
qpu_schedule_instructions_block(struct vc4_compile *c,
                                struct choose_scoreboard *scoreboard,
                                struct qblock *block,
                                enum quniform_contents *orig_uniform_contents,
                                uint32_t *orig_uniform_data,
                                uint32_t *next_uniform)
{
        scoreboard->dag = dag_create(NULL);
        struct list_head setup_list;

        list_inithead(&setup_list);

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_sched_uniform = *next_uniform;
        while (!list_empty(&block->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)block->qpu_inst_list.next;
                /* Allocated against the DAG so ralloc_free() below frees
                 * everything at once.
                 */
                struct schedule_node *n = rzalloc(scoreboard->dag,
                                                  struct schedule_node);

                dag_init_node(scoreboard->dag, &n->dag);
                n->inst = inst;

                /* Record which uniform-stream slot each uniform-reading
                 * instruction consumed, so the stream can be reordered to
                 * match the final instruction order.
                 */
                if (reads_uniform(inst->inst)) {
                        n->uniform = next_sched_uniform++;
                } else {
                        n->uniform = -1;
                }
                list_del(&inst->link);
                list_addtail(&n->link, &setup_list);
        }

        calculate_forward_deps(c, scoreboard->dag, &setup_list);
        calculate_reverse_deps(c, scoreboard->dag, &setup_list);

        /* Bottom-up traversal so each node's delay includes its children. */
        dag_traverse_bottom_up(scoreboard->dag, compute_delay, NULL);

        uint32_t cycles = schedule_instructions(c, scoreboard, block,
                                                &setup_list,
                                                orig_uniform_contents,
                                                orig_uniform_data,
                                                next_uniform);

        ralloc_free(scoreboard->dag);
        scoreboard->dag = NULL;

        return cycles;
}
+
/* Patches the PC-relative target into each block's branch instruction,
 * now that every block's final QPU instruction positions are known.
 */
static void
qpu_set_branch_targets(struct vc4_compile *c)
{
        qir_for_each_block(block, c) {
                /* The end block of the program has no branch. */
                if (!block->successors[0])
                        continue;

                /* If there was no branch instruction, then the successor
                 * block must follow immediately after this one.
                 */
                if (block->branch_qpu_ip == ~0) {
                        assert(block->end_qpu_ip + 1 ==
                               block->successors[0]->start_qpu_ip);
                        continue;
                }

                /* Set the branch target for the block that doesn't follow
                 * immediately after ours.
                 */
                uint64_t *branch_inst = &c->qpu_insts[block->branch_qpu_ip];
                assert(QPU_GET_FIELD(*branch_inst, QPU_SIG) == QPU_SIG_BRANCH);
                assert(QPU_GET_FIELD(*branch_inst, QPU_BRANCH_TARGET) == 0);

                /* Relative to the instruction after the branch's three
                 * delay slots (branch IP + 4), in bytes.
                 */
                uint32_t branch_target =
                        (block->successors[0]->start_qpu_ip -
                         (block->branch_qpu_ip + 4)) * sizeof(uint64_t);
                *branch_inst = (*branch_inst |
                                QPU_SET_FIELD(branch_target, QPU_BRANCH_TARGET));

                /* Make sure that the if-we-don't-jump successor was scheduled
                 * just after the delay slots.
                 */
                if (block->successors[1]) {
                        assert(block->successors[1]->start_qpu_ip ==
                               block->branch_qpu_ip + 4);
                }
        }
}
+
+uint32_t
qpu_schedule_instructions(struct vc4_compile *c)
{
- void *mem_ctx = ralloc_context(NULL);
- struct simple_node schedule_list;
- struct simple_node *node;
+ /* We reorder the uniforms as we schedule instructions, so save the
+ * old data off and replace it.
+ */
+ uint32_t *uniform_data = c->uniform_data;
+ enum quniform_contents *uniform_contents = c->uniform_contents;
+ c->uniform_contents = ralloc_array(c, enum quniform_contents,
+ c->num_uniforms);
+ c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
+ c->uniform_array_size = c->num_uniforms;
+ uint32_t next_uniform = 0;
- make_empty_list(&schedule_list);
+ struct choose_scoreboard scoreboard;
+ memset(&scoreboard, 0, sizeof(scoreboard));
+ scoreboard.last_waddr_a = ~0;
+ scoreboard.last_waddr_b = ~0;
+ scoreboard.last_sfu_write_tick = -10;
+ scoreboard.last_uniforms_reset_tick = -10;
if (debug) {
fprintf(stderr, "Pre-schedule instructions\n");
- foreach(node, &c->qpu_inst_list) {
- struct queued_qpu_inst *q =
- (struct queued_qpu_inst *)node;
- vc4_qpu_disasm(&q->inst, 1);
- fprintf(stderr, "\n");
+ qir_for_each_block(block, c) {
+ fprintf(stderr, "BLOCK %d\n", block->index);
+ list_for_each_entry(struct queued_qpu_inst, q,
+ &block->qpu_inst_list, link) {
+ vc4_qpu_disasm(&q->inst, 1);
+ fprintf(stderr, "\n");
+ }
}
fprintf(stderr, "\n");
}
- /* Wrap each instruction in a scheduler structure. */
- while (!is_empty_list(&c->qpu_inst_list)) {
- struct queued_qpu_inst *inst =
- (struct queued_qpu_inst *)c->qpu_inst_list.next;
- struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);
+ uint32_t cycles = 0;
+ qir_for_each_block(block, c) {
+ block->start_qpu_ip = c->qpu_inst_count;
+ block->branch_qpu_ip = ~0;
- n->inst = inst;
- remove_from_list(&inst->link);
- insert_at_tail(&schedule_list, &n->link);
- }
-
- calculate_forward_deps(c, &schedule_list);
- calculate_reverse_deps(c, &schedule_list);
+ cycles += qpu_schedule_instructions_block(c,
+ &scoreboard,
+ block,
+ uniform_contents,
+ uniform_data,
+ &next_uniform);
- foreach(node, &schedule_list) {
- struct schedule_node *n = (struct schedule_node *)node;
- compute_delay(n);
+ block->end_qpu_ip = c->qpu_inst_count - 1;
}
- schedule_instructions(c, &schedule_list);
+ qpu_set_branch_targets(c);
+
+ assert(next_uniform == c->num_uniforms);
if (debug) {
fprintf(stderr, "Post-schedule instructions\n");
fprintf(stderr, "\n");
}
- ralloc_free(mem_ctx);
+ return cycles;
}