if (!magic) {
add_write_dep(state, &state->last_rf[waddr], n);
} else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
+ /* XXX perf: For V3D 4.x, we could reorder TMU writes other
+ * than the TMUS/TMUD/TMUA writes to improve scheduling
+ * flexibility.
+ */
add_write_dep(state, &state->last_tmu_write, n);
switch (waddr) {
case V3D_QPU_WADDR_TMUS:
}
}
-static void
-process_cond_deps(struct schedule_state *state, struct schedule_node *n,
- enum v3d_qpu_cond cond)
-{
- if (cond != V3D_QPU_COND_NONE)
- add_read_dep(state, state->last_sf, n);
-}
-
-static void
-process_pf_deps(struct schedule_state *state, struct schedule_node *n,
- enum v3d_qpu_pf pf)
-{
- if (pf != V3D_QPU_PF_NONE)
- add_write_dep(state, &state->last_sf, n);
-}
-
-static void
-process_uf_deps(struct schedule_state *state, struct schedule_node *n,
- enum v3d_qpu_uf uf)
-{
- if (uf != V3D_QPU_UF_NONE)
- add_write_dep(state, &state->last_sf, n);
-}
-
/**
* Common code for dependencies that need to be tracked both forward and
* backward.
const struct v3d_device_info *devinfo = state->devinfo;
struct qinst *qinst = n->inst;
struct v3d_qpu_instr *inst = &qinst->qpu;
+ /* If the input and output segments are shared, then all VPM reads of
+ * a location need to happen before any writes to it. We handle this
+ * by serializing all VPM operations for now.
+ */
+ bool separate_vpm_segment = false;
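+ /* (Sketch of the eventual relaxation, assuming a hypothetical
+ * devinfo flag: with something like
+ *
+ * separate_vpm_segment = devinfo->separate_vpm_segment;
+ *
+ * set, the read/write serialization below could be skipped.)
+ */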
if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
add_write_dep(state, &state->last_vpm, n);
break;
+ case V3D_QPU_A_LDVPMV_IN:
+ case V3D_QPU_A_LDVPMD_IN:
+ case V3D_QPU_A_LDVPMG_IN:
+ case V3D_QPU_A_LDVPMP:
+ if (!separate_vpm_segment)
+ add_write_dep(state, &state->last_vpm, n);
+ break;
+
case V3D_QPU_A_VPMWT:
add_read_dep(state, state->last_vpm, n);
break;
add_write_dep(state, &state->last_tlb, n);
break;
- case V3D_QPU_A_FLAPUSH:
- case V3D_QPU_A_FLBPUSH:
- case V3D_QPU_A_VFLA:
- case V3D_QPU_A_VFLNA:
- case V3D_QPU_A_VFLB:
- case V3D_QPU_A_VFLNB:
- add_read_dep(state, state->last_sf, n);
- break;
-
- case V3D_QPU_A_FLPOP:
- add_write_dep(state, &state->last_sf, n);
- break;
-
default:
break;
}
for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
add_write_dep(state, &state->last_r[i], n);
add_write_dep(state, &state->last_sf, n);
+ add_write_dep(state, &state->last_rtop, n);
/* Scoreboard-locking operations have to stay after the last
* thread switch.
add_write_dep(state, &state->last_tmu_config, n);
}
- if (inst->sig.ldtmu) {
+ if (v3d_qpu_waits_on_tmu(inst)) {
/* TMU loads are coming from a FIFO, so ordering is important.
*/
add_write_dep(state, &state->last_tmu_write, n);
if (inst->sig.ldtlb | inst->sig.ldtlbu)
add_read_dep(state, state->last_tlb, n);
- if (inst->sig.ldvpm)
+ if (inst->sig.ldvpm) {
add_write_dep(state, &state->last_vpm_read, n);
+ /* At least for now, we're doing shared I/O segments, so queue
+ * all writes after all reads.
+ */
+ if (!separate_vpm_segment)
+ add_write_dep(state, &state->last_vpm, n);
+ }
+
/* inst->sig.ldunif or sideband uniform read */
if (qinst->uniform != ~0)
add_write_dep(state, &state->last_unif, n);
- process_cond_deps(state, n, inst->flags.ac);
- process_cond_deps(state, n, inst->flags.mc);
- process_pf_deps(state, n, inst->flags.apf);
- process_pf_deps(state, n, inst->flags.mpf);
- process_uf_deps(state, n, inst->flags.auf);
- process_uf_deps(state, n, inst->flags.muf);
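+ /* The ac/mc condition fields read the flags; the pf (push) and
+ * uf (update) fields write them, on either ALU half.
+ */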
+ if (v3d_qpu_reads_flags(inst))
+ add_read_dep(state, state->last_sf, n);
+ if (v3d_qpu_writes_flags(inst))
+ add_write_dep(state, &state->last_sf, n);
}
static void
struct choose_scoreboard {
int tick;
- int last_sfu_write_tick;
+ int last_magic_sfu_write_tick;
int last_ldvary_tick;
int last_uniforms_reset_tick;
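+ /* Tick of the last thrsw signal, so successive thread switches
+ * can be kept at least 3 ticks apart.
+ */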
+ int last_thrsw_tick;
bool tlb_locked;
};
{
switch (mux) {
case V3D_QPU_MUX_R4:
- if (scoreboard->tick - scoreboard->last_sfu_write_tick <= 2)
+ if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
return true;
break;
* This would normally be prevented by dependency tracking, but might
* occur if a dead SFU computation makes it to scheduling.
*/
- if (scoreboard->tick - scoreboard->last_sfu_write_tick < 2 &&
+ if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
v3d_qpu_writes_r4(devinfo, inst))
return true;
next_score++;
/* Schedule texture read results collection late to hide latency. */
- if (inst->sig.ldtmu)
+ if (v3d_qpu_waits_on_tmu(inst))
return next_score;
next_score++;
+ /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
+ * instructions after the producer if possible, not just 1.
+ */
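+ /* (Illustrative, not exact QPU syntax: in
+ *
+ * recip rf12, rf10
+ * <unrelated instruction>
+ * fmul rf13, rf12, rf11
+ *
+ * the SFU result in rf12 has an extra tick to land before the
+ * fmul reads it, instead of the read stalling right behind the
+ * producer.)
+ */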
+
/* Default score for things that aren't otherwise special. */
baseline_score = next_score;
next_score++;
{
if (v3d_qpu_uses_vpm(inst))
return true;
+ if (v3d_qpu_uses_sfu(inst))
+ return true;
if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
if (inst->alu.add.op != V3D_QPU_A_NOP &&
return true;
}
+ if (inst->alu.add.op == V3D_QPU_A_TMUWT)
+ return true;
+
if (inst->alu.mul.op != V3D_QPU_M_NOP &&
inst->alu.mul.magic_write &&
qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
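+ /* raddr_b encodes a small immediate when sig.small_imm is set,
+ * so the raw values only match if their interpretation matches
+ * too.
+ */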
if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
- a->raddr_b != b->raddr_b) {
+ (a->raddr_b != b->raddr_b ||
+ a->sig.small_imm != b->sig.small_imm)) {
return false;
}
merge.raddr_b = b->raddr_b;
* sooner. If the ldvary's r5 wasn't used, then ldunif might
* otherwise get scheduled so ldunif and ldvary try to update
* r5 in the same tick.
+ *
+ * XXX perf: To get good pipelining of a sequence of varying
+ * loads, we need to figure out how to pair the ldvary signal
+ * with the instruction before the last r5 user of the previous
+ * ldvary sequence; currently it usually pairs with the last r5
+ * user itself.
*/
if ((inst->sig.ldunif || inst->sig.ldunifa) &&
scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
enum v3d_qpu_waddr waddr)
{
if (v3d_qpu_magic_waddr_is_sfu(waddr))
- scoreboard->last_sfu_write_tick = scoreboard->tick;
+ scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
}
static void
*
* because we associate the first load_tmu0 with the *second* tmu0_s.
*/
- if (v3d_qpu_magic_waddr_is_tmu(waddr) && after->sig.ldtmu)
+ if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
return 100;
/* Assume that anything depending on us is consuming the SFU result. */
}
static bool
-valid_thrsw_sequence(struct v3d_compile *c,
+valid_thrsw_sequence(struct v3d_compile *c,
+                     struct choose_scoreboard *scoreboard,
struct qinst *qinst, int instructions_in_sequence,
bool is_thrend)
{
+ /* Don't emit our thrsw while the previous thrsw hasn't happened yet:
+ * the switch only takes effect after its delay slots, so successive
+ * thread switches must be at least 3 ticks apart.
+ */
+ if (scoreboard->last_thrsw_tick + 3 >
+ scoreboard->tick - instructions_in_sequence) {
+ return false;
+ }
+
for (int slot = 0; slot < instructions_in_sequence; slot++) {
/* No scheduling SFU when the result would land in the other
* thread. The simulator complains for safety, though it
if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
break;
- if (!valid_thrsw_sequence(c, prev_inst, slots_filled + 1,
+ if (!valid_thrsw_sequence(c, scoreboard,
+ prev_inst, slots_filled + 1,
is_thrend)) {
break;
}
if (merge_inst) {
merge_inst->qpu.sig.thrsw = true;
needs_free = true;
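+ /* The thrsw signal was merged back into an instruction
+ * emitted slots_filled ticks ago.
+ */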
+ scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
} else {
+ scoreboard->last_thrsw_tick = scoreboard->tick;
insert_scheduled_instruction(c, block, scoreboard, inst);
time++;
slots_filled++;
struct choose_scoreboard scoreboard;
memset(&scoreboard, 0, sizeof(scoreboard));
scoreboard.last_ldvary_tick = -10;
- scoreboard.last_sfu_write_tick = -10;
+ scoreboard.last_magic_sfu_write_tick = -10;
scoreboard.last_uniforms_reset_tick = -10;
+ scoreboard.last_thrsw_tick = -10;
if (debug) {
fprintf(stderr, "Pre-schedule instructions\n");