aco: fix non-rtz pack_half_2x16
[mesa.git] / src / amd / compiler / aco_scheduler.cpp
index d5f2d913a658fd0aaa783f05976cf76025f3e32a..9d6810672b58f776e217a903a1fe50635d86b653 100644 (file)
 
 namespace aco {
 
-struct sched_ctx {
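+/* Result of trying to move a single instruction: the move either succeeded,
+ * or was rejected because of an SSA dependency, a read-after-read (RAR)
+ * conflict with a killed operand, or because it would raise register
+ * pressure past the limit. */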
+enum MoveResult {
+   move_success,
+   move_fail_ssa,
+   move_fail_rar,
+   move_fail_pressure,
+};
+
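+/* Bookkeeping for moving one instruction at a time through the scheduling
+ * window while keeping the register_demand array consistent. A simplified
+ * usage sketch (illustrative only; the passes below additionally run hazard
+ * queries and break on move_fail_pressure):
+ *
+ *    mv.downwards_init(idx, improved_rar, may_form_clauses);
+ *    for (each candidate below idx, nearest first)
+ *       if (mv.downwards_move(part_of_clause) != move_success)
+ *          mv.downwards_skip();
+ *
+ *    mv.upwards_init(idx + 1, improved_rar);
+ *    for (each candidate above idx) {
+ *       if (no insert point yet && !mv.upwards_check_deps())
+ *          mv.upwards_set_insert_idx(candidate_idx);
+ *       if (no insert point yet || mv.upwards_move() != move_success)
+ *          mv.upwards_skip();
+ *    }
+ */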
+struct MoveState {
+   RegisterDemand max_registers;
+
+   Block *block;
+   Instruction *current;
+   RegisterDemand *register_demand;
+   bool improved_rar;
+
    std::vector<bool> depends_on;
+   /* Two RAR sets are needed because, for downwards VMEM scheduling, one must
+    * exclude the instructions already in the clause: new clause members are
+    * never moved past other clause members. */
    std::vector<bool> RAR_dependencies;
-   /* For downwards VMEM scheduling, same as RAR_dependencies but excludes the
-    * instructions in the clause, since new instructions in the clause are not
-    * moved past any other instructions in the clause. */
-   std::vector<bool> new_RAR_dependencies;
+   std::vector<bool> RAR_dependencies_clause;
+
+   int source_idx;
+   int insert_idx, insert_idx_clause;
+   RegisterDemand total_demand, total_demand_clause;
+
+   /* for moving instructions before the current instruction to after it */
+   void downwards_init(int current_idx, bool improved_rar, bool may_form_clauses);
+   MoveResult downwards_move(bool clause);
+   void downwards_skip();
+
+   /* for moving instructions that come after the first use of the current instruction up to just before that use */
+   void upwards_init(int source_idx, bool improved_rar);
+   bool upwards_check_deps();
+   void upwards_set_insert_idx(int before);
+   MoveResult upwards_move();
+   void upwards_skip();
+
+private:
+   void downwards_advance_helper();
+};
 
-   RegisterDemand max_registers;
+struct sched_ctx {
    int16_t num_waves;
    int16_t last_SMEM_stall;
    int last_SMEM_dep_idx;
+   MoveState mv;
 };
 
 /* This scheduler is a simple bottom-up pass based on ideas from
@@ -66,173 +101,425 @@ struct sched_ctx {
  */
 
 template <typename T>
-void move_element(T& list, size_t idx, size_t before) {
+void move_element(T begin_it, size_t idx, size_t before) {
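+    /* rotate so that the element at idx ends up immediately before the
+     * element originally at index `before`; e.g. with {a,b,c,d}
+     * (illustrative): move_element(v.begin(), 0, 3) -> {b,c,a,d} and
+     * move_element(v.begin(), 3, 1) -> {a,d,b,c} */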
     if (idx < before) {
-        auto begin = std::next(list.begin(), idx);
-        auto end = std::next(list.begin(), before);
+        auto begin = std::next(begin_it, idx);
+        auto end = std::next(begin_it, before);
         std::rotate(begin, begin + 1, end);
     } else if (idx > before) {
-        auto begin = std::next(list.begin(), before);
-        auto end = std::next(list.begin(), idx + 1);
+        auto begin = std::next(begin_it, before);
+        auto end = std::next(begin_it, idx + 1);
         std::rotate(begin, end - 1, end);
     }
 }
 
-static RegisterDemand getLiveChanges(aco_ptr<Instruction>& instr)
+void MoveState::downwards_advance_helper()
 {
-   RegisterDemand changes;
-   for (const Definition& def : instr->definitions) {
-      if (!def.isTemp() || def.isKill())
-         continue;
-      changes += def.getTemp();
+   source_idx--;
+   total_demand.update(register_demand[source_idx]);
+}
+
+void MoveState::downwards_init(int current_idx, bool improved_rar_, bool may_form_clauses)
+{
+   improved_rar = improved_rar_;
+   source_idx = current_idx;
+
+   insert_idx = current_idx + 1;
+   insert_idx_clause = current_idx;
+
+   total_demand = total_demand_clause = register_demand[current_idx];
+
+   std::fill(depends_on.begin(), depends_on.end(), false);
+   if (improved_rar) {
+      std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
+      if (may_form_clauses)
+         std::fill(RAR_dependencies_clause.begin(), RAR_dependencies_clause.end(), false);
    }
 
+   for (const Operand& op : current->operands) {
+      if (op.isTemp()) {
+         depends_on[op.tempId()] = true;
+         if (improved_rar && op.isFirstKill())
+            RAR_dependencies[op.tempId()] = true;
+      }
+   }
+
+   /* update total_demand/source_idx */
+   downwards_advance_helper();
+}
+
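+/* Try to move block->instructions[source_idx] down to the current insert
+ * point. With clause=true the candidate instead joins the memory clause being
+ * formed at insert_idx_clause, so it is not moved past the clause members. */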
+MoveResult MoveState::downwards_move(bool clause)
+{
+   aco_ptr<Instruction>& instr = block->instructions[source_idx];
+
+   for (const Definition& def : instr->definitions)
+      if (def.isTemp() && depends_on[def.tempId()])
+         return move_fail_ssa;
+
+   /* check if one of the candidate's operands is killed by a depending instruction */
+   std::vector<bool>& RAR_deps = improved_rar ? (clause ? RAR_dependencies_clause : RAR_dependencies) : depends_on;
    for (const Operand& op : instr->operands) {
-      if (!op.isTemp() || !op.isFirstKill())
-         continue;
-      changes -= op.getTemp();
+      if (op.isTemp() && RAR_deps[op.tempId()]) {
+         // FIXME: account for difference in register pressure
+         return move_fail_rar;
+      }
+   }
+
+   if (clause) {
+      for (const Operand& op : instr->operands) {
+         if (op.isTemp()) {
+            depends_on[op.tempId()] = true;
+            if (op.isFirstKill())
+               RAR_dependencies[op.tempId()] = true;
+         }
+      }
    }
 
-   return changes;
+   int dest_insert_idx = clause ? insert_idx_clause : insert_idx;
+   RegisterDemand register_pressure = clause ? total_demand_clause : total_demand;
+
+   const RegisterDemand candidate_diff = get_live_changes(instr);
+   const RegisterDemand temp = get_temp_registers(instr);
+   if (RegisterDemand(register_pressure - candidate_diff).exceeds(max_registers))
+      return move_fail_pressure;
+   const RegisterDemand temp2 = get_temp_registers(block->instructions[dest_insert_idx - 1]);
+   const RegisterDemand new_demand = register_demand[dest_insert_idx - 1] - temp2 + temp;
+   if (new_demand.exceeds(max_registers))
+      return move_fail_pressure;
+
+   /* move the candidate below the current instruction */
+   move_element(block->instructions.begin(), source_idx, dest_insert_idx);
+
+   /* update register pressure */
+   move_element(register_demand, source_idx, dest_insert_idx);
+   for (int i = source_idx; i < dest_insert_idx - 1; i++)
+      register_demand[i] -= candidate_diff;
+   register_demand[dest_insert_idx - 1] = new_demand;
+   total_demand_clause -= candidate_diff;
+   insert_idx_clause--;
+   if (!clause) {
+      total_demand -= candidate_diff;
+      insert_idx--;
+   }
+
+   downwards_advance_helper();
+   return move_success;
 }
 
-static RegisterDemand getTempRegisters(aco_ptr<Instruction>& instr)
+void MoveState::downwards_skip()
 {
-   RegisterDemand temp_registers;
-   for (const Definition& def : instr->definitions) {
-      if (!def.isTemp() || !def.isKill())
-         continue;
-      temp_registers += def.getTemp();
+   aco_ptr<Instruction>& instr = block->instructions[source_idx];
+
+   for (const Operand& op : instr->operands) {
+      if (op.isTemp()) {
+         depends_on[op.tempId()] = true;
+         if (improved_rar && op.isFirstKill()) {
+            RAR_dependencies[op.tempId()] = true;
+            RAR_dependencies_clause[op.tempId()] = true;
+         }
+      }
    }
-   return temp_registers;
+   total_demand_clause.update(register_demand[source_idx]);
+
+   downwards_advance_helper();
 }
 
-static bool is_spill_reload(aco_ptr<Instruction>& instr)
+void MoveState::upwards_init(int source_idx_, bool improved_rar_)
 {
-   return instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload;
+   source_idx = source_idx_;
+   improved_rar = improved_rar_;
+
+   insert_idx = -1;
+
+   std::fill(depends_on.begin(), depends_on.end(), false);
+   std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
+
+   for (const Definition& def : current->definitions) {
+      if (def.isTemp())
+         depends_on[def.tempId()] = true;
+   }
 }
 
-bool can_reorder(Instruction* candidate)
+bool MoveState::upwards_check_deps()
 {
-   switch (candidate->format) {
-   case Format::SMEM:
-      return static_cast<SMEM_instruction*>(candidate)->can_reorder;
-   case Format::MUBUF:
-      return static_cast<MUBUF_instruction*>(candidate)->can_reorder;
-   case Format::MIMG:
-      return static_cast<MIMG_instruction*>(candidate)->can_reorder;
-   case Format::MTBUF:
-      return static_cast<MTBUF_instruction*>(candidate)->can_reorder;
-   case Format::FLAT:
-   case Format::GLOBAL:
-   case Format::SCRATCH:
-      return static_cast<FLAT_instruction*>(candidate)->can_reorder;
-   default:
-      return true;
+   aco_ptr<Instruction>& instr = block->instructions[source_idx];
+   for (const Operand& op : instr->operands) {
+      if (op.isTemp() && depends_on[op.tempId()])
+         return false;
    }
+   return true;
+}
+
+void MoveState::upwards_set_insert_idx(int before)
+{
+   insert_idx = before;
+   total_demand = register_demand[before - 1];
 }
 
-bool is_gs_or_done_sendmsg(Instruction *instr)
+MoveResult MoveState::upwards_move()
+{
+   assert(insert_idx >= 0);
+
+   aco_ptr<Instruction>& instr = block->instructions[source_idx];
+   for (const Operand& op : instr->operands) {
+      if (op.isTemp() && depends_on[op.tempId()])
+         return move_fail_ssa;
+   }
+
+   /* check if candidate uses/kills an operand which is used by a dependency */
+   for (const Operand& op : instr->operands) {
+      if (op.isTemp() && (!improved_rar || op.isFirstKill()) && RAR_dependencies[op.tempId()])
+         return move_fail_rar;
+   }
+
+   /* check if register pressure is low enough: the diff is negative if register pressure is decreased */
+   const RegisterDemand candidate_diff = get_live_changes(instr);
+   const RegisterDemand temp = get_temp_registers(instr);
+   if (RegisterDemand(total_demand + candidate_diff).exceeds(max_registers))
+      return move_fail_pressure;
+   const RegisterDemand temp2 = get_temp_registers(block->instructions[insert_idx - 1]);
+   const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + candidate_diff + temp;
+   if (new_demand.exceeds(max_registers))
+      return move_fail_pressure;
+
+   /* move the candidate above the insert_idx */
+   move_element(block->instructions.begin(), source_idx, insert_idx);
+
+   /* update register pressure */
+   move_element(register_demand, source_idx, insert_idx);
+   for (int i = insert_idx + 1; i <= source_idx; i++)
+      register_demand[i] += candidate_diff;
+   register_demand[insert_idx] = new_demand;
+   total_demand += candidate_diff;
+
+   insert_idx++;
+
+   total_demand.update(register_demand[source_idx]);
+   source_idx++;
+
+   return move_success;
+}
+
+void MoveState::upwards_skip()
+{
+   if (insert_idx >= 0) {
+      aco_ptr<Instruction>& instr = block->instructions[source_idx];
+      for (const Definition& def : instr->definitions) {
+         if (def.isTemp())
+            depends_on[def.tempId()] = true;
+      }
+      for (const Operand& op : instr->operands) {
+         if (op.isTemp())
+            RAR_dependencies[op.tempId()] = true;
+      }
+      total_demand.update(register_demand[source_idx]);
+   }
+
+   source_idx++;
+}
+
+bool is_gs_or_done_sendmsg(const Instruction *instr)
 {
    if (instr->opcode == aco_opcode::s_sendmsg) {
-      uint16_t imm = static_cast<SOPP_instruction*>(instr)->imm;
+      uint16_t imm = static_cast<const SOPP_instruction*>(instr)->imm;
       return (imm & sendmsg_id_mask) == _sendmsg_gs ||
              (imm & sendmsg_id_mask) == _sendmsg_gs_done;
    }
    return false;
 }
 
-bool is_done_sendmsg(Instruction *instr)
+bool is_done_sendmsg(const Instruction *instr)
 {
    if (instr->opcode == aco_opcode::s_sendmsg) {
-      uint16_t imm = static_cast<SOPP_instruction*>(instr)->imm;
+      uint16_t imm = static_cast<const SOPP_instruction*>(instr)->imm;
       return (imm & sendmsg_id_mask) == _sendmsg_gs_done;
    }
    return false;
 }
 
-barrier_interaction get_barrier_interaction(Instruction* instr)
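+/* Like get_sync_info(), but additionally treats SMEM instructions whose first
+ * operand is a 16-byte descriptor (presumably SMEM buffer loads; see the
+ * FIXME below) as private buffer accesses. */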
+memory_sync_info get_sync_info_with_hack(const Instruction* instr)
 {
-   switch (instr->format) {
-   case Format::SMEM:
-      return static_cast<SMEM_instruction*>(instr)->barrier;
-   case Format::MUBUF:
-      return static_cast<MUBUF_instruction*>(instr)->barrier;
-   case Format::MIMG:
-      return static_cast<MIMG_instruction*>(instr)->barrier;
-   case Format::MTBUF:
-      return static_cast<MTBUF_instruction*>(instr)->barrier;
-   case Format::FLAT:
-   case Format::GLOBAL:
-   case Format::SCRATCH:
-      return static_cast<FLAT_instruction*>(instr)->barrier;
-   case Format::DS:
-      return barrier_shared;
-   case Format::SOPP:
-      if (is_done_sendmsg(instr))
-         return (barrier_interaction)(barrier_gs_data | barrier_gs_sendmsg);
-      else if (is_gs_or_done_sendmsg(instr))
-         return barrier_gs_sendmsg;
+   memory_sync_info sync = get_sync_info(instr);
+   if (instr->format == Format::SMEM && !instr->operands.empty() && instr->operands[0].bytes() == 16) {
+      // FIXME: currently, it doesn't seem beneficial to omit this due to how our scheduler works
+      sync.storage = (storage_class)(sync.storage | storage_buffer);
+      sync.semantics = (memory_semantics)(sync.semantics | semantic_private);
+   }
+   return sync;
+}
+
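+/* Sets of storage classes (bitmasks of storage_class) touched by the memory
+ * events seen so far, split by barrier vs. ordinary access and by
+ * acquire/release/relaxed/atomic semantics. */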
+struct memory_event_set {
+   bool has_control_barrier;
+
+   unsigned bar_acquire;
+   unsigned bar_release;
+   unsigned bar_classes;
+
+   unsigned access_acquire;
+   unsigned access_release;
+   unsigned access_relaxed;
+   unsigned access_atomic;
+};
+
+struct hazard_query {
+   bool contains_spill;
+   bool contains_sendmsg;
+   memory_event_set mem_events;
+   unsigned aliasing_storage; /* storage classes which are accessed (non-SMEM) */
+   unsigned aliasing_storage_smem; /* storage classes which are accessed (SMEM) */
+};
+
+void init_hazard_query(hazard_query *query) {
+   query->contains_spill = false;
+   query->contains_sendmsg = false;
+   memset(&query->mem_events, 0, sizeof(query->mem_events));
+   query->aliasing_storage = 0;
+   query->aliasing_storage_smem = 0;
+}
+
+void add_memory_event(memory_event_set *set, Instruction *instr, memory_sync_info *sync)
+{
+   set->has_control_barrier |= is_done_sendmsg(instr);
+   if (instr->opcode == aco_opcode::p_barrier) {
+      Pseudo_barrier_instruction *bar = static_cast<Pseudo_barrier_instruction*>(instr);
+      if (bar->sync.semantics & semantic_acquire)
+         set->bar_acquire |= bar->sync.storage;
+      if (bar->sync.semantics & semantic_release)
+         set->bar_release |= bar->sync.storage;
+      set->bar_classes |= bar->sync.storage;
+
+      set->has_control_barrier |= bar->exec_scope > scope_invocation;
+   }
+
+   if (!sync->storage)
+      return;
+
+   if (sync->semantics & semantic_acquire)
+      set->access_acquire |= sync->storage;
+   if (sync->semantics & semantic_release)
+      set->access_release |= sync->storage;
+
+   if (!(sync->semantics & semantic_private)) {
+      if (sync->semantics & semantic_atomic)
+         set->access_atomic |= sync->storage;
       else
-         return barrier_none;
-   default:
-      return barrier_none;
+         set->access_relaxed |= sync->storage;
    }
 }
 
-bool can_move_instr(aco_ptr<Instruction>& instr, Instruction* current, int moving_interaction)
+void add_to_hazard_query(hazard_query *query, Instruction *instr)
 {
-   /* don't move exports so that they stay closer together */
-   if (instr->format == Format::EXP)
-      return false;
+   if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
+      query->contains_spill = true;
+   query->contains_sendmsg |= instr->opcode == aco_opcode::s_sendmsg;
 
-   /* don't move s_memtime/s_memrealtime */
-   if (instr->opcode == aco_opcode::s_memtime || instr->opcode == aco_opcode::s_memrealtime)
-      return false;
-
-   /* handle barriers */
-
-   /* TODO: instead of stopping, maybe try to move the barriers and any
-    * instructions interacting with them instead? */
-   if (instr->format != Format::PSEUDO_BARRIER) {
-      if (instr->opcode == aco_opcode::s_barrier) {
-         return can_reorder(current) && moving_interaction == barrier_none;
-      } else if (is_gs_or_done_sendmsg(instr.get())) {
-         int interaction = get_barrier_interaction(current);
-         interaction |= moving_interaction;
-         return !(interaction & get_barrier_interaction(instr.get()));
-      } else {
-         return true;
-      }
+   memory_sync_info sync = get_sync_info_with_hack(instr);
+
+   add_memory_event(&query->mem_events, instr, &sync);
+
+   if (!(sync.semantics & semantic_can_reorder)) {
+      unsigned storage = sync.storage;
+      /* images and buffer/global memory can alias
+       * (TODO: more precisely, buffer images and buffer/global memory can alias) */
+      if (storage & (storage_buffer | storage_image))
+         storage |= storage_buffer | storage_image;
+      if (instr->format == Format::SMEM)
+         query->aliasing_storage_smem |= storage;
+      else
+         query->aliasing_storage |= storage;
+   }
+}
+
+enum HazardResult {
+   hazard_success,
+   hazard_fail_reorder_vmem_smem,
+   hazard_fail_reorder_ds,
+   hazard_fail_reorder_sendmsg,
+   hazard_fail_spill,
+   hazard_fail_export,
+   hazard_fail_barrier,
+   /* Must stop at these failures: add_to_hazard_query() does not account for
+    * them, so the query cannot simply absorb the offending instruction. */
+   hazard_fail_exec,
+   hazard_fail_unreorderable,
+};
+
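+/* A sketch of the intended use (illustrative): accumulate a query over the
+ * instructions a candidate would be moved across, then test the candidate:
+ *
+ *    hazard_query hq;
+ *    init_hazard_query(&hq);
+ *    add_to_hazard_query(&hq, current); // plus every skipped instruction
+ *    if (perform_hazard_query(&hq, candidate, false) != hazard_success)
+ *       ...; // skip the candidate or stop, depending on the failure
+ */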
+HazardResult perform_hazard_query(hazard_query *query, Instruction *instr, bool upwards)
+{
+   if (instr->opcode == aco_opcode::p_exit_early_if)
+      return hazard_fail_exec;
+   for (const Definition& def : instr->definitions) {
+      if (def.isFixed() && def.physReg() == exec)
+         return hazard_fail_exec;
    }
 
-   int interaction = get_barrier_interaction(current);
-   interaction |= moving_interaction;
-
-   switch (instr->opcode) {
-   case aco_opcode::p_memory_barrier_atomic:
-      return !(interaction & barrier_atomic);
-   /* For now, buffer and image barriers are treated the same. this is because of
-    * dEQP-VK.memory_model.message_passing.core11.u32.coherent.fence_fence.atomicwrite.device.payload_nonlocal.buffer.guard_nonlocal.image.comp
-    * which seems to use an image load to determine if the result of a buffer load is valid. So the ordering of the two loads is important.
-    * I /think/ we should probably eventually expand the meaning of a buffer barrier so that all buffer operations before it, must stay before it
-    * and that both image and buffer operations after it, must stay after it. We should also do the same for image barriers.
-    * Or perhaps the problem is that we don't have a combined barrier instruction for both buffers and images, but the CTS test expects us to?
-    * Either way, this solution should work. */
-   case aco_opcode::p_memory_barrier_buffer:
-   case aco_opcode::p_memory_barrier_image:
-      return !(interaction & (barrier_image | barrier_buffer));
-   case aco_opcode::p_memory_barrier_shared:
-      return !(interaction & barrier_shared);
-   case aco_opcode::p_memory_barrier_common:
-      return !(interaction & (barrier_image | barrier_buffer | barrier_shared | barrier_atomic));
-   case aco_opcode::p_memory_barrier_gs_data:
-      return !(interaction & barrier_gs_data);
-   case aco_opcode::p_memory_barrier_gs_sendmsg:
-      return !(interaction & barrier_gs_sendmsg);
-   default:
-      return false;
+   /* don't move exports so that they stay closer together */
+   if (instr->format == Format::EXP)
+      return hazard_fail_export;
+
+   /* don't move non-reorderable instructions */
+   if (instr->opcode == aco_opcode::s_memtime ||
+       instr->opcode == aco_opcode::s_memrealtime ||
+       instr->opcode == aco_opcode::s_setprio ||
+       instr->opcode == aco_opcode::s_getreg_b32)
+      return hazard_fail_unreorderable;
+
+   memory_event_set instr_set;
+   memset(&instr_set, 0, sizeof(instr_set));
+   memory_sync_info sync = get_sync_info_with_hack(instr);
+   add_memory_event(&instr_set, instr, &sync);
+
+   memory_event_set *first = &instr_set;
+   memory_event_set *second = &query->mem_events;
+   if (upwards)
+      std::swap(first, second);
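+
+   /* after this, `first` is the event set that comes first in the original
+    * program order: the candidate for downwards moves, the already-scanned
+    * instructions for upwards moves */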
+
+   /* everything after barrier(acquire) happens after the atomics/control_barriers before it;
+    * everything after load(acquire) happens after the load
+    */
+   if ((first->has_control_barrier || first->access_atomic) && second->bar_acquire)
+      return hazard_fail_barrier;
+   if (((first->access_acquire || first->bar_acquire) && second->bar_classes) ||
+       ((first->access_acquire | first->bar_acquire) & (second->access_relaxed | second->access_atomic)))
+      return hazard_fail_barrier;
+
+   /* everything before barrier(release) happens before the atomics/control_barriers after it;
+    * everything before store(release) happens before the store
+    */
+   if (first->bar_release && (second->has_control_barrier || second->access_atomic))
+      return hazard_fail_barrier;
+   if ((first->bar_classes && (second->bar_release || second->access_release)) ||
+       ((first->access_relaxed | first->access_atomic) & (second->bar_release | second->access_release)))
+      return hazard_fail_barrier;
+
+   /* don't move memory barriers around other memory barriers */
+   if (first->bar_classes && second->bar_classes)
+      return hazard_fail_barrier;
+
+   /* Don't move memory accesses to before control barriers. I don't think
+    * this is necessary for the Vulkan memory model, but it might be for GLSL450. */
+   unsigned control_classes = storage_buffer | storage_atomic_counter | storage_image | storage_shared;
+   if (first->has_control_barrier && ((second->access_atomic | second->access_relaxed) & control_classes))
+      return hazard_fail_barrier;
+
+   /* don't move memory loads/stores past potentially aliasing loads/stores */
+   unsigned aliasing_storage = instr->format == Format::SMEM ?
+                               query->aliasing_storage_smem :
+                               query->aliasing_storage;
+   if ((sync.storage & aliasing_storage) && !(sync.semantics & semantic_can_reorder)) {
+      unsigned intersect = sync.storage & aliasing_storage;
+      if (intersect & storage_shared)
+         return hazard_fail_reorder_ds;
+      return hazard_fail_reorder_vmem_smem;
    }
+
+   if ((instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload) &&
+       query->contains_spill)
+      return hazard_fail_spill;
+
+   if (instr->opcode == aco_opcode::s_sendmsg && query->contains_sendmsg)
+      return hazard_fail_reorder_sendmsg;
+
+   return hazard_success;
 }
 
 void schedule_SMEM(sched_ctx& ctx, Block* block,
@@ -243,31 +530,22 @@ void schedule_SMEM(sched_ctx& ctx, Block* block,
    int window_size = SMEM_WINDOW_SIZE;
    int max_moves = SMEM_MAX_MOVES;
    int16_t k = 0;
-   bool can_reorder_cur = can_reorder(current);
 
    /* don't move s_memtime/s_memrealtime */
    if (current->opcode == aco_opcode::s_memtime || current->opcode == aco_opcode::s_memrealtime)
       return;
 
-   /* create the initial set of values which current depends on */
-   std::fill(ctx.depends_on.begin(), ctx.depends_on.end(), false);
-   for (const Operand& op : current->operands) {
-      if (op.isTemp())
-         ctx.depends_on[op.tempId()] = true;
-   }
-
-   /* maintain how many registers remain free when moving instructions */
-   RegisterDemand register_pressure = register_demand[idx];
-
    /* first, check if we have instructions before current to move down */
-   int insert_idx = idx + 1;
-   int moving_interaction = barrier_none;
-   bool moving_spill = false;
+   hazard_query hq;
+   init_hazard_query(&hq);
+   add_to_hazard_query(&hq, current);
+
+   ctx.mv.downwards_init(idx, false, false);
 
    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
       assert(candidate_idx >= 0);
+      assert(candidate_idx == ctx.mv.source_idx);
       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
-      bool can_reorder_candidate = can_reorder(candidate.get());
 
       /* break if we'd make the previous SMEM instruction stall */
       bool can_stall_prev_smem = idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
@@ -275,214 +553,102 @@ void schedule_SMEM(sched_ctx& ctx, Block* block,
          break;
 
       /* break when encountering another MEM instruction, logical_start or barriers */
-      if (!can_reorder_candidate && !can_reorder_cur)
-         break;
       if (candidate->opcode == aco_opcode::p_logical_start)
          break;
-      if (candidate->opcode == aco_opcode::p_exit_early_if)
-         break;
-      if (!can_move_instr(candidate, current, moving_interaction))
-         break;
       if (candidate->isVMEM())
          break;
-      register_pressure.update(register_demand[candidate_idx]);
 
-      /* if current depends on candidate, add additional dependencies and continue */
       bool can_move_down = true;
-      bool writes_exec = false;
-      for (const Definition& def : candidate->definitions) {
-         if (def.isTemp() && ctx.depends_on[def.tempId()])
-            can_move_down = false;
-         if (def.isFixed() && def.physReg() == exec)
-            writes_exec = true;
-      }
-      if (writes_exec)
-         break;
 
-      if (moving_spill && is_spill_reload(candidate))
+      HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
+      if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
+          haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
+          haz == hazard_fail_export)
          can_move_down = false;
-      if ((moving_interaction & barrier_shared) && candidate->format == Format::DS)
-         can_move_down = false;
-      moving_interaction |= get_barrier_interaction(candidate.get());
-      moving_spill |= is_spill_reload(candidate);
-      if (!can_move_down) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.depends_on[op.tempId()] = true;
-         }
-         can_reorder_cur &= can_reorder_candidate;
-         continue;
-      }
+      else if (haz != hazard_success)
+         break;
 
-      bool register_pressure_unknown = false;
-      /* check if one of candidate's operands is killed by depending instruction */
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && ctx.depends_on[op.tempId()]) {
-            // FIXME: account for difference in register pressure
-            register_pressure_unknown = true;
-         }
-      }
-      if (register_pressure_unknown) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.depends_on[op.tempId()] = true;
-         }
-         can_reorder_cur &= can_reorder_candidate;
+      /* don't use LDS/GDS instructions to hide latency since it can
+       * significantly worsen LDS scheduling */
+      if (candidate->format == Format::DS || !can_move_down) {
+         add_to_hazard_query(&hq, candidate.get());
+         ctx.mv.downwards_skip();
          continue;
       }
 
-      /* check if register pressure is low enough: the diff is negative if register pressure is increased */
-      const RegisterDemand candidate_diff = getLiveChanges(candidate);
-      const RegisterDemand tempDemand = getTempRegisters(candidate);
-      if (RegisterDemand(register_pressure - candidate_diff).exceeds(ctx.max_registers))
-         break;
-      const RegisterDemand tempDemand2 = getTempRegisters(block->instructions[insert_idx - 1]);
-      const RegisterDemand new_demand  = register_demand[insert_idx - 1] - tempDemand2 + tempDemand;
-      if (new_demand.exceeds(ctx.max_registers))
+      MoveResult res = ctx.mv.downwards_move(false);
+      if (res == move_fail_ssa || res == move_fail_rar) {
+         add_to_hazard_query(&hq, candidate.get());
+         ctx.mv.downwards_skip();
+         continue;
+      } else if (res == move_fail_pressure) {
          break;
-      // TODO: we might want to look further to find a sequence of instructions to move down which doesn't exceed reg pressure
-
-      /* move the candidate below the memory load */
-      move_element(block->instructions, candidate_idx, insert_idx);
-
-      /* update register pressure */
-      move_element(register_demand, candidate_idx, insert_idx);
-      for (int i = candidate_idx; i < insert_idx - 1; i++) {
-         register_demand[i] -= candidate_diff;
       }
-      register_demand[insert_idx - 1] = new_demand;
-      register_pressure -= candidate_diff;
 
       if (candidate_idx < ctx.last_SMEM_dep_idx)
          ctx.last_SMEM_stall++;
-      insert_idx--;
       k++;
    }
 
-   /* create the initial set of values which depend on current */
-   std::fill(ctx.depends_on.begin(), ctx.depends_on.end(), false);
-   std::fill(ctx.RAR_dependencies.begin(), ctx.RAR_dependencies.end(), false);
-   for (const Definition& def : current->definitions) {
-      if (def.isTemp())
-         ctx.depends_on[def.tempId()] = true;
-   }
-
    /* find the first instruction depending on current or find another MEM */
-   insert_idx = idx + 1;
-   moving_interaction = barrier_none;
-   moving_spill = false;
-   can_reorder_cur = true;
+   ctx.mv.upwards_init(idx + 1, false);
 
    bool found_dependency = false;
    /* second, check if we have instructions after current to move up */
    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int) idx + window_size; candidate_idx++) {
+      assert(candidate_idx == ctx.mv.source_idx);
       assert(candidate_idx < (int) block->instructions.size());
       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
-      bool can_reorder_candidate = can_reorder(candidate.get());
 
       if (candidate->opcode == aco_opcode::p_logical_end)
          break;
-      if (!can_move_instr(candidate, current, moving_interaction))
-         break;
-
-      const bool writes_exec = std::any_of(candidate->definitions.begin(), candidate->definitions.end(),
-                                           [](const Definition& def) { return def.isFixed() && def.physReg() == exec;});
-      if (writes_exec)
-         break;
 
       /* check if candidate depends on current */
-      bool is_dependency = std::any_of(candidate->operands.begin(), candidate->operands.end(),
-                                       [&ctx](const Operand& op) { return op.isTemp() && ctx.depends_on[op.tempId()];});
+      bool is_dependency = !found_dependency && !ctx.mv.upwards_check_deps();
       /* no need to steal from following VMEM instructions */
       if (is_dependency && candidate->isVMEM())
          break;
-      if (moving_spill && is_spill_reload(candidate))
-         is_dependency = true;
-      if ((moving_interaction & barrier_shared) && candidate->format == Format::DS)
-         is_dependency = true;
-      moving_interaction |= get_barrier_interaction(candidate.get());
-      moving_spill |= is_spill_reload(candidate);
+
+      if (found_dependency) {
+         HazardResult haz = perform_hazard_query(&hq, candidate.get(), true);
+         if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
+             haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
+             haz == hazard_fail_export)
+            is_dependency = true;
+         else if (haz != hazard_success)
+            break;
+      }
+
       if (is_dependency) {
-         for (const Definition& def : candidate->definitions) {
-            if (def.isTemp())
-               ctx.depends_on[def.tempId()] = true;
-         }
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.RAR_dependencies[op.tempId()] = true;
-         }
          if (!found_dependency) {
-            insert_idx = candidate_idx;
+            ctx.mv.upwards_set_insert_idx(candidate_idx);
+            init_hazard_query(&hq);
             found_dependency = true;
-            /* init register pressure */
-            register_pressure = register_demand[insert_idx - 1];
          }
       }
 
-      if (!can_reorder_candidate && !can_reorder_cur)
-         break;
-
-      if (!found_dependency) {
-         k++;
+      if (is_dependency || !found_dependency) {
+         if (found_dependency)
+            add_to_hazard_query(&hq, candidate.get());
+         else
+            k++;
+         ctx.mv.upwards_skip();
          continue;
       }
 
-      /* update register pressure */
-      register_pressure.update(register_demand[candidate_idx - 1]);
-
-      if (is_dependency) {
-         can_reorder_cur &= can_reorder_candidate;
-         continue;
-      }
-      assert(insert_idx != idx);
-
-      // TODO: correctly calculate register pressure for this case
-      bool register_pressure_unknown = false;
-      /* check if candidate uses/kills an operand which is used by a dependency */
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && ctx.RAR_dependencies[op.tempId()])
-            register_pressure_unknown = true;
-      }
-      if (register_pressure_unknown) {
-         if (candidate->isVMEM())
+      MoveResult res = ctx.mv.upwards_move();
+      if (res == move_fail_ssa || res == move_fail_rar) {
+         /* no need to steal from following VMEM instructions */
+         if (res == move_fail_ssa && candidate->isVMEM())
             break;
-         for (const Definition& def : candidate->definitions) {
-            if (def.isTemp())
-               ctx.RAR_dependencies[def.tempId()] = true;
-         }
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.RAR_dependencies[op.tempId()] = true;
-         }
-         can_reorder_cur &= can_reorder_candidate;
+         add_to_hazard_query(&hq, candidate.get());
+         ctx.mv.upwards_skip();
          continue;
-      }
-
-      /* check if register pressure is low enough: the diff is negative if register pressure is decreased */
-      const RegisterDemand candidate_diff = getLiveChanges(candidate);
-      const RegisterDemand temp = getTempRegisters(candidate);
-      if (RegisterDemand(register_pressure + candidate_diff).exceeds(ctx.max_registers))
-         break;
-      const RegisterDemand temp2 = getTempRegisters(block->instructions[insert_idx - 1]);
-      const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + candidate_diff + temp;
-      if (new_demand.exceeds(ctx.max_registers))
+      } else if (res == move_fail_pressure) {
          break;
-
-      /* move the candidate above the insert_idx */
-      move_element(block->instructions, candidate_idx, insert_idx);
-
-      /* update register pressure */
-      move_element(register_demand, candidate_idx, insert_idx);
-      for (int i = insert_idx + 1; i <= candidate_idx; i++) {
-         register_demand[i] += candidate_diff;
       }
-      register_demand[insert_idx] = new_demand;
-      register_pressure += candidate_diff;
-      insert_idx++;
       k++;
    }
 
-   ctx.last_SMEM_dep_idx = found_dependency ? insert_idx : 0;
+   ctx.last_SMEM_dep_idx = found_dependency ? ctx.mv.insert_idx : 0;
    ctx.last_SMEM_stall = 10 - ctx.num_waves - k;
 }
 
@@ -495,300 +661,133 @@ void schedule_VMEM(sched_ctx& ctx, Block* block,
    int max_moves = VMEM_MAX_MOVES;
    int clause_max_grab_dist = VMEM_CLAUSE_MAX_GRAB_DIST;
    int16_t k = 0;
-   /* initially true as we don't pull other VMEM instructions
-    * through the current instruction */
-   bool can_reorder_vmem = true;
-   bool can_reorder_smem = true;
-
-   /* create the initial set of values which current depends on */
-   std::fill(ctx.depends_on.begin(), ctx.depends_on.end(), false);
-   std::fill(ctx.RAR_dependencies.begin(), ctx.RAR_dependencies.end(), false);
-   std::fill(ctx.new_RAR_dependencies.begin(), ctx.new_RAR_dependencies.end(), false);
-   for (const Operand& op : current->operands) {
-      if (op.isTemp()) {
-         ctx.depends_on[op.tempId()] = true;
-         if (op.isFirstKill())
-            ctx.RAR_dependencies[op.tempId()] = true;
-      }
-   }
-
-   /* maintain how many registers remain free when moving instructions */
-   RegisterDemand register_pressure_indep = register_demand[idx];
-   RegisterDemand register_pressure_clause = register_demand[idx];
 
    /* first, check if we have instructions before current to move down */
-   int indep_insert_idx = idx + 1;
-   int clause_insert_idx = idx;
-   int moving_interaction = barrier_none;
-   bool moving_spill = false;
+   hazard_query indep_hq;
+   hazard_query clause_hq;
+   init_hazard_query(&indep_hq);
+   init_hazard_query(&clause_hq);
+   add_to_hazard_query(&indep_hq, current);
+
+   ctx.mv.downwards_init(idx, true, true);
 
    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
+      assert(candidate_idx == ctx.mv.source_idx);
       assert(candidate_idx >= 0);
       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
-      bool can_reorder_candidate = can_reorder(candidate.get());
       bool is_vmem = candidate->isVMEM() || candidate->isFlatOrGlobal();
 
       /* break when encountering another VMEM instruction, logical_start or barriers */
-      if (!can_reorder_smem && candidate->format == Format::SMEM && !can_reorder_candidate)
-         break;
       if (candidate->opcode == aco_opcode::p_logical_start)
          break;
-      if (candidate->opcode == aco_opcode::p_exit_early_if)
-         break;
-      if (!can_move_instr(candidate, current, moving_interaction))
-         break;
 
       /* break if we'd make the previous SMEM instruction stall */
       bool can_stall_prev_smem = idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
       if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
          break;
-      register_pressure_indep.update(register_demand[candidate_idx]);
 
       bool part_of_clause = false;
       if (current->isVMEM() == candidate->isVMEM()) {
          bool same_resource = true;
          if (current->isVMEM())
-            same_resource = candidate->operands[1].tempId() == current->operands[1].tempId();
-         bool can_reorder = can_reorder_vmem || can_reorder_candidate;
-         int grab_dist = clause_insert_idx - candidate_idx;
+            same_resource = candidate->operands[0].tempId() == current->operands[0].tempId();
+         int grab_dist = ctx.mv.insert_idx_clause - candidate_idx;
          /* We can't easily tell how much this will decrease the def-to-use
           * distances, so just use how far it will be moved as a heuristic. */
-         part_of_clause = can_reorder && same_resource && grab_dist < clause_max_grab_dist;
+         part_of_clause = same_resource && grab_dist < clause_max_grab_dist;
       }
 
       /* if current depends on candidate, add additional dependencies and continue */
       bool can_move_down = !is_vmem || part_of_clause;
-      bool writes_exec = false;
-      for (const Definition& def : candidate->definitions) {
-         if (def.isTemp() && ctx.depends_on[def.tempId()])
-            can_move_down = false;
-         if (def.isFixed() && def.physReg() == exec)
-            writes_exec = true;
-      }
-      if (writes_exec)
-         break;
 
-      if (moving_spill && is_spill_reload(candidate))
-         can_move_down = false;
-      if ((moving_interaction & barrier_shared) && candidate->format == Format::DS)
+      HazardResult haz = perform_hazard_query(part_of_clause ? &clause_hq : &indep_hq, candidate.get(), false);
+      if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
+          haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier ||
+          haz == hazard_fail_export)
          can_move_down = false;
-      moving_interaction |= get_barrier_interaction(candidate.get());
-      moving_spill |= is_spill_reload(candidate);
+      else if (haz != hazard_success)
+         break;
+
       if (!can_move_down) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp()) {
-               ctx.depends_on[op.tempId()] = true;
-               if (op.isFirstKill()) {
-                  ctx.RAR_dependencies[op.tempId()] = true;
-                  ctx.new_RAR_dependencies[op.tempId()] = true;
-               }
-            }
-         }
-         register_pressure_clause.update(register_demand[candidate_idx]);
-         can_reorder_smem &= candidate->format != Format::SMEM || can_reorder_candidate;
-         can_reorder_vmem &= !is_vmem || can_reorder_candidate;
+         add_to_hazard_query(&indep_hq, candidate.get());
+         add_to_hazard_query(&clause_hq, candidate.get());
+         ctx.mv.downwards_skip();
          continue;
       }
 
-      if (part_of_clause) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp()) {
-               ctx.depends_on[op.tempId()] = true;
-               if (op.isFirstKill())
-                  ctx.RAR_dependencies[op.tempId()] = true;
-            }
-         }
-      }
-
-      bool register_pressure_unknown = false;
-      std::vector<bool>& RAR_deps = part_of_clause ? ctx.new_RAR_dependencies : ctx.RAR_dependencies;
-      /* check if one of candidate's operands is killed by depending instruction */
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && RAR_deps[op.tempId()]) {
-            // FIXME: account for difference in register pressure
-            register_pressure_unknown = true;
-         }
-      }
-      if (register_pressure_unknown) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp()) {
-               ctx.depends_on[op.tempId()] = true;
-               if (op.isFirstKill()) {
-                  ctx.RAR_dependencies[op.tempId()] = true;
-                  ctx.new_RAR_dependencies[op.tempId()] = true;
-               }
-            }
-         }
-         register_pressure_clause.update(register_demand[candidate_idx]);
-         can_reorder_smem &= candidate->format != Format::SMEM || can_reorder_candidate;
-         can_reorder_vmem &= !is_vmem || can_reorder_candidate;
+      Instruction *candidate_ptr = candidate.get();
+      MoveResult res = ctx.mv.downwards_move(part_of_clause);
+      if (res == move_fail_ssa || res == move_fail_rar) {
+         add_to_hazard_query(&indep_hq, candidate.get());
+         add_to_hazard_query(&clause_hq, candidate.get());
+         ctx.mv.downwards_skip();
          continue;
-      }
-
-      int insert_idx = part_of_clause ? clause_insert_idx : indep_insert_idx;
-      RegisterDemand register_pressure = part_of_clause ? register_pressure_clause : register_pressure_indep;
-
-      /* check if register pressure is low enough: the diff is negative if register pressure is increased */
-      const RegisterDemand candidate_diff = getLiveChanges(candidate);
-      const RegisterDemand temp = getTempRegisters(candidate);;
-      if (RegisterDemand(register_pressure - candidate_diff).exceeds(ctx.max_registers))
-         break;
-      const RegisterDemand temp2 = getTempRegisters(block->instructions[insert_idx - 1]);
-      const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + temp;
-      if (new_demand.exceeds(ctx.max_registers))
+      } else if (res == move_fail_pressure) {
          break;
-      // TODO: we might want to look further to find a sequence of instructions to move down which doesn't exceed reg pressure
-
-      /* move the candidate below the memory load */
-      move_element(block->instructions, candidate_idx, insert_idx);
-
-      /* update register pressure */
-      move_element(register_demand, candidate_idx, insert_idx);
-      for (int i = candidate_idx; i < insert_idx - 1; i++) {
-         register_demand[i] -= candidate_diff;
-      }
-      register_demand[insert_idx - 1] = new_demand;
-      register_pressure_clause -= candidate_diff;
-      clause_insert_idx--;
-      if (!part_of_clause) {
-         register_pressure_indep -= candidate_diff;
-         indep_insert_idx--;
       }
+      if (part_of_clause)
+         add_to_hazard_query(&indep_hq, candidate_ptr);
       k++;
       if (candidate_idx < ctx.last_SMEM_dep_idx)
          ctx.last_SMEM_stall++;
    }
 
-   /* create the initial set of values which depend on current */
-   std::fill(ctx.depends_on.begin(), ctx.depends_on.end(), false);
-   std::fill(ctx.RAR_dependencies.begin(), ctx.RAR_dependencies.end(), false);
-   for (const Definition& def : current->definitions) {
-      if (def.isTemp())
-         ctx.depends_on[def.tempId()] = true;
-   }
-
    /* find the first instruction depending on current or find another VMEM */
-   RegisterDemand register_pressure;
-   int insert_idx = idx;
-   moving_interaction = barrier_none;
-   moving_spill = false;
-   // TODO: differentiate between loads and stores (load-load can always reorder)
-   can_reorder_vmem = true;
-   can_reorder_smem = true;
+   ctx.mv.upwards_init(idx + 1, true);
 
    bool found_dependency = false;
    /* second, check if we have instructions after current to move up */
    for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int) idx + window_size; candidate_idx++) {
+      assert(candidate_idx == ctx.mv.source_idx);
       assert(candidate_idx < (int) block->instructions.size());
       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
-      bool can_reorder_candidate = can_reorder(candidate.get());
       bool is_vmem = candidate->isVMEM() || candidate->isFlatOrGlobal();
 
       if (candidate->opcode == aco_opcode::p_logical_end)
          break;
-      if (!can_move_instr(candidate, current, moving_interaction))
-         break;
-
-      const bool writes_exec = std::any_of(candidate->definitions.begin(), candidate->definitions.end(),
-                                           [](const Definition& def) {return def.isFixed() && def.physReg() == exec; });
-      if (writes_exec)
-         break;
 
       /* check if candidate depends on current */
       bool is_dependency = false;
-      if (candidate->format == Format::SMEM)
-         is_dependency = !can_reorder_smem && !can_reorder_candidate;
-      if (is_vmem)
-         is_dependency = !can_reorder_vmem && !can_reorder_candidate;
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && ctx.depends_on[op.tempId()]) {
+      if (found_dependency) {
+         HazardResult haz = perform_hazard_query(&indep_hq, candidate.get(), true);
+         if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
+             haz == hazard_fail_reorder_vmem_smem || haz == hazard_fail_reorder_sendmsg ||
+             haz == hazard_fail_barrier || haz == hazard_fail_export)
             is_dependency = true;
+         else if (haz != hazard_success)
             break;
-         }
       }
-      if (moving_spill && is_spill_reload(candidate))
-         is_dependency = true;
-      if ((moving_interaction & barrier_shared) && candidate->format == Format::DS)
-         is_dependency = true;
-      moving_interaction |= get_barrier_interaction(candidate.get());
-      moving_spill |= is_spill_reload(candidate);
-      if (is_dependency) {
-         for (const Definition& def : candidate->definitions) {
-            if (def.isTemp())
-               ctx.depends_on[def.tempId()] = true;
-         }
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.RAR_dependencies[op.tempId()] = true;
-         }
-         /* update flag whether we can reorder other memory instructions */
-         can_reorder_smem &= candidate->format != Format::SMEM || can_reorder_candidate;
-         can_reorder_vmem &= !is_vmem || can_reorder_candidate;
 
+      is_dependency |= !found_dependency && !ctx.mv.upwards_check_deps();
+      if (is_dependency) {
          if (!found_dependency) {
-            insert_idx = candidate_idx;
+            ctx.mv.upwards_set_insert_idx(candidate_idx);
+            init_hazard_query(&indep_hq);
             found_dependency = true;
-            /* init register pressure */
-            register_pressure = register_demand[insert_idx - 1];
-            continue;
          }
-
       } else if (is_vmem) {
          /* don't move up dependencies of other VMEM instructions */
          for (const Definition& def : candidate->definitions) {
             if (def.isTemp())
-               ctx.depends_on[def.tempId()] = true;
+               ctx.mv.depends_on[def.tempId()] = true;
          }
       }
 
-      /* update register pressure */
-      register_pressure.update(register_demand[candidate_idx - 1]);
-
-      if (is_dependency || !found_dependency)
-         continue;
-      assert(insert_idx != idx);
-
-      bool register_pressure_unknown = false;
-      /* check if candidate uses/kills an operand which is used by a dependency */
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && op.isFirstKill() && ctx.RAR_dependencies[op.tempId()])
-            register_pressure_unknown = true;
-      }
-      if (register_pressure_unknown) {
-         for (const Definition& def : candidate->definitions) {
-            if (def.isTemp())
-               ctx.depends_on[def.tempId()] = true;
-         }
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp())
-               ctx.RAR_dependencies[op.tempId()] = true;
-         }
-         can_reorder_smem &= candidate->format != Format::SMEM || can_reorder_candidate;
-         can_reorder_vmem &= !is_vmem || can_reorder_candidate;
+      if (is_dependency || !found_dependency) {
+         if (found_dependency)
+            add_to_hazard_query(&indep_hq, candidate.get());
+         ctx.mv.upwards_skip();
          continue;
       }
 
-      /* check if register pressure is low enough: the diff is negative if register pressure is decreased */
-      const RegisterDemand candidate_diff = getLiveChanges(candidate);
-      const RegisterDemand temp = getTempRegisters(candidate);
-      if (RegisterDemand(register_pressure + candidate_diff).exceeds(ctx.max_registers))
-         break;
-      const RegisterDemand temp2 = getTempRegisters(block->instructions[insert_idx - 1]);
-      const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + candidate_diff + temp;
-      if (new_demand.exceeds(ctx.max_registers))
+      MoveResult res = ctx.mv.upwards_move();
+      if (res == move_fail_ssa || res == move_fail_rar) {
+         add_to_hazard_query(&indep_hq, candidate.get());
+         ctx.mv.upwards_skip();
+         continue;
+      } else if (res == move_fail_pressure) {
          break;
-
-      /* move the candidate above the insert_idx */
-      move_element(block->instructions, candidate_idx, insert_idx);
-
-      /* update register pressure */
-      move_element(register_demand, candidate_idx, insert_idx);
-      for (int i = insert_idx + 1; i <= candidate_idx; i++) {
-         register_demand[i] += candidate_diff;
       }
-      register_demand[insert_idx] = new_demand;
-      register_pressure += candidate_diff;
-      insert_idx++;
       k++;
    }
 }
@@ -802,111 +801,39 @@ void schedule_position_export(sched_ctx& ctx, Block* block,
    int max_moves = POS_EXP_MAX_MOVES;
    int16_t k = 0;
 
-   /* create the initial set of values which current depends on */
-   std::fill(ctx.depends_on.begin(), ctx.depends_on.end(), false);
-   std::fill(ctx.RAR_dependencies.begin(), ctx.RAR_dependencies.end(), false);
-   for (const Operand& op : current->operands) {
-      if (op.isTemp()) {
-         ctx.depends_on[op.tempId()] = true;
-         if (op.isFirstKill())
-            ctx.RAR_dependencies[op.tempId()] = true;
-      }
-   }
-
-   /* maintain how many registers remain free when moving instructions */
-   RegisterDemand register_pressure = register_demand[idx];
+   ctx.mv.downwards_init(idx, true, false);
 
-   /* first, check if we have instructions before current to move down */
-   int insert_idx = idx + 1;
-   int moving_interaction = barrier_none;
-   bool moving_spill = false;
+   hazard_query hq;
+   init_hazard_query(&hq);
+   add_to_hazard_query(&hq, current);
 
    for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
       assert(candidate_idx >= 0);
       aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
 
-      /* break when encountering logical_start or barriers */
       if (candidate->opcode == aco_opcode::p_logical_start)
          break;
-      if (candidate->opcode == aco_opcode::p_exit_early_if)
-         break;
       if (candidate->isVMEM() || candidate->format == Format::SMEM || candidate->isFlatOrGlobal())
          break;
-      if (!can_move_instr(candidate, current, moving_interaction))
-         break;
-
-      register_pressure.update(register_demand[candidate_idx]);
 
-      /* if current depends on candidate, add additional dependencies and continue */
-      bool can_move_down = true;
-      bool writes_exec = false;
-      for (unsigned i = 0; i < candidate->definitions.size(); i++) {
-         if (candidate->definitions[i].isTemp() && ctx.depends_on[candidate->definitions[i].tempId()])
-            can_move_down = false;
-         if (candidate->definitions[i].isFixed() && candidate->definitions[i].physReg() == exec)
-            writes_exec = true;
-      }
-      if (writes_exec)
+      HazardResult haz = perform_hazard_query(&hq, candidate.get(), false);
+      if (haz == hazard_fail_exec || haz == hazard_fail_unreorderable)
          break;
 
-      if (moving_spill && is_spill_reload(candidate))
-         can_move_down = false;
-      if ((moving_interaction & barrier_shared) && candidate->format == Format::DS)
-         can_move_down = false;
-      moving_interaction |= get_barrier_interaction(candidate.get());
-      moving_spill |= is_spill_reload(candidate);
-      if (!can_move_down) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp()) {
-               ctx.depends_on[op.tempId()] = true;
-               if (op.isFirstKill())
-                  ctx.RAR_dependencies[op.tempId()] = true;
-            }
-         }
+      if (haz != hazard_success) {
+         add_to_hazard_query(&hq, candidate.get());
+         ctx.mv.downwards_skip();
          continue;
       }
 
-      bool register_pressure_unknown = false;
-      /* check if one of candidate's operands is killed by depending instruction */
-      for (const Operand& op : candidate->operands) {
-         if (op.isTemp() && ctx.RAR_dependencies[op.tempId()]) {
-            // FIXME: account for difference in register pressure
-            register_pressure_unknown = true;
-         }
-      }
-      if (register_pressure_unknown) {
-         for (const Operand& op : candidate->operands) {
-            if (op.isTemp()) {
-               ctx.depends_on[op.tempId()] = true;
-               if (op.isFirstKill())
-                  ctx.RAR_dependencies[op.tempId()] = true;
-            }
-         }
+      MoveResult res = ctx.mv.downwards_move(false);
+      if (res == move_fail_ssa || res == move_fail_rar) {
+         add_to_hazard_query(&hq, candidate.get());
+         ctx.mv.downwards_skip();
          continue;
-      }
-
-      /* check if register pressure is low enough: the diff is negative if register pressure is increased */
-      const RegisterDemand candidate_diff = getLiveChanges(candidate);
-      const RegisterDemand temp = getTempRegisters(candidate);;
-      if (RegisterDemand(register_pressure - candidate_diff).exceeds(ctx.max_registers))
+      } else if (res == move_fail_pressure) {
          break;
-      const RegisterDemand temp2 = getTempRegisters(block->instructions[insert_idx - 1]);
-      const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + temp;
-      if (new_demand.exceeds(ctx.max_registers))
-         break;
-      // TODO: we might want to look further to find a sequence of instructions to move down which doesn't exceed reg pressure
-
-      /* move the candidate below the export */
-      move_element(block->instructions, candidate_idx, insert_idx);
-
-      /* update register pressure */
-      move_element(register_demand, candidate_idx, insert_idx);
-      for (int i = candidate_idx; i < insert_idx - 1; i++) {
-         register_demand[i] -= candidate_diff;
       }
-      register_demand[insert_idx - 1] = new_demand;
-      register_pressure -=  candidate_diff;
-      insert_idx--;
       k++;
    }
 }
@@ -915,6 +842,8 @@ void schedule_block(sched_ctx& ctx, Program *program, Block* block, live& live_v
 {
    ctx.last_SMEM_dep_idx = 0;
    ctx.last_SMEM_stall = INT16_MIN;
+   ctx.mv.block = block;
+   ctx.mv.register_demand = live_vars.register_demand[block->index].data();
 
    /* go through all instructions and find memory loads */
    for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
@@ -923,13 +852,18 @@ void schedule_block(sched_ctx& ctx, Program *program, Block* block, live& live_v
       if (current->definitions.empty())
          continue;
 
-      if (current->isVMEM() || current->isFlatOrGlobal())
+      if (current->isVMEM() || current->isFlatOrGlobal()) {
+         ctx.mv.current = current;
          schedule_VMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
-      if (current->format == Format::SMEM)
+      }
+
+      if (current->format == Format::SMEM) {
+         ctx.mv.current = current;
          schedule_SMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
+      }
    }
 
-   if ((program->stage & hw_vs) && block->index == program->blocks.size() - 1) {
+   if ((program->stage & (hw_vs | hw_ngg_gs)) && (block->kind & block_kind_export_end)) {
       /* Try to move position exports as far up as possible, to reduce register
        * usage and because ISA reference guides say so. */
       for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
@@ -937,8 +871,10 @@ void schedule_block(sched_ctx& ctx, Program *program, Block* block, live& live_v
 
          if (current->format == Format::EXP) {
             unsigned target = static_cast<Export_instruction*>(current)->dest;
-            if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PARAM)
+            if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PARAM) {
+               ctx.mv.current = current;
                schedule_position_export(ctx, block, live_vars.register_demand[block->index], current, idx);
+            }
          }
       }
    }
@@ -953,28 +889,32 @@ void schedule_block(sched_ctx& ctx, Program *program, Block* block, live& live_v
 
 void schedule_program(Program *program, live& live_vars)
 {
+   /* don't use program->max_reg_demand because that is affected by max_waves_per_simd */
+   RegisterDemand demand;
+   for (Block& block : program->blocks)
+      demand.update(block.register_demand);
+
    sched_ctx ctx;
-   ctx.depends_on.resize(program->peekAllocationId());
-   ctx.RAR_dependencies.resize(program->peekAllocationId());
-   ctx.new_RAR_dependencies.resize(program->peekAllocationId());
+   ctx.mv.depends_on.resize(program->peekAllocationId());
+   ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
+   ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());
    /* Allowing the scheduler to reduce the number of waves to as low as 5
     * improves performance of Thrones of Britannia significantly and doesn't
     * seem to hurt anything else. */
    if (program->num_waves <= 5)
       ctx.num_waves = program->num_waves;
-   else if (program->max_reg_demand.vgpr >= 32)
+   else if (demand.vgpr >= 29)
       ctx.num_waves = 5;
-   else if (program->max_reg_demand.vgpr >= 28)
+   else if (demand.vgpr >= 25)
       ctx.num_waves = 6;
-   else if (program->max_reg_demand.vgpr >= 24)
-      ctx.num_waves = 7;
    else
-      ctx.num_waves = 8;
+      ctx.num_waves = 7;
    ctx.num_waves = std::max<uint16_t>(ctx.num_waves, program->min_waves);
+   ctx.num_waves = std::min<uint16_t>(ctx.num_waves, program->max_waves);
 
    assert(ctx.num_waves > 0 && ctx.num_waves <= program->num_waves);
-   ctx.max_registers = { int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves) - 2),
-                         int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves))};
+   ctx.mv.max_registers = { int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves) - 2),
+                            int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves))};
 
    for (Block& block : program->blocks)
       schedule_block(ctx, program, &block, live_vars);