MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
- si_emit_wait_fence(cs, va, 1, 0xffffffff);
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, va, 1, 0xffffffff);
assert(cmd_buffer->cs->cdw <= cdw_max);
}
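Usage note: with the op argument exposed, callers are no longer limited to
equality waits. A hypothetical caller (not part of this patch) could poll
until a fence reaches at least a given value:

	/* Block the ME until the 32-bit value at `va` is >= `ref`. */
	radv_cp_wait_mem(cs, WAIT_REG_MEM_GREATER_OR_EQUAL, va, ref, 0xffffffff);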
uint32_t new_fence,
uint64_t gfx9_eop_bug_va);
-void si_emit_wait_fence(struct radeon_cmdbuf *cs,
- uint64_t va, uint32_t ref,
- uint32_t mask);
+void radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
+ uint32_t ref, uint32_t mask);
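For reference, the three ops accepted by radv_cp_wait_mem map onto the
WAIT_REG_MEM function field. The values below are assumed to match the usual
sid.h encodings and are shown only for orientation:

	#define WAIT_REG_MEM_EQUAL              3
	#define WAIT_REG_MEM_NOT_EQUAL          4
	#define WAIT_REG_MEM_GREATER_OR_EQUAL   5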
void si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
enum chip_class chip_class,
uint32_t *fence_ptr, uint64_t va,
uint64_t avail_va = va + pool->availability_offset + 4 * query;
/* This waits on the ME. All copies below are done on the ME */
- si_emit_wait_fence(cs, avail_va, 1, 0xffffffff);
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL,
+ avail_va, 1, 0xffffffff);
}
}
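Context: the GPU writes 1 to the availability DWORD at avail_va once the
query result has landed, so an equality wait against 1 with a full
0xffffffff mask reproduces the old si_emit_wait_fence behavior exactly. The
mask also permits narrower waits; a hypothetical single-bit poll (not part
of this patch):

	/* Wait until bit 0 at avail_va becomes non-zero. */
	radv_cp_wait_mem(cs, WAIT_REG_MEM_NOT_EQUAL, avail_va, 0, 0x1);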
radv_query_shader(cmd_buffer, &cmd_buffer->device->meta_state.query.pipeline_statistics_query_pipeline,
}
void
-si_emit_wait_fence(struct radeon_cmdbuf *cs,
- uint64_t va, uint32_t ref,
- uint32_t mask)
+radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
+ uint32_t ref, uint32_t mask)
{
+ assert(op == WAIT_REG_MEM_EQUAL ||
+ op == WAIT_REG_MEM_NOT_EQUAL ||
+ op == WAIT_REG_MEM_GREATER_OR_EQUAL);
+
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
- radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+ radeon_emit(cs, op | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, ref); /* reference value */
radeon_emit(cs, mask); /* mask */
radeon_emit(cs, 4); /* poll interval */
}
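Note: the PKT3 count of 5 covers the six payload dwords emitted above: the
function/mem-space word, the poll address lo/hi, the reference value, the
mask applied to the fetched dword, and the poll interval. WAIT_REG_MEM with
memory space 1 polls memory rather than a register and executes on the ME,
which is what makes later CP work wait on the condition.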
EOP_DATA_SEL_VALUE_32BIT,
flush_va, old_fence, *flush_cnt,
gfx9_eop_bug_va);
- si_emit_wait_fence(cs, flush_va, *flush_cnt, 0xffffffff);
+ radv_cp_wait_mem(cs, WAIT_REG_MEM_EQUAL, flush_va,
+ *flush_cnt, 0xffffffff);
}
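Context (an inference from the surrounding code, not stated in the patch):
the si_cs_emit_write_event_eop call above arranges for a bottom-of-pipe
event to write the bumped *flush_cnt to flush_va once prior work drains;
the equality wait then stalls the ME until that write lands, turning the
asynchronous EOP fence into a synchronous point in the command stream.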
/* VGT state sync */