Compute and SDMA will be added to it.
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
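For context, a rough sketch of the dispatch this split is aiming at. Only the CP DMA path exists after this patch; si_compute_copy_buffer, si_sdma_copy_buffer, and the selection heuristic below are hypothetical placeholders, while the rest matches the code added here:

    /* Sketch of the intended future shape of si_copy_buffer. */
    void si_copy_buffer(struct si_context *sctx,
                        struct pipe_resource *dst, struct pipe_resource *src,
                        uint64_t dst_offset, uint64_t src_offset, unsigned size)
    {
            enum si_coherency coher = SI_COHERENCY_SHADER;
            enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);

            if (!size)
                    return;

            /* Future: pick a copy method here when it wins, e.g.
             *   if (compute_is_faster)  <-- hypothetical heuristic
             *           return si_compute_copy_buffer(...);  <-- hypothetical
             *   if (sdma_is_idle)       <-- hypothetical heuristic
             *           return si_sdma_copy_buffer(...);     <-- hypothetical
             * For now, CP DMA is the only method. */
            si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                                  0, coher, cache_policy);
    }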
/* Handle buffers first. */
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
- si_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width, 0, -1);
+ si_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width);
return;
}
*
* \param user_flags bitmask of SI_CPDMA_*
*/
-void si_copy_buffer(struct si_context *sctx,
- struct pipe_resource *dst, struct pipe_resource *src,
- uint64_t dst_offset, uint64_t src_offset, unsigned size,
- unsigned user_flags, enum si_cache_policy cache_policy)
+void si_cp_dma_copy_buffer(struct si_context *sctx,
+ struct pipe_resource *dst, struct pipe_resource *src,
+ uint64_t dst_offset, uint64_t src_offset, unsigned size,
+ unsigned user_flags, enum si_coherency coher,
+ enum si_cache_policy cache_policy)
{
uint64_t main_dst_offset, main_src_offset;
unsigned skipped_size = 0;
unsigned realign_size = 0;
- enum si_coherency coher = SI_COHERENCY_SHADER;
bool is_first = true;
- if (!size)
- return;
-
- if (cache_policy == -1)
- cache_policy = get_cache_policy(sctx, coher);
+ assert(size);
if (dst != src || dst_offset != src_offset) {
/* Mark the buffer range of destination as valid (initialized),
si_cp_dma_realign_engine(sctx, realign_size, user_flags, coher,
cache_policy, &is_first);
}
+}
+
+void si_copy_buffer(struct si_context *sctx,
+ struct pipe_resource *dst, struct pipe_resource *src,
+ uint64_t dst_offset, uint64_t src_offset, unsigned size)
+{
+ enum si_coherency coher = SI_COHERENCY_SHADER;
+ enum si_cache_policy cache_policy = get_cache_policy(sctx, coher);
+
+ if (!size)
+ return;
+
+ si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
+ 0, coher, cache_policy);
if (cache_policy != L2_BYPASS)
r600_resource(dst)->TC_L2_dirty = true;
{
assert(sctx->chip_class >= CIK);
- si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL, L2_LRU);
+ si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size,
+ SI_CPDMA_SKIP_ALL, SI_COHERENCY_SHADER, L2_LRU);
}
static void cik_prefetch_shader_async(struct si_context *sctx,
r600_resource(buf)->gpu_address = 0; /* cause a VM fault */
if (sscreen->debug_flags & DBG(TEST_VMFAULT_CP)) {
- si_copy_buffer(sctx, buf, buf, 0, 4, 4, 0, -1);
+ si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, 0,
+ SI_COHERENCY_NONE, L2_BYPASS);
ctx->flush(ctx, NULL, 0);
puts("VM fault test: CP - done.");
}
void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value,
enum si_coherency coher);
+void si_cp_dma_copy_buffer(struct si_context *sctx,
+ struct pipe_resource *dst, struct pipe_resource *src,
+ uint64_t dst_offset, uint64_t src_offset, unsigned size,
+ unsigned user_flags, enum si_coherency coher,
+ enum si_cache_policy cache_policy);
void si_copy_buffer(struct si_context *sctx,
struct pipe_resource *dst, struct pipe_resource *src,
- uint64_t dst_offset, uint64_t src_offset, unsigned size,
- unsigned user_flags, enum si_cache_policy cache_policy);
+ uint64_t dst_offset, uint64_t src_offset, unsigned size);
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
uint64_t offset, unsigned size);
void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only);
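For reference, caller-side usage of the two entry points after this patch; the buffer names and the 4096 size are arbitrary example values, and the second call mirrors the L2 prefetch in cik_prefetch_TC_L2_async from this diff:

    /* Common case: the driver picks coherency and cache policy. */
    si_copy_buffer(sctx, dst_buf, src_buf, 0, 0, 4096);

    /* Full control: skip all synchronization and keep the data in L2. */
    si_cp_dma_copy_buffer(sctx, buf, buf, offset, offset, size,
                          SI_CPDMA_SKIP_ALL, SI_COHERENCY_SHADER, L2_LRU);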
if (test_cp) {
/* CP DMA */
if (is_copy) {
- si_copy_buffer(sctx, dst, src, 0, 0, size, 0,
- cache_policy);
+ si_cp_dma_copy_buffer(sctx, dst, src, 0, 0, size, 0,
+ SI_COHERENCY_NONE, cache_policy);
} else {
si_cp_dma_clear_buffer(sctx, dst, 0, size, clear_value,
SI_COHERENCY_NONE, cache_policy);