screen->info.chip_class == VI)
dwords *= 2;
- if (!screen->info.has_virtual_memory)
- dwords += 2;
-
return dwords;
}
(src &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
RADEON_USAGE_WRITE))))
- ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
 * is too large.
 */
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
!radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
}
RADEON_USAGE_WRITE)))
r600_dma_emit_wait_idle(ctx);
- /* If GPUVM is not supported, the CS checker needs 2 entries
- * in the buffer list per packet, which has to be done manually.
- */
- if (ctx->screen->info.has_virtual_memory) {
- if (dst)
- radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
- RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
- if (src)
- radeon_add_to_buffer_list(ctx, &ctx->dma, src,
- RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
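+ /* GPUVM is assumed to always be available now, so dst/src are
+  * added to the buffer list unconditionally. */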
+ if (dst) {
+ radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
+ RADEON_USAGE_WRITE,
+ RADEON_PRIO_SDMA_BUFFER);
+ }
+ if (src) {
+ radeon_add_to_buffer_list(ctx, &ctx->dma, src,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SDMA_BUFFER);
}
/* This function is called before every DMA call, so increment the counter here. */
ctx->num_dma_calls++;
}
-static void r600_flush_dma_ring(void *ctx, unsigned flags,
- struct pipe_fence_handle **fence)
+void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct radeon_winsys_cs *cs = rctx->dma.cs;
struct radeon_saved_cs saved;
- bool check_vm =
- (rctx->screen->debug_flags & DBG(CHECK_VM)) &&
- rctx->check_vm_faults;
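+	/* si_check_vm_faults is called directly below, so only the debug
+	 * flag needs to be checked here. */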
+ bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
if (!radeon_emitted(cs, 0)) {
if (fence)
/* Use a conservative timeout of 800 ms, after which we assume
 * the GPU is hung.
 */
rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
- rctx->check_vm_faults(rctx, &saved, RING_DMA);
+ si_check_vm_faults(rctx, &saved, RING_DMA);
si_clear_saved_cs(&saved);
}
}
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
ctx->ws->cs_sync_flush(ctx->dma.cs);
return false;
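+	/* Streamed data is only ever read by the GPU, so the uploader's
+	 * buffers can be flagged read-only. */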
rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
- 0, PIPE_USAGE_STREAM);
+ 0, PIPE_USAGE_STREAM,
+ R600_RESOURCE_FLAG_READ_ONLY);
if (!rctx->b.stream_uploader)
return false;
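+	/* Const buffers are addressed with 32-bit pointers; the heap can
+	 * only be read-only when CP DMA prefetches don't write to memory. */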
rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
- 0, PIPE_USAGE_DEFAULT);
+ 0, PIPE_USAGE_DEFAULT,
+ R600_RESOURCE_FLAG_32BIT |
+ (sscreen->cpdma_prefetch_writes_memory ?
+ 0 : R600_RESOURCE_FLAG_READ_ONLY));
if (!rctx->b.const_uploader)
return false;
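+	/* A small allocator for short-lived staging uploads in cached GTT. */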
+ rctx->cached_gtt_allocator = u_upload_create(&rctx->b, 16 * 1024,
+ 0, PIPE_USAGE_STAGING, 0);
+ if (!rctx->cached_gtt_allocator)
+ return false;
+
rctx->ctx = rctx->ws->ctx_create(rctx->ws);
if (!rctx->ctx)
return false;
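+	/* Create the SDMA queue with si_flush_dma_cs as its flush callback,
+	 * unless async DMA is disabled via DBG(NO_ASYNC_DMA). */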
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
- r600_flush_dma_ring,
+ si_flush_dma_cs,
rctx);
- rctx->dma.flush = r600_flush_dma_ring;
+ rctx->dma.flush = si_flush_dma_cs;
}
return true;
u_upload_destroy(rctx->b.stream_uploader);
if (rctx->b.const_uploader)
u_upload_destroy(rctx->b.const_uploader);
+ if (rctx->cached_gtt_allocator)
+ u_upload_destroy(rctx->cached_gtt_allocator);
slab_destroy_child(&rctx->pool_transfers);
slab_destroy_child(&rctx->pool_transfers_unsync);