/*
* Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including, without limitation,
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
-static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
-{
- struct radeon_winsys_cs *cs = rctx->dma.cs;
-
- /* NOP waits for idle on Evergreen and later. */
- if (rctx->chip_class >= CIK)
- radeon_emit(cs, 0x00000000); /* NOP */
- else
- radeon_emit(cs, 0xf0000000); /* NOP */
-}
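/*
 * Editorial sketch (not part of the change): the wait-idle above works by
 * emitting a single SDMA NOP packet, whose encoding changed between
 * generations. A hypothetical helper isolating that choice is shown below;
 * the enum and function names are illustrative stand-ins, not driver
 * identifiers.
 */
#include <stdint.h>

enum example_chip_class { EXAMPLE_SI, EXAMPLE_CIK };

static uint32_t example_sdma_nop_dword(enum example_chip_class cc)
{
	/* CIK+ SDMA decodes an all-zero dword as NOP; the older engine wants
	 * opcode 0xf in the top nibble. Either NOP drains the engine to idle
	 * before the next packet executes. */
	return cc >= EXAMPLE_CIK ? 0x00000000u : 0xf0000000u;
}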
-
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src)
-{
- uint64_t vram = ctx->dma.cs->used_vram;
- uint64_t gtt = ctx->dma.cs->used_gart;
-
- if (dst) {
- vram += dst->vram_usage;
- gtt += dst->gart_usage;
- }
- if (src) {
- vram += src->vram_usage;
- gtt += src->gart_usage;
- }
-
- /* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
- (src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
- RADEON_USAGE_WRITE))))
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
-
- /* Flush if there's not enough space, or if the memory usage per IB
- * is too large.
- *
- * IBs using too little memory are limited by the IB submission overhead.
- * IBs using too much memory are limited by the kernel/TTM overhead.
- * Too long IBs create CPU-GPU pipeline bubbles and add latency.
- *
- * This heuristic makes sure that DMA requests are executed
- * very soon after the call is made and lowers memory usage.
- * It improves texture upload performance by keeping the DMA
- * engine busy while uploads are being submitted.
- */
- num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
- ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
- }
-
- /* Wait for idle if either buffer has been used in the IB before to
- * prevent read-after-write hazards.
- */
- if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
- (src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
- RADEON_USAGE_WRITE)))
- r600_dma_emit_wait_idle(ctx);
-
- if (dst) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
- RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
- }
- if (src) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, src,
- RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
- }
-
-	/* This function is called before every DMA call, so count them here. */
- ctx->num_dma_calls++;
-}
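/*
 * Minimal sketch of the flush heuristic in si_need_dma_space, lifted out as
 * a pure predicate. The parameters stand in for values the driver gets from
 * the winsys (cs_check_space, radeon_cs_memory_below_limit); the function
 * name and signature are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_dma_ib_should_flush(bool has_space, bool below_mem_limit,
					uint64_t ib_vram, uint64_t ib_gtt)
{
	/* Cap per-IB memory usage at 64 MiB: tiny IBs are dominated by
	 * submission overhead, huge IBs by kernel/TTM overhead and pipeline
	 * bubbles, so flush early to keep the SDMA engine continuously fed. */
	return !has_space ||
	       ib_vram + ib_gtt > 64 * 1024 * 1024 ||
	       !below_mem_limit;
}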
-
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->dma.cs;
- struct radeon_saved_cs saved;
- bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
-
- if (!radeon_emitted(cs, 0)) {
- if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
- return;
- }
-
- if (check_vm)
- si_save_cs(rctx->ws, cs, &saved, true);
-
- rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
- if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
-
- if (check_vm) {
-		/* Use a conservative timeout of 800 ms, after which we stop
-		 * waiting and assume the GPU is hung.
-		 */
- rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
-
- si_check_vm_faults(rctx, &saved, RING_DMA);
- si_clear_saved_cs(&saved);
- }
-}
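/*
 * Sketch: fence_wait above takes nanoseconds, so the "800 ms" from the
 * comment is spelled 800*1000*1000. A named conversion keeps the unit
 * visible; the identifier below is illustrative, not a driver name.
 */
#include <stdint.h>

static inline uint64_t example_ms_to_ns(uint64_t ms)
{
	return ms * 1000 * 1000; /* 1 ms = 1,000,000 ns */
}
/* e.g. example_ms_to_ns(800) == 800*1000*1000, the hang timeout used above */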
-
-/**
- * Store a linearized copy of all chunks of \p cs together with the buffer
- * list in \p saved.
- */
-void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
- struct radeon_saved_cs *saved, bool get_buffer_list)
-{
- uint32_t *buf;
- unsigned i;
-
- /* Save the IB chunks. */
- saved->num_dw = cs->prev_dw + cs->current.cdw;
- saved->ib = MALLOC(4 * saved->num_dw);
- if (!saved->ib)
- goto oom;
-
- buf = saved->ib;
- for (i = 0; i < cs->num_prev; ++i) {
- memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
- buf += cs->prev[i].cdw;
- }
- memcpy(buf, cs->current.buf, cs->current.cdw * 4);
-
- if (!get_buffer_list)
- return;
-
- /* Save the buffer list. */
- saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
- saved->bo_list = CALLOC(saved->bo_count,
- sizeof(saved->bo_list[0]));
- if (!saved->bo_list) {
- FREE(saved->ib);
- goto oom;
- }
- ws->cs_get_buffer_list(cs, saved->bo_list);
-
- return;
-
-oom:
- fprintf(stderr, "%s: out of memory\n", __func__);
- memset(saved, 0, sizeof(*saved));
-}
-
-void si_clear_saved_cs(struct radeon_saved_cs *saved)
-{
- FREE(saved->ib);
- FREE(saved->bo_list);
-
- memset(saved, 0, sizeof(*saved));
-}
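/*
 * Usage sketch for the si_save_cs/si_clear_saved_cs pair, mirroring the
 * CHECK_VM path in si_flush_dma_cs above. All identifiers come from this
 * file, but the wrapper itself is illustrative and elides error handling.
 */
static void example_checked_flush(struct r600_common_context *rctx,
				  struct radeon_winsys_cs *cs, unsigned flags)
{
	struct radeon_saved_cs saved;

	si_save_cs(rctx->ws, cs, &saved, true);      /* snapshot IB + BO list */
	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
	si_check_vm_faults(rctx, &saved, RING_DMA);  /* decode against snapshot */
	si_clear_saved_cs(&saved);                   /* frees ib and bo_list */
}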
-
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;

static bool r600_resource_commit(struct pipe_context *pctx,
				 struct pipe_resource *resource,
				 unsigned level, struct pipe_box *box,
				 bool commit)
{
- struct r600_common_context *ctx = (struct r600_common_context *)pctx;
+ struct si_context *ctx = (struct si_context *)pctx;
struct r600_resource *res = r600_resource(resource);
	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
- res->buf, RADEON_USAGE_READWRITE)) {
+ if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
+ res->buf, RADEON_USAGE_READWRITE)) {
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- if (radeon_emitted(ctx->dma.cs, 0) &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
- res->buf, RADEON_USAGE_READWRITE)) {
+ if (radeon_emitted(ctx->b.dma_cs, 0) &&
+ ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
+ res->buf, RADEON_USAGE_READWRITE)) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
- ctx->ws->cs_sync_flush(ctx->dma.cs);
- ctx->ws->cs_sync_flush(ctx->gfx.cs);
+ ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
+ ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
assert(resource->target == PIPE_BUFFER);
- return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+ return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
}
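/*
 * Sketch of the ordering r600_resource_commit depends on: both rings are
 * flushed asynchronously first, then each is synced, so the two submissions
 * drain in parallel rather than back to back. Identifiers match the
 * post-rename code above; the wrapper name is illustrative, and the real
 * function only flushes a ring when it actually references the buffer.
 */
static void example_sync_both_rings(struct si_context *ctx)
{
	si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL); /* queue, don't wait */
	si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
	ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);      /* now wait for both */
	ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
}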
bool si_common_context_init(struct r600_common_context *rctx,
struct si_screen *sscreen,
unsigned context_flags)
{
+ struct si_context *sctx = (struct si_context*)rctx;
+
slab_create_child(&rctx->pool_transfers, &sscreen->pool_transfers);
slab_create_child(&rctx->pool_transfers_unsync, &sscreen->pool_transfers);
rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
- si_init_context_texture_functions(rctx);
- si_init_query_functions(rctx);
+ si_init_context_texture_functions(sctx);
+ si_init_query_functions(sctx);
	if (rctx->chip_class == CIK ||
	    rctx->chip_class == VI ||
	    rctx->chip_class == GFX9) {
		rctx->eop_bug_scratch = (struct r600_resource*)
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends);
		if (!rctx->eop_bug_scratch)
			return false;
	}
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
- rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
- si_flush_dma_cs,
+ rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
+ (void*)si_flush_dma_cs,
rctx);
- rctx->dma.flush = si_flush_dma_cs;
}
	return true;
}

void si_common_context_cleanup(struct r600_common_context *rctx)
{
	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);
- if (rctx->gfx.cs)
- rctx->ws->cs_destroy(rctx->gfx.cs);
- if (rctx->dma.cs)
- rctx->ws->cs_destroy(rctx->dma.cs);
+ if (rctx->gfx_cs)
+ rctx->ws->cs_destroy(rctx->gfx_cs);
+ if (rctx->dma_cs)
+ rctx->ws->cs_destroy(rctx->dma_cs);
if (rctx->ctx)
rctx->ws->ctx_destroy(rctx->ctx);
rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}
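/*
 * Sketch of the gate used when creating the SDMA queue in
 * si_common_context_init: the hardware must expose at least one SDMA ring
 * and the user must not have set the NO_ASYNC_DMA debug flag. The
 * standalone parameters below are stand-ins; the driver reads both values
 * from si_screen.
 */
#include <stdbool.h>

static bool example_use_async_dma(unsigned num_sdma_rings, bool no_async_dma_flag)
{
	return num_sdma_rings != 0 && !no_async_dma_flag;
}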
-
-
-void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
- uint64_t offset, uint64_t size, unsigned value)
-{
- struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
-
- mtx_lock(&sscreen->aux_context_lock);
- rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
- sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- mtx_unlock(&sscreen->aux_context_lock);
-}
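/*
 * The deleted helper above follows a screen-wide rule: the shared
 * aux_context may be reached from several pipe_contexts, so the lock must
 * cover both command recording and the flush. A hedged generalization of
 * that pattern over a callback; the typedef and function name are
 * illustrative, not driver identifiers.
 */
typedef void (*example_aux_op)(struct pipe_context *ctx, void *data);

static void example_with_aux_context(struct si_screen *sscreen,
				     example_aux_op op, void *data)
{
	mtx_lock(&sscreen->aux_context_lock);
	op(sscreen->aux_context, data);                       /* record commands */
	sscreen->aux_context->flush(sscreen->aux_context, NULL, 0); /* submit */
	mtx_unlock(&sscreen->aux_context_lock);
}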