* pipe_context
*/
-static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
-{
- struct radeon_winsys_cs *cs = rctx->dma.cs;
-
- /* NOP waits for idle on Evergreen and later. */
- if (rctx->chip_class >= CIK)
- radeon_emit(cs, 0x00000000); /* NOP */
- else
- radeon_emit(cs, 0xf0000000); /* NOP */
-}
-
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src)
-{
- uint64_t vram = ctx->dma.cs->used_vram;
- uint64_t gtt = ctx->dma.cs->used_gart;
-
- if (dst) {
- vram += dst->vram_usage;
- gtt += dst->gart_usage;
- }
- if (src) {
- vram += src->vram_usage;
- gtt += src->gart_usage;
- }
-
- /* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
- ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
- (src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
- RADEON_USAGE_WRITE))))
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
-
- /* Flush if there's not enough space, or if the memory usage per IB
- * is too large.
- *
- * IBs using too little memory are limited by the IB submission overhead.
- * IBs using too much memory are limited by the kernel/TTM overhead.
- * Too long IBs create CPU-GPU pipeline bubbles and add latency.
- *
- * This heuristic makes sure that DMA requests are executed
- * very soon after the call is made and lowers memory usage.
- * It improves texture upload performance by keeping the DMA
- * engine busy while uploads are being submitted.
- */
- num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
- ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
- }
-
- /* Wait for idle if either buffer has been used in the IB before to
- * prevent read-after-write hazards.
- */
- if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
- (src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
- RADEON_USAGE_WRITE)))
- r600_dma_emit_wait_idle(ctx);
-
- if (dst) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
- RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
- }
- if (src) {
- radeon_add_to_buffer_list(ctx, &ctx->dma, src,
- RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
- }
-
- /* this function is called before all DMA calls, so increment this. */
- ctx->num_dma_calls++;
-}
-
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct radeon_winsys_cs *cs = rctx->dma.cs;
- struct radeon_saved_cs saved;
- bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
-
- if (!radeon_emitted(cs, 0)) {
- if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
- return;
- }
-
- if (check_vm)
- si_save_cs(rctx->ws, cs, &saved, true);
-
- rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
- if (fence)
- rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
-
- if (check_vm) {
- /* Use conservative timeout 800ms, after which we won't wait any
- * longer and assume the GPU is hung.
- */
- rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
-
- si_check_vm_faults(rctx, &saved, RING_DMA);
- si_clear_saved_cs(&saved);
- }
-}
-
/**
* Store a linearized copy of all chunks of \p cs together with the buffer
* list in \p saved.
rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}
-
-
-void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
- uint64_t offset, uint64_t size, unsigned value)
-{
- struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
-
- mtx_lock(&sscreen->aux_context_lock);
- rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
- sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- mtx_unlock(&sscreen->aux_context_lock);
-}
struct si_screen *sscreen,
unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
-void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
- uint64_t offset, uint64_t size, unsigned value);
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src);
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct si_screen *sscreen);
si_debug.c \
si_descriptors.c \
si_dma.c \
+ si_dma_cs.c \
si_fence.c \
si_get.c \
si_gfx_cs.c \
--- /dev/null
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "si_pipe.h"
+#include "radeon/r600_cs.h"
+
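+/* Emit an SDMA NOP packet, which the engine treats as a wait for idle. */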
+static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
+{
+ struct radeon_winsys_cs *cs = rctx->dma.cs;
+
+ /* NOP waits for idle; only the packet encoding differs between SI and CIK+. */
+ if (rctx->chip_class >= CIK)
+ radeon_emit(cs, 0x00000000); /* NOP */
+ else
+ radeon_emit(cs, 0xf0000000); /* NOP */
+}
+
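+/**
+ * Prepare the SDMA CS for \p num_dw more dwords: flush the GFX IB if the
+ * DMA transfer depends on it, flush the DMA IB if it is out of space or its
+ * memory usage is too high, synchronize against earlier uses of \p dst and
+ * \p src within the IB, and add both buffers to the buffer list.
+ */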
+void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+ struct r600_resource *dst, struct r600_resource *src)
+{
+ uint64_t vram = ctx->dma.cs->used_vram;
+ uint64_t gtt = ctx->dma.cs->used_gart;
+
+ if (dst) {
+ vram += dst->vram_usage;
+ gtt += dst->gart_usage;
+ }
+ if (src) {
+ vram += src->vram_usage;
+ gtt += src->gart_usage;
+ }
+
+ /* Flush the GFX IB if DMA depends on it. */
+ if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
+ ((dst &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
+ (src &&
+ ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
+ RADEON_USAGE_WRITE))))
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+
+ /* Flush if there's not enough space, or if the memory usage per IB
+ * is too large.
+ *
+ * IBs using too little memory are limited by the IB submission overhead.
+ * IBs using too much memory are limited by the kernel/TTM overhead.
+ * Too long IBs create CPU-GPU pipeline bubbles and add latency.
+ *
+ * This heuristic makes sure that DMA requests are executed
+ * very soon after the call is made and lowers memory usage.
+ * It improves texture upload performance by keeping the DMA
+ * engine busy while uploads are being submitted.
+ */
+ num_dw++; /* for si_dma_emit_wait_idle below */
+ if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
+ ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
+ }
+
+ /* Wait for idle if either buffer has been used in the IB before to
+ * prevent read-after-write hazards.
+ */
+ if ((dst &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
+ (src &&
+ ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
+ RADEON_USAGE_WRITE)))
+ si_dma_emit_wait_idle(ctx);
+
+ if (dst) {
+ radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
+ RADEON_USAGE_WRITE,
+ RADEON_PRIO_SDMA_BUFFER);
+ }
+ if (src) {
+ radeon_add_to_buffer_list(ctx, &ctx->dma, src,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_SDMA_BUFFER);
+ }
+
+ /* This function is called before every DMA call, so increment the counter here. */
+ ctx->num_dma_calls++;
+}
+
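+/**
+ * Submit the SDMA IB and optionally return its fence. If nothing has been
+ * emitted, only the last SDMA fence is returned. With DBG(CHECK_VM), the
+ * submission is also checked for VM faults.
+ */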
+void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct radeon_winsys_cs *cs = rctx->dma.cs;
+ struct radeon_saved_cs saved;
+ bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
+
+ if (!radeon_emitted(cs, 0)) {
+ if (fence)
+ rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+ return;
+ }
+
+ if (check_vm)
+ si_save_cs(rctx->ws, cs, &saved, true);
+
+ rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
+ if (fence)
+ rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+
+ if (check_vm) {
+ /* Use a conservative timeout of 800 ms; if the fence has not signaled
+ * by then, stop waiting and assume the GPU is hung.
+ */
+ rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
+
+ si_check_vm_faults(rctx, &saved, RING_DMA);
+ si_clear_saved_cs(&saved);
+ }
+}
+
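+/**
+ * Clear a buffer range from the screen (without a pipe_context), using the
+ * aux context under aux_context_lock.
+ */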
+void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
+ uint64_t offset, uint64_t size, unsigned value)
+{
+ struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
+
+ mtx_lock(&sscreen->aux_context_lock);
+ rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
+ sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
+ mtx_unlock(&sscreen->aux_context_lock);
+}
/* si_dma.c */
void si_init_dma_functions(struct si_context *sctx);
+/* si_dma_cs.c */
+void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+ struct r600_resource *dst, struct r600_resource *src);
+void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
+void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
+ uint64_t offset, uint64_t size, unsigned value);
+
/* si_fence.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
unsigned event, unsigned event_flags,