*/
#include "si_pipe.h"
+#include "sid.h"
static void si_dma_emit_wait_idle(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->dma_cs;
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
- /* NOP waits for idle on Evergreen and later. */
- if (sctx->chip_class >= CIK)
+ /* NOP waits for idle. */
+ if (sctx->chip_class >= GFX7)
radeon_emit(cs, 0x00000000); /* NOP */
else
radeon_emit(cs, 0xf0000000); /* NOP */
}
+void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
+ uint64_t offset)
+{
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ uint64_t va = dst->gpu_address + offset;
+
+ if (sctx->chip_class == GFX6) {
+ unreachable("SI DMA doesn't support the timestamp packet.");
+ return;
+ }
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&dst->b.b, &dst->valid_buffer_range, offset, offset + 8);
+
+ assert(va % 8 == 0);
+
+ si_need_dma_space(sctx, 4, dst, NULL);
+ si_dma_emit_wait_idle(sctx);
+
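+ /* The GET_GLOBAL_TIMESTAMP packet writes the 64-bit global SDMA timestamp
+ * to va, which is why va must be 8-byte aligned (asserted above). */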
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_TIMESTAMP,
+ SDMA_TS_SUB_OPCODE_GET_GLOBAL_TIMESTAMP,
+ 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+}
+
+void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
+ uint64_t offset, uint64_t size, unsigned clear_value)
+{
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ unsigned i, ncopy, csize;
+ struct si_resource *sdst = si_resource(dst);
+
+ assert(offset % 4 == 0);
+ assert(size);
+ assert(size % 4 == 0);
+
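+ /* Fall back to the gfx-based clear if there is no SDMA IB, the buffer
+ * is sparse, or SDMA clears are disabled for debugging. */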
+ if (!cs || dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
+ sctx->screen->debug_flags & DBG(NO_SDMA_CLEARS)) {
+ sctx->b.clear_buffer(&sctx->b, dst, offset, size, &clear_value, 4);
+ return;
+ }
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
+
+ offset += sdst->gpu_address;
+
+ if (sctx->chip_class == GFX6) {
+ /* the same maximum size as for copying */
+ ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
+ si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
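+ /* The GFX6 constant-fill packet encodes its count in dwords. */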
+ radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_CONSTANT_FILL, 0,
+ csize / 4));
+ radeon_emit(cs, offset);
+ radeon_emit(cs, clear_value);
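+ /* The address bits above 32 are stored starting at bit 16 of the final
+ * dword. */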
+ radeon_emit(cs, (offset >> 32) << 16);
+ offset += csize;
+ size -= csize;
+ }
+ return;
+ }
+
+ /* The following code is for GFX7 (Sea Islands) and later. */
+ /* the same maximum size as for copying */
+ ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
+ si_need_dma_space(sctx, ncopy * 5, sdst, NULL);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0,
+ 0x8000 /* dword copy */));
+ radeon_emit(cs, offset);
+ radeon_emit(cs, offset >> 32);
+ radeon_emit(cs, clear_value);
+ /* dw count; GFX9 encodes it as bytes - 1 */
+ radeon_emit(cs, (sctx->chip_class >= GFX9 ? csize - 1 : csize) & 0xfffffffc);
+ offset += csize;
+ size -= csize;
+ }
+}
+
+void si_sdma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
+ struct pipe_resource *src, uint64_t dst_offset,
+ uint64_t src_offset, uint64_t size)
+{
+ struct radeon_cmdbuf *cs = sctx->sdma_cs;
+ unsigned i, ncopy, csize;
+ struct si_resource *sdst = si_resource(dst);
+ struct si_resource *ssrc = si_resource(src);
+
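+ /* Fall back to the gfx-based copy if there is no SDMA IB or either
+ * buffer is sparse. */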
+ if (!cs ||
+ dst->flags & PIPE_RESOURCE_FLAG_SPARSE ||
+ src->flags & PIPE_RESOURCE_FLAG_SPARSE) {
+ si_copy_buffer(sctx, dst, src, dst_offset, src_offset, size);
+ return;
+ }
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(dst, &sdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
+ dst_offset += sdst->gpu_address;
+ src_offset += ssrc->gpu_address;
+
+ if (sctx->chip_class == GFX6) {
+ unsigned max_size, sub_cmd, shift;
+
+ /* see whether we should use the dword-aligned or byte-aligned copy */
+ if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
+ sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
+ shift = 2;
+ max_size = SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE;
+ } else {
+ sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
+ shift = 0;
+ max_size = SI_DMA_COPY_MAX_BYTE_ALIGNED_SIZE;
+ }
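+ /* The packet's count field is in dwords for the dword-aligned variant
+ * and in bytes for the byte-aligned one, hence "csize >> shift" below. */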
+
+ ncopy = DIV_ROUND_UP(size, max_size);
+ si_need_dma_space(sctx, ncopy * 5, sdst, ssrc);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = MIN2(size, max_size);
+ radeon_emit(cs, SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd,
+ csize >> shift));
+ radeon_emit(cs, dst_offset);
+ radeon_emit(cs, src_offset);
+ radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
+ radeon_emit(cs, (src_offset >> 32UL) & 0xff);
+ dst_offset += csize;
+ src_offset += csize;
+ size -= csize;
+ }
+ return;
+ }
+
+ /* The following code is for GFX7 (CI) and later. */
+ unsigned align = ~0u;
+ ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
+
+ /* Align the copy size to dwords if the src/dst addresses are dword-aligned. */
+ if ((src_offset & 0x3) == 0 &&
+ (dst_offset & 0x3) == 0 &&
+ size > 4 &&
+ (size & 3) != 0) {
+ align = ~0x3u;
+ ncopy++;
+ }
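+ /* The extra packet handles the 1-3 byte tail: the loop below copies the
+ * dword-aligned bulk first, and the final iteration (size < 4) copies
+ * the remainder. */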
+
+ si_need_dma_space(sctx, ncopy * 7, sdst, ssrc);
+
+ for (i = 0; i < ncopy; i++) {
+ csize = size >= 4 ? MIN2(size & align, CIK_SDMA_COPY_MAX_SIZE) : size;
+ radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
+ CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
+ 0));
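+ /* GFX9 encodes the byte count as size - 1. */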
+ radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
+ radeon_emit(cs, 0); /* src/dst endian swap */
+ radeon_emit(cs, src_offset);
+ radeon_emit(cs, src_offset >> 32);
+ radeon_emit(cs, dst_offset);
+ radeon_emit(cs, dst_offset >> 32);
+ dst_offset += csize;
+ src_offset += csize;
+ size -= csize;
+ }
+}
+
void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
- struct r600_resource *dst, struct r600_resource *src)
+ struct si_resource *dst, struct si_resource *src)
{
- uint64_t vram = ctx->dma_cs->used_vram;
- uint64_t gtt = ctx->dma_cs->used_gart;
+ struct radeon_winsys *ws = ctx->ws;
+ uint64_t vram = ctx->sdma_cs->used_vram;
+ uint64_t gtt = ctx->sdma_cs->used_gart;
if (dst) {
vram += dst->vram_usage;
gtt += dst->gart_usage;
}
if (src) {
vram += src->vram_usage;
gtt += src->gart_usage;
}
/* Flush the GFX IB if DMA depends on it. */
- if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+ if (!ctx->sdma_uploads_in_progress &&
+ radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
- RADEON_USAGE_WRITE))))
+ ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
+ RADEON_USAGE_WRITE))))
si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
* is too large. This heuristic keeps the DMA engine busy while
* uploads are being submitted.
*/
num_dw++; /* for emit_wait_idle below */
- if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
- ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
- !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
+ if (!ctx->sdma_uploads_in_progress &&
+ (!ws->cs_check_space(ctx->sdma_cs, num_dw, false) ||
+ ctx->sdma_cs->used_vram + ctx->sdma_cs->used_gart > 64 * 1024 * 1024 ||
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->sdma_cs, vram, gtt))) {
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
- assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
+ assert((num_dw + ctx->sdma_cs->current.cdw) <= ctx->sdma_cs->current.max_dw);
}
/* Wait for idle if either buffer has been used in the IB before to
* prevent read-after-write hazards.
*/
if ((dst &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
- RADEON_USAGE_READWRITE)) ||
+ ws->cs_is_buffer_referenced(ctx->sdma_cs, dst->buf,
+ RADEON_USAGE_READWRITE)) ||
(src &&
- ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
- RADEON_USAGE_WRITE)))
+ ws->cs_is_buffer_referenced(ctx->sdma_cs, src->buf,
+ RADEON_USAGE_WRITE)))
si_dma_emit_wait_idle(ctx);
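+ /* Skip implicit winsys synchronization while SDMA const uploads are in
+ * progress (the upload path is assumed to synchronize explicitly). */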
+ unsigned sync = ctx->sdma_uploads_in_progress ? 0 : RADEON_USAGE_SYNCHRONIZED;
if (dst) {
- radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
- RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
+ ws->cs_add_buffer(ctx->sdma_cs, dst->buf, RADEON_USAGE_WRITE | sync,
+ dst->domains, 0);
}
if (src) {
- radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
- RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
+ ws->cs_add_buffer(ctx->sdma_cs, src->buf, RADEON_USAGE_READ | sync,
+ src->domains, 0);
}
/* this function is called before all DMA calls, so increment this. */
ctx->num_dma_calls++;
}

void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
- struct radeon_winsys_cs *cs = ctx->dma_cs;
+ struct radeon_cmdbuf *cs = ctx->sdma_cs;
struct radeon_saved_cs saved;
bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value)
{
struct si_context *ctx = (struct si_context*)sscreen->aux_context;
- mtx_lock(&sscreen->aux_context_lock);
- ctx->dma_clear_buffer(ctx, dst, offset, size, value);
+ simple_mtx_lock(&sscreen->aux_context_lock);
+ si_sdma_clear_buffer(ctx, dst, offset, size, value);
sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
- mtx_unlock(&sscreen->aux_context_lock);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
}