#include "sid.h"
#include "radeon/r600_cs.h"
-/* Alignment for optimal performance. */
-#define CP_DMA_ALIGNMENT 32
-/* The max number of bytes to copy per packet. */
-#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - CP_DMA_ALIGNMENT)
-
/* Set this if you want the ME to wait until CP DMA is done.
* It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC (1 << 0)
#define CP_DMA_USE_L2 (1 << 2) /* CIK+ */
#define CP_DMA_CLEAR (1 << 3)
+/* The max number of bytes that can be copied per packet. */
+static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
+{
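+ /* Derive the limit from the width of the packed BYTE_COUNT field;
+ * GFX9 widened this field relative to older chips. */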
+ unsigned max = sctx->b.chip_class >= GFX9 ?
+ S_414_BYTE_COUNT_GFX9(~0u) :
+ S_414_BYTE_COUNT_GFX6(~0u);
+
+ /* make it aligned for optimal performance */
+ return max & ~(SI_CPDMA_ALIGNMENT - 1);
+}
+
/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in the byte count field: bits [20:0] before
 * GFX9, bits [25:0] on GFX9. If CP_DMA_CLEAR is set, src_va is a 32-bit
* clear value.
enum r600_coherency coher)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
- uint32_t header = 0, command = S_414_BYTE_COUNT(size);
+ uint32_t header = 0, command = 0;
assert(size);
- assert(size <= CP_DMA_MAX_BYTE_COUNT);
+ assert(size <= cp_dma_max_byte_count(sctx));
+
+ if (sctx->b.chip_class >= GFX9)
+ command |= S_414_BYTE_COUNT_GFX9(size);
+ else
+ command |= S_414_BYTE_COUNT_GFX6(size);
/* Sync flags. */
if (flags & CP_DMA_SYNC)
header |= S_411_CP_SYNC(1);
- else
- command |= S_414_DISABLE_WR_CONFIRM(1);
+ else {
+ if (sctx->b.chip_class >= GFX9)
+ command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
+ else
+ command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
+ }
if (flags & CP_DMA_RAW_WAIT)
command |= S_414_RAW_WAIT(1);
/* Src and dst flags. */
- if (flags & CP_DMA_USE_L2)
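+ /* On GFX9, copying a range onto itself is only used to prime the L2
+ * cache; selecting "nowhere" as the destination turns the packet into
+ * a pure prefetch. */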
+ if (sctx->b.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
+ src_va == dst_va)
+ header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
+ else if (flags & CP_DMA_USE_L2)
header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);
if (flags & CP_DMA_CLEAR)
SI_CONTEXT_INV_VMEM_L1 |
(sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
case R600_COHERENCY_CB_META:
- return SI_CONTEXT_FLUSH_AND_INV_CB |
- SI_CONTEXT_FLUSH_AND_INV_CB_META;
+ return SI_CONTEXT_FLUSH_AND_INV_CB;
}
}
uint64_t remaining_size, unsigned user_flags,
bool *is_first, unsigned *packet_flags)
{
- /* Count memory usage in so that need_cs_space can take it into account. */
- r600_context_add_resource_size(&sctx->b.b, dst);
- if (src)
- r600_context_add_resource_size(&sctx->b.b, src);
+ /* Fast exit for a CPDMA prefetch. */
+ if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
+ *is_first = false;
+ return;
+ }
+
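+ /* Callers may skip the buffer-list update when the buffer is already
+ * known to be in the CS buffer list. */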
+ if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
+ /* Count memory usage so that need_cs_space can take it into account. */
+ r600_context_add_resource_size(&sctx->b.b, dst);
+ if (src)
+ r600_context_add_resource_size(&sctx->b.b, src);
+ }
if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
si_need_cs_space(sctx);
/* This must be done after need_cs_space. */
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)dst,
- RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
- if (src)
+ if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource*)src,
- RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
+ (struct r600_resource*)dst,
+ RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
+ if (src)
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ (struct r600_resource*)src,
+ RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
+ }
/* Flush the caches for the first copy only.
* Also wait for the previous CP DMA operations.
struct r600_resource *rdst = r600_resource(dst);
unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
unsigned flush_flags = get_flush_flags(sctx, coher);
+ uint64_t dma_clear_size;
bool is_first = true;
if (!size)
return;
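+ /* The GPU clear paths operate on whole dwords; round the size down and
+ * write any trailing 1-3 bytes separately at the end. */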
+ dma_clear_size = size & ~3llu;
+
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
util_range_add(&rdst->valid_buffer_range, offset,
- offset + size);
-
- /* Fallback for unaligned clears. */
- if (offset % 4 != 0 || size % 4 != 0) {
- uint8_t *map = r600_buffer_map_sync_with_rings(&sctx->b, rdst,
- PIPE_TRANSFER_WRITE);
- map += offset;
- for (uint64_t i = 0; i < size; i++) {
- unsigned byte_within_dword = (offset + i) % 4;
- *map++ = (value >> (byte_within_dword * 8)) & 0xff;
- }
- return;
- }
+ offset + dma_clear_size);
/* dma_clear_buffer can use clear_buffer on failure. Make sure that
* doesn't happen. We don't want an infinite recursion: */
if (sctx->b.dma.cs &&
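+ /* SDMA is not used for sparse buffers or dword-unaligned offsets;
+ * those cases fall through to the CP DMA clear below. */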
+ !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
+ (offset % 4 == 0) &&
/* CP DMA is very slow. Always use SDMA for big clears. This
* alone improves DeusEx:MD performance by 70%. */
(size > 128 * 1024 ||
* of them are moved to SDMA thanks to this. */
!ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
RADEON_USAGE_READWRITE))) {
- sctx->b.dma_clear_buffer(ctx, dst, offset, size, value);
- return;
- }
+ sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);
- uint64_t va = rdst->gpu_address + offset;
+ offset += dma_clear_size;
+ size -= dma_clear_size;
+ } else if (dma_clear_size >= 4) {
+ uint64_t va = rdst->gpu_address + offset;
- /* Flush the caches. */
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
- SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;
+ offset += dma_clear_size;
+ size -= dma_clear_size;
- while (size) {
- unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
- unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;
+ /* Flush the caches. */
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;
- si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, 0,
- &is_first, &dma_flags);
+ while (dma_clear_size) {
+ unsigned byte_count = MIN2(dma_clear_size, cp_dma_max_byte_count(sctx));
+ unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;
- /* Emit the clear packet. */
- si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);
+ si_cp_dma_prepare(sctx, dst, NULL, byte_count, dma_clear_size, 0,
+ &is_first, &dma_flags);
- size -= byte_count;
- va += byte_count;
+ /* Emit the clear packet. */
+ si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);
+
+ dma_clear_size -= byte_count;
+ va += byte_count;
+ }
+
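+ /* If the clear went through the TC L2 cache, remember that the cache
+ * must be flushed before other engines read this buffer. */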
+ if (tc_l2_flag)
+ rdst->TC_L2_dirty = true;
+
+ /* If it's not a framebuffer fast clear... */
+ if (coher == R600_COHERENCY_SHADER)
+ sctx->b.num_cp_dma_calls++;
}
- if (tc_l2_flag)
- rdst->TC_L2_dirty = true;
+ if (size) {
+ /* Handle non-dword alignment.
+ *
+ * This function is called for embedded texture metadata clears,
+ * but those should always be properly aligned. */
+ assert(dst->target == PIPE_BUFFER);
+ assert(size < 4);
- /* If it's not a framebuffer fast clear... */
- if (coher == R600_COHERENCY_SHADER)
- sctx->b.num_cp_dma_calls++;
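+ /* Upload the 1-3 trailing bytes that the dword-based clear above
+ * could not cover. */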
+ pipe_buffer_write(ctx, dst, offset, size, &value);
+ }
}
/**
{
uint64_t va;
unsigned dma_flags = 0;
- unsigned scratch_size = CP_DMA_ALIGNMENT * 2;
+ unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;
- assert(size < CP_DMA_ALIGNMENT);
+ assert(size < SI_CPDMA_ALIGNMENT);
/* Use the scratch buffer as the dummy buffer. The 3D engine should be
* idle at this point.
sctx->scratch_buffer->b.b.width0 < scratch_size) {
r600_resource_reference(&sctx->scratch_buffer, NULL);
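+ /* The dummy buffer is only ever accessed by the GPU, so it can be
+ * unmappable VRAM. */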
sctx->scratch_buffer = (struct r600_resource*)
- pipe_buffer_create(&sctx->screen->b.b, 0,
- PIPE_USAGE_DEFAULT, scratch_size);
+ r600_aligned_buffer_create(&sctx->screen->b.b,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_size, 256);
if (!sctx->scratch_buffer)
return;
- sctx->emit_scratch_reloc = true;
+
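+ /* A new buffer was allocated, so the state referencing the scratch
+ * buffer must be re-emitted. */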
+ si_mark_atom_dirty(sctx, &sctx->scratch_state);
}
si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
is_first, &dma_flags);
va = sctx->scratch_buffer->gpu_address;
- si_emit_cp_dma(sctx, va, va + CP_DMA_ALIGNMENT, size, dma_flags,
+ si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
R600_COHERENCY_SHADER);
}
if (!size)
return;
- /* Mark the buffer range of destination as valid (initialized),
- * so that transfer_map knows it should wait for the GPU when mapping
- * that range. */
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
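+ /* A prefetch copies a range onto itself and writes nothing new, so
+ * there is no valid range to record. */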
+ if (dst != src || dst_offset != src_offset) {
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
+ }
dst_offset += r600_resource(dst)->gpu_address;
src_offset += r600_resource(src)->gpu_address;
* just to align the internal counter. Otherwise, the DMA engine
* would slow down by an order of magnitude for following copies.
*/
- if (size % CP_DMA_ALIGNMENT)
- realign_size = CP_DMA_ALIGNMENT - (size % CP_DMA_ALIGNMENT);
+ if (size % SI_CPDMA_ALIGNMENT)
+ realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);
/* If the copy begins unaligned, we must start copying from the next
* aligned block and the skipped part should be copied after everything
* else has been copied. Only the src alignment matters, not dst.
*/
- if (src_offset % CP_DMA_ALIGNMENT) {
- skipped_size = CP_DMA_ALIGNMENT - (src_offset % CP_DMA_ALIGNMENT);
+ if (src_offset % SI_CPDMA_ALIGNMENT) {
+ skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
/* The main part will be skipped if the size is too small. */
skipped_size = MIN2(skipped_size, size);
size -= skipped_size;
while (size) {
unsigned dma_flags = tc_l2_flag;
- unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
+ unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));
si_cp_dma_prepare(sctx, dst, src, byte_count,
size + skipped_size + realign_size,
sctx->b.num_cp_dma_calls++;
}
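+/* Prime the TC L2 cache for a buffer range by copying the range onto itself
+ * with CP DMA; on GFX9 si_emit_cp_dma turns this into a pure prefetch. */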
+void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
+ uint64_t offset, unsigned size)
+{
+ assert(sctx->b.chip_class >= CIK);
+
+ si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
+}
+
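+/* Prefetch a shader binary. A shader PM4 state references exactly one
+ * buffer object: the shader code. */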
+static void cik_prefetch_shader_async(struct si_context *sctx,
+ struct si_pm4_state *state)
+{
+ if (state) {
+ struct pipe_resource *bo = &state->bo[0]->b.b;
+ assert(state->nbo == 1);
+
+ cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
+ }
+}
+
+static void cik_emit_prefetch_L2(struct si_context *sctx, struct r600_atom *atom)
+{
+ /* Prefetch shaders and VBO descriptors to TC L2. */
+ if (si_pm4_state_changed(sctx, ls))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
+ if (si_pm4_state_changed(sctx, hs))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
+ if (si_pm4_state_changed(sctx, es))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.es);
+ if (si_pm4_state_changed(sctx, gs))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
+ if (si_pm4_state_changed(sctx, vs))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
+
+ /* Vertex buffer descriptors are uploaded uncached, so prefetch
+ * them right after the VS binary. */
+ if (sctx->vertex_buffer_pointer_dirty) {
+ cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
+ sctx->vertex_buffers.buffer_offset,
+ sctx->vertex_elements->desc_list_byte_size);
+ }
+ if (si_pm4_state_changed(sctx, ps))
+ cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
+}
+
void si_init_cp_dma_functions(struct si_context *sctx)
{
sctx->b.clear_buffer = si_clear_buffer;
+
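+ /* Register the CIK+ TC L2 prefetch atom. */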
+ si_init_atom(sctx, &sctx->prefetch_L2, &sctx->atoms.s.prefetch_L2,
+ cik_emit_prefetch_L2);
}