* Authors:
* Jerome Glisse
*/
-#include "r600_hw_context_priv.h"
+#include "r600_pipe.h"
#include "evergreend.h"
#include "util/u_memory.h"
#include "util/u_math.h"
-int evergreen_context_init(struct r600_context *ctx)
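+/* Copy "size" bytes from "src" to "dst" on the DMA ring; the offsets are
+ * relative to the start of each resource. */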
+void evergreen_dma_copy_buffer(struct r600_context *rctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src,
+ uint64_t dst_offset,
+ uint64_t src_offset,
+ uint64_t size)
{
- int r = 0;
-
- /* add blocks */
- r = r600_setup_block_table(ctx);
- if (r)
- goto out_err;
-
- ctx->max_db = 8;
- return 0;
-out_err:
- r600_context_fini(ctx);
- return r;
-}
-
-void evergreen_flush_vgt_streamout(struct r600_context *ctx)
-{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
-
- r600_write_config_reg(cs, R_0084FC_CP_STRMOUT_CNTL, 0);
-
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);
-
- cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
- cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
- cs->buf[cs->cdw++] = R_0084FC_CP_STRMOUT_CNTL >> 2; /* register */
- cs->buf[cs->cdw++] = 0;
- cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* reference value */
- cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* mask */
- cs->buf[cs->cdw++] = 4; /* poll interval */
-}
-
-void evergreen_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
-{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
-
- if (buffer_enable_bit) {
- r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- r600_write_value(cs, S_028B94_STREAMOUT_0_EN(1)); /* R_028B94_VGT_STRMOUT_CONFIG */
- r600_write_value(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit)); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
- } else {
- r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG, S_028B94_STREAMOUT_0_EN(0));
- }
-}
-
-void evergreen_dma_copy(struct r600_context *rctx,
- struct pipe_resource *dst,
- struct pipe_resource *src,
- uint64_t dst_offset,
- uint64_t src_offset,
- uint64_t size)
-{
- struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+ struct radeon_cmdbuf *cs = rctx->b.dma.cs;
unsigned i, ncopy, csize, sub_cmd, shift;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
- /* make sure that the dma ring is only one active */
- rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
- dst_offset += r600_resource_va(&rctx->screen->screen, dst);
- src_offset += r600_resource_va(&rctx->screen->screen, src);
+ /* Mark the destination buffer range as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
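+ /* Turn the resource-relative offsets into GPU virtual addresses. */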
+ dst_offset += rdst->gpu_address;
+ src_offset += rsrc->gpu_address;
/* see if we use dword or byte copy */
- if (!(dst_offset & 0x3) && !(src_offset & 0x3) && !(size & 0x3)) {
+ if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
size >>= 2;
- sub_cmd = 0x00;
+ sub_cmd = EG_DMA_COPY_DWORD_ALIGNED;
shift = 2;
} else {
- sub_cmd = 0x40;
+ sub_cmd = EG_DMA_COPY_BYTE_ALIGNED;
shift = 0;
}
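+ /* Split the copy into chunks of at most EG_DMA_COPY_MAX_SIZE bytes each. */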
- ncopy = (size / 0x000fffff) + !!(size % 0x000fffff);
+ ncopy = (size / EG_DMA_COPY_MAX_SIZE) + !!(size % EG_DMA_COPY_MAX_SIZE);
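+ /* Each copy packet below takes 5 dwords. */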
- r600_need_dma_space(rctx, ncopy * 5);
+ r600_need_dma_space(&rctx->b, ncopy * 5, rdst, rsrc);
for (i = 0; i < ncopy; i++) {
- csize = size < 0x000fffff ? size : 0x000fffff;
- /* emit reloc before writting cs so that cs is always in consistent state */
- r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
- r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
- cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize);
- cs->buf[cs->cdw++] = dst_offset & 0xffffffff;
- cs->buf[cs->cdw++] = src_offset & 0xffffffff;
- cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
- cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
+ csize = size < EG_DMA_COPY_MAX_SIZE ? size : EG_DMA_COPY_MAX_SIZE;
+ /* Emit relocs before writing to the cs so that it is always in a consistent state. */
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
+ radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize));
+ radeon_emit(cs, dst_offset & 0xffffffff); /* DST_ADDR_LO [31:0] */
+ radeon_emit(cs, src_offset & 0xffffffff); /* SRC_ADDR_LO [31:0] */
+ radeon_emit(cs, (dst_offset >> 32UL) & 0xff); /* DST_ADDR_HI [7:0] */
+ radeon_emit(cs, (src_offset >> 32UL) & 0xff); /* SRC_ADDR_HI [7:0] */
dst_offset += csize << shift;
src_offset += csize << shift;
size -= csize;
}
+}
- util_range_add(&rdst->valid_buffer_range, dst_offset,
- dst_offset + size);
+/* The max number of bytes to copy per packet. */
+#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
+
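+/* Fill "size" bytes of "dst" at "offset" with "clear_value", using CP DMA
+ * packets on the gfx ring. */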
+void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
+ struct pipe_resource *dst, uint64_t offset,
+ unsigned size, uint32_t clear_value,
+ enum r600_coherency coher)
+{
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+
+ assert(size);
+ assert(rctx->screen->b.has_cp_dma);
+
+ /* Mark the destination buffer range as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
+ offset + size);
+
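+ /* Turn the resource-relative offset into a GPU virtual address. */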
+ offset += r600_resource(dst)->gpu_address;
+
+ /* Flush the cache where the resource is bound. */
+ rctx->b.flags |= r600_get_flush_flags(coher) |
+ R600_CONTEXT_WAIT_3D_IDLE;
+
+ while (size) {
+ unsigned sync = 0;
+ unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
+ unsigned reloc;
+
+ r600_need_cs_space(rctx,
+ 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
+ R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
+
+ /* Flush the caches for the first copy only. */
+ if (rctx->b.flags) {
+ r600_flush_emit(rctx);
+ }
+
+ /* Do the synchronization after the last copy, so that all data is written to memory. */
+ if (size == byte_count) {
+ sync = PKT3_CP_DMA_CP_SYNC;
+ }
+
+ /* This must be done after r600_need_cs_space. */
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ (struct r600_resource*)dst, RADEON_USAGE_WRITE,
+ RADEON_PRIO_CP_DMA);
+
+ radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
+ radeon_emit(cs, clear_value); /* DATA [31:0] */
+ radeon_emit(cs, sync | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
+ radeon_emit(cs, offset); /* DST_ADDR_LO [31:0] */
+ radeon_emit(cs, (offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */
+ radeon_emit(cs, byte_count); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
+
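+ /* Emit the relocation for the destination buffer as a NOP packet. */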
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ size -= byte_count;
+ offset += byte_count;
+ }
+
+ /* CP DMA is executed in ME, but index buffers are read by PFP.
+ * This ensures that ME (CP DMA) is idle before PFP starts fetching
+ * indices. If we wanted to execute CP DMA in PFP, this packet
+ * should precede it.
+ */
+ if (coher == R600_COHERENCY_SHADER)
+ r600_emit_pfp_sync_me(rctx);
}