+/* The max number of bytes to copy per packet. */
+#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
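+/* Note: BYTE_COUNT is a 21-bit field, so one packet can move just under
+ * 2 MiB; subtracting 8 presumably keeps full-size chunks 8-byte aligned
+ * (an assumption, not stated in this patch). */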
+
+void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
+ struct pipe_resource *dst, uint64_t offset,
+ unsigned size, uint32_t clear_value,
+ enum r600_coherency coher)
+{
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+
+ assert(size);
+ assert(rctx->screen->b.has_cp_dma);
+
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
+ offset + size);
+
+ offset += r600_resource(dst)->gpu_address;
+
+ /* Flush the caches where the buffer is bound. */
+ rctx->b.flags |= r600_get_flush_flags(coher) |
+ R600_CONTEXT_WAIT_3D_IDLE;
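+
+ /* r600_get_flush_flags() is assumed to map the coherency mode to the
+ * matching cache-invalidation bits (e.g. shader-cache invalidation for
+ * R600_COHERENCY_SHADER); r600_flush_emit() below consumes these flags. */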
+
+ while (size) {
+ unsigned sync = 0;
+ unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
+ unsigned reloc;
+
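+ /* Reserve worst-case space: 10 dwords cover the 6-dword CP_DMA packet
+ * and the 2-dword NOP emitted below with some slack, plus whatever the
+ * pending flush and the trailing PFP sync may need. */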
+ r600_need_cs_space(rctx,
+ 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
+ R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
+
+ /* Flush the caches for the first packet only; r600_flush_emit() clears
+ * rctx->b.flags, so subsequent iterations skip this. */
+ if (rctx->b.flags) {
+ r600_flush_emit(rctx);
+ }
+
+ /* Do the synchronization after the last packet, so that all data is
+ * written to memory. */
+ if (size == byte_count) {
+ sync = PKT3_CP_DMA_CP_SYNC;
+ }
+
+ /* This must be done after r600_need_cs_space. */
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ (struct r600_resource*)dst, RADEON_USAGE_WRITE,
+ RADEON_PRIO_CP_DMA);
+
+ radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
+ radeon_emit(cs, clear_value); /* DATA [31:0] */
+ radeon_emit(cs, sync | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL [30:29]; 2 selects the DATA dword as fill source */
+ radeon_emit(cs, offset); /* DST_ADDR_LO [31:0] */
+ radeon_emit(cs, (offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */
+ radeon_emit(cs, byte_count); /* COMMAND [29:22] | BYTE_COUNT [20:0] */
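+ /* The destination is a 40-bit GPU virtual address (32 low + 8 high
+ * bits); the COMMAND bits are left at zero, which is assumed to select
+ * a plain incrementing linear write. */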
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
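+ /* The NOP payload carries the relocation index returned by
+ * radeon_add_to_buffer_list(), letting the winsys/kernel associate this
+ * submission with the destination buffer. */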
+
+ size -= byte_count;
+ offset += byte_count;
+ }
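+
+ /* Worked example: clearing 5 MiB (5242880 bytes) emits three packets:
+ * two full 2097144-byte chunks and a 1048592-byte remainder, with
+ * CP_SYNC set only on the last one. */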
+
+ /* CP DMA is executed in ME, but index buffers are read by PFP.
+ * This ensures that ME (CP DMA) is idle before PFP starts fetching
+ * indices. If we wanted to execute CP DMA in PFP, this packet
+ * should precede it.
+ */
+ if (coher == R600_COHERENCY_SHADER)
+ r600_emit_pfp_sync_me(rctx);