struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
/* make sure that only one ring (the DMA ring) is active */
rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
src_offset += csize << shift;
size -= csize;
}
-
- util_range_add(&rdst->valid_buffer_range, dst_offset,
- dst_offset + size);
}
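
For context on what the new util_range_add calls buy us: the mapping side can test the requested box against valid_buffer_range and skip synchronization entirely when the GPU has never written there, which is the transfer_map behaviour the added comment refers to. A minimal sketch of such a check follows; the helper name and parameters are made up for illustration, only the u_range and PIPE_TRANSFER_* names are real Gallium interfaces.

#include "pipe/p_defines.h"
#include "util/u_range.h"

/* Sketch only: decide whether a buffer map needs to wait for the GPU.
 * If the mapped box does not overlap any range the GPU ever wrote,
 * the mapping can be handed out unsynchronized. */
static unsigned
relax_transfer_usage(struct util_range *valid_buffer_range,
                     unsigned usage, unsigned box_x, unsigned box_width)
{
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(valid_buffer_range, box_x, box_x + box_width)) {
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
   }
   return usage;
}
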
/* The max number of bytes to copy per packet. */
assert(size);
assert(rctx->screen->b.has_cp_dma);
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
+ offset + size);
+
offset += r600_resource_va(&rctx->screen->b.b, dst);
/* Flush the cache where the resource is bound. */
rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_INV_VERTEX_CACHE |
R600_CONTEXT_INV_TEX_CACHE;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
}
assert(size);
assert(rctx->screen->b.has_cp_dma);
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
src_offset += r600_resource_va(&rctx->screen->b.b, src);
rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_INV_VERTEX_CACHE |
R600_CONTEXT_INV_TEX_CACHE;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
}
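
Note that util_range_add only maintains a single, conservative [start, end) interval, which is why moving the call in front of the copy is safe: recording the range before the packets are even emitted can at worst make a later map wait when it did not strictly need to, never skip a wait it needed. Roughly, the helper behaves like the sketch below (modelled on Gallium's u_range.h; treat the exact body as an approximation of the in-tree helper).

/* Approximate behaviour of util_range_add: grow a single half-open
 * interval to cover every range that was ever added. */
struct util_range {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive */
};

static inline void
util_range_add(struct util_range *range, unsigned start, unsigned end)
{
   if (start < range->start)
      range->start = start;
   if (end > range->end)
      range->end = end;
}
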
void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
/* make sure that only one ring (the DMA ring) is active */
rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
src_offset += csize << shift;
size -= csize;
}
-
- util_range_add(&rdst->valid_buffer_range, dst_offset,
- dst_offset + size);
}
if (!size)
return;
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
+ offset + size);
+
/* Fallback for unaligned clears. */
if (offset % 4 != 0 || size % 4 != 0) {
uint32_t *map = rctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
size /= 4;
for (unsigned i = 0; i < size; i++)
*map++ = value;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
return;
}
R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_CB_META |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
- offset + size);
}
void si_copy_buffer(struct r600_context *rctx,
if (!size)
return;
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
src_offset += r600_resource_va(&rctx->screen->b.b, src);
R600_CONTEXT_FLUSH_AND_INV_DB |
R600_CONTEXT_FLUSH_AND_INV_CB_META |
R600_CONTEXT_FLUSH_AND_INV_DB_META;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
}
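
The other end of the tracking matters too: valid_buffer_range has to start out empty whenever the buffer gets fresh backing storage (at creation, or on a whole-resource discard reallocation), otherwise stale validity from the old storage would keep forcing waits. Below is a sketch of that reset using the util_range_set_empty helper from u_range.h; the surrounding struct and function are illustrative stand-ins, not the driver's actual INIT path.

#include "util/u_range.h"

/* Sketch only: reset the valid range whenever storage is (re)allocated,
 * so the first map of the new storage never waits on old GPU work. */
struct tracked_buffer {
   struct util_range valid_buffer_range;
   /* ... winsys buffer handle, size, etc. ... */
};

static void
tracked_buffer_alloc_storage(struct tracked_buffer *buf)
{
   /* ... allocate or replace the actual storage here ... */
   util_range_set_empty(&buf->valid_buffer_range);
}
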
/* INIT/DEINIT */