}
ncopy = (size / 0x000fffff) + !!(size % 0x000fffff);
- r600_need_dma_space(rctx, ncopy * 5);
+ r600_need_dma_space(&rctx->b, ncopy * 5);
for (i = 0; i < ncopy; i++) {
csize = size < 0x000fffff ? size : 0x000fffff;
/* emit reloc before writting cs so that cs is always in consistent state */
size = (copy_height * pitch) >> 2;
ncopy = (size / 0x000fffff) + !!(size % 0x000fffff);
- r600_need_dma_space(rctx, ncopy * 9);
+ r600_need_dma_space(&rctx->b, ncopy * 9);
for (i = 0; i < ncopy; i++) {
cheight = copy_height;
R600_CONTEXT_INV_TEX_CACHE;
}
-void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
-{
- /* The number of dwords we already used in the DMA so far. */
- num_dw += ctx->b.rings.dma.cs->cdw;
- /* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
- }
-}
-
void r600_dma_copy(struct r600_context *rctx,
struct pipe_resource *dst,
struct pipe_resource *src,
shift = 2;
ncopy = (size / 0xffff) + !!(size % 0xffff);
- r600_need_dma_space(rctx, ncopy * 5);
+ r600_need_dma_space(&rctx->b, ncopy * 5);
for (i = 0; i < ncopy; i++) {
csize = size < 0xffff ? size : 0xffff;
/* emit reloc before writting cs so that cs is always in consistent state */
void r600_begin_new_cs(struct r600_context *ctx);
void r600_flush_emit(struct r600_context *ctx);
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
-void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw);
void r600_cp_dma_copy_buffer(struct r600_context *rctx,
struct pipe_resource *dst, uint64_t dst_offset,
struct pipe_resource *src, uint64_t src_offset,
*/
cheight = ((0x0000ffff << 2) / pitch) & 0xfffffff8;
ncopy = (copy_height / cheight) + !!(copy_height % cheight);
- r600_need_dma_space(rctx, ncopy * 7);
+ r600_need_dma_space(&rctx->b, ncopy * 7);
for (i = 0; i < ncopy; i++) {
cheight = cheight > copy_height ? copy_height : cheight;
* pipe_context
*/
+/* Ensure the DMA command stream can take num_dw additional dwords.
+ *
+ * Adds num_dw to the dwords already emitted in the current DMA CS
+ * (ctx->rings.dma.cs->cdw); if the total would exceed
+ * RADEON_MAX_CMDBUF_DWORDS, the DMA ring is flushed asynchronously so
+ * the caller can emit its packets into a fresh, empty CS.
+ *
+ * Takes r600_common_context (not r600_context) so it is shared by all
+ * callers in the common radeon layer.
+ */
+void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw)
+{
+ /* The number of dwords we already used in the DMA so far. */
+ num_dw += ctx->rings.dma.cs->cdw;
+ /* Flush if there's not enough space. */
+ if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
+ ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+ }
+}
+
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
}
struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
const struct pipe_resource *templ);
const char *r600_get_llvm_processor_name(enum radeon_family family);
+void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw);
/* r600_query.c */
void r600_query_init(struct r600_common_context *rctx);