radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %d\n",
__FUNCTION__, size, rmesa->dma.minimum_size);
+ if (!is_empty_list(&rmesa->dma.reserved))
+ radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
rmesa->dma.current_used = 0;
rmesa->dma.current_vertexptr = 0;
-
+
if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
first_elem(&rmesa->dma.reserved)->bo,
RADEON_GEM_DOMAIN_GTT, 0))
/* Cmd buf has been flushed in radeon_revalidate_bos */
goto again_alloc;
}
+ radeon_bo_map(first_elem(&rmesa->dma.reserved)->bo, 1);
}
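
The hunk above is the core of the change: the buffer object at the head of
dma.reserved now stays mapped while vertex data is being written, and the CPU
mapping is dropped only around operations that may flush the command stream
and replace the buffer. A minimal sketch of that bracketing pattern, reusing
the list/bo helpers visible in the code above (radeon_bo_map's second
argument is the write flag); this is an illustration, not the driver
function itself:

    /* Sketch only: shows the unmap/remap discipline around a refill
     * step that may flush the command stream. */
    static void refill_sketch(struct radeon_dma *dma)
    {
        /* Unmap before anything that can flush or swap the buffer;
         * any previously obtained CPU pointer is invalid past here. */
        if (!is_empty_list(&dma->reserved))
            radeon_bo_unmap(first_elem(&dma->reserved)->bo);

        /* ... space-check, flush, or allocation of a fresh bo ... */

        /* Re-map for write so later allocations can copy vertex data
         * in without paying a map/unmap per allocation. */
        radeon_bo_map(first_elem(&dma->reserved)->bo, 1);
    }
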
/* Allocates a region from rmesa->dma.current. If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current) */
if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
/* request updated cs processing information from kernel */
legacy_track_pending(rmesa->radeonScreen->bom, 0);
}
+
+ if (!is_empty_list(&rmesa->dma.reserved))
+ radeon_bo_unmap(first_elem(&rmesa->dma.reserved)->bo);
+
/* move waiting bos to free list.
wait list provides gpu time to handle data before reuse */
foreach_s(dma_bo, temp, &rmesa->dma.wait) {
/* free objects that are too small to be used because of large request */
if (dma_bo->bo->size < rmesa->dma.minimum_size) {
radeon_bo_unref(dma_bo->bo);
remove_from_list(dma_bo);
FREE(dma_bo);
continue;
}
- if (!radeon_bo_is_idle(dma_bo->bo))
+ if (!radeon_bo_is_idle(dma_bo->bo)) {
+ if (rmesa->radeonScreen->driScreen->dri2.enabled)
+ break;
continue;
+ }
remove_from_list(dma_bo);
dma_bo->expire_counter = expire_at;
insert_at_tail(&rmesa->dma.free, dma_bo);
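
The dri2 special case added above appears to rely on ordering: bos are
appended to the wait list as they are submitted, so under the kernel memory
manager the first bo that is still busy implies every later one is busy as
well, and the scan can stop instead of issuing further busy queries; the
legacy path keeps scanning. A self-contained sketch of that policy, with a
hypothetical singly linked list and busy flag standing in for the driver's
structures:

    struct buf { struct buf *next; int busy; };

    static void recycle(struct buf **wait, struct buf **free_list, int dri2)
    {
        while (*wait) {
            struct buf *b = *wait;
            if (b->busy) {
                if (dri2)
                    break;       /* FIFO order: the rest are busy too */
                wait = &b->next; /* legacy path: keep scanning */
                continue;
            }
            *wait = b->next;     /* unlink the idle buffer ... */
            b->next = *free_list;
            *free_list = b;      /* ... and push it onto the free list */
        }
    }
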
void rcommon_flush_last_swtcl_prim( GLcontext *ctx )
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
struct radeon_dma *dma = &rmesa->dma;
-
+
if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
dma->flush = NULL;
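
Clearing dma->flush before any further work is what makes this path safe to
re-enter: code reached later that checks the hook sees NULL and does nothing
instead of recursing. A generic sketch of that one-shot callback pattern
(struct and function names here are hypothetical, not driver API):

    struct dma_hook { void (*flush)(void *ctx); };

    static void fire_flush(struct dma_hook *dma, void *ctx)
    {
        void (*cb)(void *) = dma->flush;
        dma->flush = NULL;  /* clear first: re-entrant callers see no hook */
        if (cb)
            cb(ctx);        /* safe even if cb() reaches this path again */
    }
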