offset = heap->start;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
+
+ if (offset + waste + size > heap->end) {
+ mtx_unlock(&heap->mutex);
+ return 0;
+ }
+
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = offset;
list_add(&n->list, &heap->holes);
}
offset += waste;
heap->start = offset + size;
mtx_unlock(&heap->mutex);
return offset;
}
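/* Worked example (illustrative, not part of the patch): with heap->start =
 * 0x3000 and alignment = 0x4000, waste = 0x4000 - 0x3000 = 0x1000, so a
 * 0x1000-byte hole is recorded at 0x3000 and the allocation begins at the
 * aligned offset 0x4000. */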
+static uint64_t radeon_bomgr_find_va64(struct radeon_drm_winsys *ws,
+ uint64_t size, uint64_t alignment)
+{
+ uint64_t va = 0;
+
+ /* Try to allocate from the 64-bit address space first.
+ * If it doesn't exist (start = 0) or if it doesn't have enough space,
+ * fall back to the 32-bit address space.
+ */
+ if (ws->vm64.start)
+ va = radeon_bomgr_find_va(&ws->info, &ws->vm64, size, alignment);
+ if (!va)
+ va = radeon_bomgr_find_va(&ws->info, &ws->vm32, size, alignment);
+ return va;
+}
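+/* Usage sketch (illustrative, not from the patch): when vm64 is set up,
+ * radeon_bomgr_find_va64(ws, size, 4096) returns a VA in
+ * [1ull << 32, 1ull << 33); if vm64 is absent (start == 0) or full, the VA
+ * comes from vm32 instead, and 0 means both heaps are exhausted. */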
+
static void radeon_bomgr_free_va(const struct radeon_info *info,
struct radeon_vm_heap *heap,
uint64_t va, uint64_t size)
}
}
- radeon_bomgr_free_va(&rws->info, &rws->vm64, bo->va, bo->base.size);
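+ /* A VA below vm32.end can only have come from the 32-bit heap, so the
+  * address alone identifies the owning heap. */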
+ radeon_bomgr_free_va(&rws->info,
+ bo->va < rws->vm32.end ? &rws->vm32 : &rws->vm64,
+ bo->va, bo->base.size);
}
/* Close object. */
unsigned va_gap_size;
va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
- bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm64,
- size + va_gap_size, alignment);
+
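+ /* RADEON_FLAG_32BIT asks for a VA that fits in 32 bits; the assert below
+  * sanity-checks that the returned range ends below vm32.end. */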
+ if (flags & RADEON_FLAG_32BIT) {
+ bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm32,
+ size + va_gap_size, alignment);
+ assert(bo->va + size < rws->vm32.end);
+ } else {
+ bo->va = radeon_bomgr_find_va64(rws, size + va_gap_size, alignment);
+ }
va.handle = bo->handle;
va.vm_id = 0;
if (ws->info.has_virtual_memory) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
- bo->base.size, 1 << 20);
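+ /* Imported buffers carry no recorded alignment, so 1 << 20 (1MB) is
+  * presumably a conservative choice. */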
+ bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
if (ws->info.has_virtual_memory && !bo->va) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
- bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
ws->info.max_alloc_size = MIN2(ws->info.vram_size * 0.7, ws->info.max_alloc_size);
if (ws->info.drm_minor < 40)
ws->info.max_alloc_size = MIN2(ws->info.max_alloc_size, 256*1024*1024);
+ /* Both the 32-bit and 64-bit address spaces are only 4GB each, so cap a
+  * single allocation at 3GB to leave each heap some headroom. */
+ ws->info.max_alloc_size = MIN2(ws->info.max_alloc_size, 3ull*1024*1024*1024);
/* Get max clock frequency info and convert it to MHz */
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_SCLK, NULL,
util_hash_table_destroy(ws->bo_handles);
util_hash_table_destroy(ws->bo_vas);
mtx_destroy(&ws->bo_handles_mutex);
+ mtx_destroy(&ws->vm32.mutex);
mtx_destroy(&ws->vm64.mutex);
mtx_destroy(&ws->bo_fence_lock);
ws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
ws->bo_vas = util_hash_table_create(handle_hash, handle_compare);
(void) mtx_init(&ws->bo_handles_mutex, mtx_plain);
+ (void) mtx_init(&ws->vm32.mutex, mtx_plain);
(void) mtx_init(&ws->vm64.mutex, mtx_plain);
(void) mtx_init(&ws->bo_fence_lock, mtx_plain);
+ list_inithead(&ws->vm32.holes);
list_inithead(&ws->vm64.holes);
- ws->vm64.start = ws->va_start;
+ /* The kernel currently reports va_start = 8MB. Make sure this doesn't
+  * change, since growing it would eat into the 32-bit heap set up below. */
+ if (ws->va_start > 8 * 1024 * 1024) {
+ /* Not enough 32-bit address space. */
+ radeon_winsys_destroy(&ws->base);
+ mtx_unlock(&fd_tab_mutex);
+ return NULL;
+ }
+
+ ws->vm32.start = ws->va_start;
+ ws->vm32.end = 1ull << 32;
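+
+ /* vm32 covers [va_start, 4GB); the first va_start bytes are reserved by
+  * the kernel (8MB, enforced above). */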
+
+ /* The maximum is 8GB of virtual address space, limited by the kernel.
+  * That's clearly not enough for bigger cards, like Hawaii with 4GB or
+  * 8GB of physical memory and 4GB of GART.
+  *
+  * Older kernels set the limit to 4GB, which is even worse, so they only
+  * get the 32-bit address space.
+  */
+ if (ws->info.drm_minor >= 41) {
+ ws->vm64.start = 1ull << 32;
+ ws->vm64.end = 1ull << 33;
+ }
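+
+ /* Resulting layout (sketch): vm32 = [va_start, 1ull << 32) for
+  * RADEON_FLAG_32BIT BOs and as a fallback; vm64 = [1ull << 32, 1ull << 33)
+  * for everything else on drm_minor >= 41. On older kernels vm64.start
+  * stays 0 and radeon_bomgr_find_va64 falls back to vm32. */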
/* TTM aligns the BO size to the CPU page size */
ws->info.gart_page_size = sysconf(_SC_PAGESIZE);