return get_valid_domain(args.value);
}
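/* First-fit VA allocator: scan the heap's hole list for a range that fits
 * the requested size and alignment, otherwise carve the range off the top
 * of the heap by advancing heap->start. Alignment padding is recorded as
 * a new hole so that address space isn't leaked. */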
-static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
+static uint64_t radeon_bomgr_find_va(const struct radeon_info *info,
+ struct radeon_vm_heap *heap,
uint64_t size, uint64_t alignment)
{
struct radeon_bo_va_hole *hole, *n;
uint64_t offset = 0, waste = 0;
/* All VM address space holes will implicitly start aligned to the
* size alignment, so we don't need to sanitize the alignment here
*/
- size = align(size, rws->info.gart_page_size);
+ size = align(size, info->gart_page_size);
- mtx_lock(&rws->bo_va_mutex);
+ mtx_lock(&heap->mutex);
/* first look for a hole */
- LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &heap->holes, list) {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
if (!waste && hole->size == size) {
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
if ((hole->size - waste) > size) {
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
}
hole->size -= (size + waste);
hole->offset += size + waste;
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
return offset;
}
}
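/* No suitable hole was found: allocate from the top of the heap */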
- offset = rws->va_offset;
+ offset = heap->start;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = offset;
- list_add(&n->list, &rws->va_holes);
+ list_add(&n->list, &heap->holes);
}
offset += waste;
- rws->va_offset += size + waste;
- mtx_unlock(&rws->bo_va_mutex);
+ heap->start += size + waste;
+ mtx_unlock(&heap->mutex);
return offset;
}
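/* Return a VA range to the heap: either lower heap->start if the range sits
 * at the top, or merge it with adjacent holes / record it as a new hole in
 * the sorted hole list. */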
-static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
+static void radeon_bomgr_free_va(const struct radeon_info *info,
+ struct radeon_vm_heap *heap,
uint64_t va, uint64_t size)
{
struct radeon_bo_va_hole *hole = NULL;
- size = align(size, rws->info.gart_page_size);
+ size = align(size, info->gart_page_size);
- mtx_lock(&rws->bo_va_mutex);
- if ((va + size) == rws->va_offset) {
- rws->va_offset = va;
+ mtx_lock(&heap->mutex);
+ if ((va + size) == heap->start) {
+ heap->start = va;
/* Delete uppermost hole if it reaches the new top */
- if (!LIST_IS_EMPTY(&rws->va_holes)) {
- hole = container_of(rws->va_holes.next, hole, list);
+ if (!LIST_IS_EMPTY(&heap->holes)) {
+ hole = container_of(heap->holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
- rws->va_offset = hole->offset;
+ heap->start = hole->offset;
list_del(&hole->list);
FREE(hole);
}
}
} else {
struct radeon_bo_va_hole *next;
- hole = container_of(&rws->va_holes, hole, list);
- LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
+ hole = container_of(&heap->holes, hole, list);
+ LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
if (next->offset < va)
break;
hole = next;
}
- if (&hole->list != &rws->va_holes) {
+ if (&hole->list != &heap->holes) {
/* Grow upper hole if it's adjacent */
if (hole->offset == (va + size)) {
hole->offset = va;
hole->size += size;
/* Merge lower hole if it's adjacent */
- if (next != hole && &next->list != &rws->va_holes &&
+ if (next != hole && &next->list != &heap->holes &&
(next->offset + next->size) == va) {
next->size += hole->size;
list_del(&hole->list);
FREE(hole);
}
goto out;
}
}
/* Grow lower hole if it's adjacent */
- if (next != hole && &next->list != &rws->va_holes &&
+ if (next != hole && &next->list != &heap->holes &&
(next->offset + next->size) == va) {
next->size += size;
goto out;
}
/* Otherwise record the freed range as a new hole so the space isn't lost */
next = CALLOC_STRUCT(radeon_bo_va_hole);
if (next) {
next->size = size;
next->offset = va;
list_add(&next->list, &hole->list);
}
}
out:
- mtx_unlock(&rws->bo_va_mutex);
+ mtx_unlock(&heap->mutex);
}
void radeon_bo_destroy(struct pb_buffer *_buf)
}
}
- radeon_bomgr_free_va(rws, bo->va, bo->base.size);
+ radeon_bomgr_free_va(&rws->info, &rws->vm64, bo->va, bo->base.size);
}
/* Close object. */
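/* When VM checking is enabled, pad the allocation; presumably this leaves an
 * unmapped gap after the buffer so out-of-bounds accesses fault instead of
 * silently hitting a neighbouring BO. */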
unsigned va_gap_size;
va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
- bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);
+ bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm64,
+ size + va_gap_size, alignment);
va.handle = bo->handle;
va.vm_id = 0;
if (ws->info.has_virtual_memory) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+ bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
if (ws->info.has_virtual_memory && !bo->va) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+ bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;