pipe_mutex_unlock(rws->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
- return b;
+ return radeon_bo(b);
}
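
radeon_bo() in the hunk above is presumably the usual one-line downcast
helper. A minimal sketch, assuming struct radeon_bo embeds its pb_buffer as
the first member (field names below are illustrative, not copied from the
real header):

    struct radeon_bo {
        struct pb_buffer base;   /* must stay first so the cast is valid */
        /* handle, va, and other winsys state follow */
    };

    static inline struct radeon_bo *radeon_bo(struct pb_buffer *buf)
    {
        return (struct radeon_bo *)buf;   /* also maps NULL to NULL */
    }

The NULL-passthrough property matters for the cache-reclaim hunk further
down, where the helper wraps a call that can legitimately return NULL.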
util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
else if (initial_domains & RADEON_DOMAIN_GTT)
rws->allocated_gtt += align(size, rws->size_align);
- return &bo->base;
+ return bo;
}
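
Both returns above suggest the internal allocator now hands back the derived
type directly. A hedged guess at the new prototype, with parameter names
read off the call site quoted below:

    static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *ws,
                                              uint64_t size,
                                              unsigned alignment,
                                              unsigned usage,
                                              unsigned domain,
                                              unsigned flags);

Returning struct radeon_bo * internally removes a cast from every in-file
caller; only the public winsys entry points keep the pb_buffer-based
signature.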
bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
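
radeon_bo_can_reclaim() keeps taking the base type: pb_cache invokes it
through a callback registered at pb_cache_init() time, so its signature is
fixed by the cache, not by this file. The callback shape assumed here (see
pb_cache.h):

    bool (*can_reclaim)(struct pb_buffer *buf);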
usage |= 1 << (flags + 3);
if (use_reusable_pool) {
- bo = pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage);
+ bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage));
if (bo)
- return bo;
+ return &bo->base;
}
bo = radeon_create_bo(ws, size, alignment, usage, domain, flags);
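
pb_cache_reclaim_buffer() still returns struct pb_buffer *, so the hunk
above casts down once at the call, a NULL result (cache miss) passes through
the cast unchanged, and return &bo->base converts back at the public
boundary before falling through to a fresh allocation. The pb_cache
signature assumed here is the early one, without the bucket-index parameter
added later:

    struct pb_buffer *
    pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                            unsigned alignment, unsigned usage);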
if (ws->info.r600_virtual_address) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(rws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
if (ws->info.r600_virtual_address && !bo->va) {
struct drm_radeon_gem_va va;
- bo->va = radeon_bomgr_find_va(rws, bo->base.size, 1 << 20);
+ bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
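
The last two hunks are the same one-character fix at two call sites. In
these functions rws appears to be the struct radeon_winsys * parameter (the
base interface) while ws is the struct radeon_drm_winsys * derived from it,
and radeon_bomgr_find_va() presumably wants the derived type. A hedged
sketch of the declarations involved:

    struct radeon_winsys;       /* base interface, what callers hold */
    struct radeon_drm_winsys;   /* DRM implementation, wraps the base */

    /* Assumed prototype; the 1 << 20 at the call sites is a 1 MiB VA
     * alignment. */
    uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *ws,
                                  uint64_t size, uint64_t alignment);

Passing rws where the derived type is expected would at best draw an
incompatible-pointer warning, so switching to ws is the type-correct call.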