if (flags & RADEON_FLAG_GTT_WC)
args.flags |= RADEON_GEM_GTT_WC;
- if (flags & RADEON_FLAG_CPU_ACCESS)
- args.flags |= RADEON_GEM_CPU_ACCESS;
if (flags & RADEON_FLAG_NO_CPU_ACCESS)
args.flags |= RADEON_GEM_NO_CPU_ACCESS;
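/* Decode a slab heap index back into allocation flags: bit 0 carries the
 * GTT_WC (write-combined) hint; bit 1 is unused now that CPU_ACCESS is gone. */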
if (heap & 1)
flags |= RADEON_FLAG_GTT_WC;
- if (heap & 2)
- flags |= RADEON_FLAG_CPU_ACCESS;
switch (heap >> 2) {
case 0:
return NULL;
/* Sub-allocate small buffers from slabs. */
- if (!(flags & RADEON_FLAG_HANDLE) &&
+ if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
ws->info.has_virtual_memory &&
alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
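/* Encode the flags into a slab heap index. Only GTT_WC is representable;
 * any other flag forces a real buffer allocation. */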
if (flags & RADEON_FLAG_GTT_WC)
heap |= 1;
- if (flags & RADEON_FLAG_CPU_ACCESS)
- heap |= 2;
- if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
+ if (flags & ~RADEON_FLAG_GTT_WC)
goto no_slab;
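/* The upper heap bits (heap >> 2) select the memory domain. */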
switch (domain) {
no_slab:
/* This flag is irrelevant for the cache. */
- flags &= ~RADEON_FLAG_HANDLE;
+ flags &= ~RADEON_FLAG_NO_SUBALLOC;
/* Align size to page size. This is the minimum alignment for normal
 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
 * like constant/uniform buffers, can benefit from better and more reuse. */
bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
pb_cache_bucket);
if (!bo) {
/* Clear the cache and try again. */
- pb_slabs_reclaim(&ws->bo_slabs);
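+ /* The slab allocator is only set up when virtual memory is available. */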
+ if (ws->info.has_virtual_memory)
+ pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
pb_cache_bucket);
struct radeon_bo *bo = radeon_bo(buffer);
struct radeon_drm_winsys *ws = bo->rws;
- if (!bo->handle) {
- offset += bo->va - bo->u.slab.real->va;
- bo = bo->u.slab.real;
- }
+ /* Don't allow exports of slab entries. */
+ if (!bo->handle)
+ return false;
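/* Proceed with the flink export using the BO's own GEM handle. */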
memset(&flink, 0, sizeof(flink));
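
(For orientation: a minimal sketch of the heap-index round trip implied by the
hunks above, after the CPU_ACCESS removal. Only the bit-0 GTT_WC encoding and
the heap >> 2 domain decode are visible in the diff; the exact domain numbering
below is an assumption, not code from the patch.)

    /* Hypothetical illustration, not part of the patch. */
    static unsigned slab_heap_index(enum radeon_bo_domain domain,
                                    enum radeon_bo_flag flags)
    {
       unsigned heap = 0;

       if (flags & RADEON_FLAG_GTT_WC)
          heap |= 1;            /* bit 0: write-combined hint */

       switch (domain) {        /* upper bits: memory domain */
       case RADEON_DOMAIN_VRAM:
          heap |= 0 << 2;
          break;
       case RADEON_DOMAIN_VRAM_GTT:
          heap |= 1 << 2;
          break;
       case RADEON_DOMAIN_GTT:
          heap |= 2 << 2;
          break;
       default:
          break;
       }
       return heap;
    }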