}
}
-/* The pb cache bucket is chosen to minimize pb_cache misses.
- * It must be between 0 and 3 inclusive.
- */
-static inline unsigned radeon_get_pb_cache_bucket_index(enum radeon_heap heap)
-{
-   switch (heap) {
-   case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
-      return 0;
-   case RADEON_HEAP_VRAM_READ_ONLY:
-   case RADEON_HEAP_VRAM:
-      return 1;
-   case RADEON_HEAP_GTT_WC:
-   case RADEON_HEAP_GTT_WC_READ_ONLY:
-      return 2;
-   case RADEON_HEAP_GTT:
-   default:
-      return 3;
-   }
-}
-
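
The removed helper collapsed six cached heaps into four buckets, so one
bucket could mix, e.g., read-only and read-write VRAM buffers. After this
change the heap index itself is the bucket index. A minimal sketch of the
resulting identity mapping; the enum order is assumed from the removed
switch above, not quoted from radeon_winsys.h:

   enum radeon_heap {
      RADEON_HEAP_VRAM_NO_CPU_ACCESS = 0,
      RADEON_HEAP_VRAM_READ_ONLY,
      RADEON_HEAP_VRAM,
      RADEON_HEAP_GTT_WC,
      RADEON_HEAP_GTT_WC_READ_ONLY,
      RADEON_HEAP_GTT,
      RADEON_MAX_CACHED_HEAPS /* assumed to count the heaps above */
   };
   /* The pb_cache bucket is now simply the heap index itself. */

With one bucket per heap, a reclaimed buffer always has exactly the
requested heap, so no usage bits are needed to disambiguate.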
/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
                                        enum radeon_bo_flag flags)
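
The body of radeon_get_heap_index sits outside this hunk; the callers
below rely only on its contract: a heap index in
[0, RADEON_MAX_CACHED_HEAPS) for cacheable domain/flag combinations, -1
otherwise. A simplified sketch of that contract (illustrative only; the
_sketch name and the exact flag handling are assumptions, and the real
function covers more flags):

   static inline int radeon_get_heap_index_sketch(enum radeon_bo_domain domain,
                                                  enum radeon_bo_flag flags)
   {
      if (domain == RADEON_DOMAIN_VRAM)
         return flags & RADEON_FLAG_NO_CPU_ACCESS ?
                   RADEON_HEAP_VRAM_NO_CPU_ACCESS : RADEON_HEAP_VRAM;
      if (domain == RADEON_DOMAIN_GTT)
         return flags & RADEON_FLAG_GTT_WC ? RADEON_HEAP_GTT_WC
                                           : RADEON_HEAP_GTT;
      return -1; /* e.g. VRAM|GTT is not a cached heap */
   }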
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
-                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
-                                                 unsigned pb_cache_bucket)
+                                                 int heap)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
      return NULL;
   }
-   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
-                       pb_cache_bucket);
+   if (heap >= 0) {
+      pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
+                          heap);
+   }
   request.alloc_size = size;
   request.phys_alignment = alignment;
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
-   bo->base.usage = usage;
+   bo->base.usage = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
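
With the usage bits gone, heap doubles as a sentinel: callers that bypass
the reusable pool pass -1, so pb_cache_init_entry is skipped and the
buffer never enters a cache bucket on release. A hypothetical caller-side
illustration (not part of the patch):

   /* Hypothetical: allocate outside the reusable pool. */
   struct amdgpu_winsys_bo *tmp =
      amdgpu_create_bo(ws, size, alignment, domain, flags, /*heap*/ -1);
   /* tmp's cache entry stays zero-initialized, so releasing tmp destroys
    * the buffer instead of parking it in a cache bucket. */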
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
-   unsigned usage = 0, pb_cache_bucket = 0;
+   int heap = -1;
   /* VRAM implies WC. This is not optional. */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
   if (use_reusable_pool) {
-      int heap = radeon_get_heap_index(domain, flags);
+      heap = radeon_get_heap_index(domain, flags);
      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
-      usage = 1 << heap; /* Only set one usage bit for each heap. */
-
-      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
      /* Get a buffer from the cache. */
      bo = (struct amdgpu_winsys_bo*)
-         pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
-                                 pb_cache_bucket);
+         pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
      if (bo)
         return &bo->base;
   }
   /* Create a new one. */
-   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
-                         pb_cache_bucket);
+   bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
-      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
-                            pb_cache_bucket);
+      bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
      if (!bo)
         return NULL;
   }
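
Passing 0 as the usage argument to pb_cache_reclaim_buffer is safe
because pb_cache only requires the requested usage bits to be a subset of
the cached buffer's bits; a request of 0 satisfies that vacuously, so the
per-heap bucket alone decides what can be reclaimed. The subset test,
paraphrased from pb_buffer.h (the exact wording there may differ):

   static inline boolean pb_check_usage(unsigned requested, unsigned provided)
   {
      /* requested == 0 is compatible with every cached buffer */
      return (requested & provided) == requested ? TRUE : FALSE;
   }

This is why the old "one usage bit per heap" trick can be deleted
outright instead of being replaced.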
      goto fail_alloc;
   /* Create managers. */
-   pb_cache_init(&ws->bo_cache, 4,
+   pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
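
For reference, the same call with its parameters annotated; the comments
are one reading of the call site, not documentation quoted from
pb_cache.h:

   pb_cache_init(&ws->bo_cache,
                 RADEON_MAX_CACHED_HEAPS,    /* number of buckets */
                 500000,                     /* idle time before eviction (usecs) */
                 ws->check_vm ? 1.0f : 2.0f, /* max size factor for reuse */
                 0,                          /* bypass_usage: nothing bypasses */
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                                             /* cap on total cached bytes */
                 amdgpu_bo_destroy,          /* destroy callback */
                 amdgpu_bo_can_reclaim);     /* can the buffer be reused yet? */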
static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
-                                          unsigned usage,
                                          unsigned initial_domains,
                                          unsigned flags,
-                                          unsigned pb_cache_bucket)
+                                          int heap)
{
   struct radeon_bo *bo;
   struct drm_radeon_gem_create args;
   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
-   bo->base.usage = usage;
+   bo->base.usage = 0;
   bo->base.size = size;
   bo->base.vtbl = &radeon_bo_vtbl;
   bo->rws = rws;
   bo->initial_domain = initial_domains;
   bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
   (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
-   pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
-                       pb_cache_bucket);
+
+   if (heap >= 0) {
+      pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
+                          heap);
+   }
   if (rws->info.has_virtual_memory) {
      struct drm_radeon_gem_va va;
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_bo *bo;
-   unsigned usage = 0, pb_cache_bucket = 0;
+   int heap = -1;
   assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */
   /* Shared resources don't use cached heaps. */
   if (use_reusable_pool) {
-      int heap = radeon_get_heap_index(domain, flags);
+      heap = radeon_get_heap_index(domain, flags);
      assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
-      usage = 1 << heap; /* Only set one usage bit for each heap. */
-
-      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
      bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
-                                             usage, pb_cache_bucket));
+                                             0, heap));
      if (bo)
         return &bo->base;
   }
-   bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
-                         pb_cache_bucket);
+   bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
   if (!bo) {
      /* Clear the cache and try again. */
      if (ws->info.has_virtual_memory)
         pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
-      bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
-                            pb_cache_bucket);
+      bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
      if (!bo)
         return NULL;
   }
   if (!do_winsys_init(ws))
      goto fail1;
-   pb_cache_init(&ws->bo_cache, 4,
+   pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 MIN2(ws->info.vram_size, ws->info.gart_size),
                 radeon_bo_destroy,