   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT,
   RADEON_MAX_SLAB_HEAPS,
+   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
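Since every slab heap is also a cached heap, RADEON_MAX_CACHED_HEAPS simply aliases RADEON_MAX_SLAB_HEAPS. A compile-time guard along these lines (hypothetical, not part of this patch; STATIC_ASSERT is Mesa's macro from util/macros.h) would document the assumption that each cached heap can own a distinct bit in the `unsigned usage` word computed in the winsyses below:

/* Hypothetical guard, not in the patch: one usage bit per cached heap
 * must fit in the unsigned usage word. */
STATIC_ASSERT(RADEON_MAX_CACHED_HEAPS <= sizeof(unsigned) * 8);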
static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
   }
}
+/* The pb cache bucket is chosen to minimize pb_cache misses.
+ * It must be between 0 and 3 inclusive.
+ */
+static inline unsigned radeon_get_pb_cache_bucket_index(enum radeon_heap heap)
+{
+   switch (heap) {
+   case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
+      return 0;
+   case RADEON_HEAP_VRAM:
+   case RADEON_HEAP_VRAM_GTT:
+      return 1;
+   case RADEON_HEAP_GTT_WC:
+      return 2;
+   case RADEON_HEAP_GTT:
+   default:
+      return 3;
+   }
+}
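For illustration only (not patch code), the bucket layout the switch above produces, written out as checks: VRAM without CPU access gets a private bucket, mappable VRAM and VRAM_GTT share one, and the two GTT variants each get their own. The helper name is made up for this sketch:

/* Illustration: the bucket layout implied by the switch above. */
static inline void radeon_check_pb_cache_buckets(void)
{
   assert(radeon_get_pb_cache_bucket_index(RADEON_HEAP_VRAM_NO_CPU_ACCESS) == 0);
   assert(radeon_get_pb_cache_bucket_index(RADEON_HEAP_VRAM) == 1);
   assert(radeon_get_pb_cache_bucket_index(RADEON_HEAP_VRAM_GTT) == 1);
   assert(radeon_get_pb_cache_bucket_index(RADEON_HEAP_GTT_WC) == 2);
   assert(radeon_get_pb_cache_bucket_index(RADEON_HEAP_GTT) == 3);
}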
+
/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
                                        enum radeon_bo_flag flags)
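The body of radeon_get_heap_index() is not shown in this hunk; per its comment, -1 is how domain/flags combinations with no cached heap opt out, and the call sites below assert a non-negative result because they only take this path for cacheable requests. A hypothetical wrapper (not in the patch) making that contract explicit:

/* Hypothetical helper, illustration only. */
static inline bool radeon_heap_is_cacheable(enum radeon_bo_domain domain,
                                            enum radeon_bo_flag flags)
{
   /* -1 means no cached heap services this domain/flags pair. */
   return radeon_get_heap_index(domain, flags) >= 0;
}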
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);
-   /* Only set one usage bit each for domains and flags, or the cache manager
-    * might consider different sets of domains / flags compatible
-    */
-   if (domain == RADEON_DOMAIN_VRAM_GTT)
-      usage = 1 << 2;
-   else
-      usage = domain >> 1;
-   assert(flags < sizeof(usage) * 8 - 3);
-   usage |= 1 << (flags + 3);
-
-   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
-   pb_cache_bucket = 0;
-   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
-      pb_cache_bucket += 1;
-   if (flags == RADEON_FLAG_GTT_WC) /* WC */
-      pb_cache_bucket += 2;
+   int heap = radeon_get_heap_index(domain, flags);
+   assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
+   usage = 1 << heap; /* Only set one usage bit for each heap. */
+
+   pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
   /* Get a buffer from the cache. */
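To contrast the two encodings, a sketch (illustration only; old_usage/new_usage are made-up names, with the old expression lifted from the removed lines and the new one from the added lines). The old scheme ORed domain bits and shifted flag bits into one word, so cache compatibility depended on two independent encodings staying collision-free; the new scheme reduces the test to "same heap":

/* Illustration only: the old and new usage encodings side by side. */
static inline unsigned old_usage(enum radeon_bo_domain domain,
                                 enum radeon_bo_flag flags)
{
   unsigned usage = domain == RADEON_DOMAIN_VRAM_GTT ? 1 << 2
                                                     : (unsigned)domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   return usage | 1 << (flags + 3); /* domain and flag bits share one word */
}

static inline unsigned new_usage(enum radeon_bo_domain domain,
                                 enum radeon_bo_flag flags)
{
   int heap = radeon_get_heap_index(domain, flags);
   assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
   return 1u << heap; /* exactly one bit per heap */
}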
   size = align(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);
-   /* Only set one usage bit each for domains and flags, or the cache manager
-    * might consider different sets of domains / flags compatible
-    */
-   if (domain == RADEON_DOMAIN_VRAM_GTT)
-      usage = 1 << 2;
-   else
-      usage = (unsigned)domain >> 1;
-   assert(flags < sizeof(usage) * 8 - 3);
-   usage |= 1 << (flags + 3);
-
-   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
-   pb_cache_bucket = 0;
-   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
-      pb_cache_bucket += 1;
-   if (flags == RADEON_FLAG_GTT_WC) /* WC */
-      pb_cache_bucket += 2;
+   int heap = radeon_get_heap_index(domain, flags);
+   assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
+   usage = 1 << heap; /* Only set one usage bit for each heap. */
+
+   pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
   bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,