memset(&desc, 0, sizeof(desc));
desc.base.alignment = alignment;
- /* Additional criteria for the cache manager. */
- desc.base.usage = domain;
+ /* Set exactly one usage bit for the domain and one for the flags;
+  * otherwise the cache manager might consider different sets of
+  * domains / flags compatible.
+  */
+ if (domain == RADEON_DOMAIN_VRAM_GTT)
+ desc.base.usage = 1 << 2;
+ else
+ desc.base.usage = domain >> 1;
+ assert(flags < sizeof(desc.base.usage) * 8 - 3);
+ desc.base.usage |= 1 << (flags + 3);
+
desc.initial_domains = domain;
desc.flags = flags;
/* Assign a buffer manager. */
- assert(flags < RADEON_NUM_CACHE_MANAGERS);
- if (use_reusable_pool) {
- if (domain == RADEON_DOMAIN_VRAM)
- provider = ws->cman_vram[flags];
- else
- provider = ws->cman_gtt[flags];
- } else {
+ if (use_reusable_pool)
+ provider = ws->cman;
+ else
provider = ws->kman;
- }
buffer = provider->create_buffer(provider, size, &desc.base);
if (!buffer)
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
- int i;
if (ws->thread) {
ws->kill_thread = 1;
pipe_mutex_destroy(ws->cmask_owner_mutex);
pipe_mutex_destroy(ws->cs_stack_lock);
- for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
- ws->cman_gtt[i]->destroy(ws->cman_gtt[i]);
- ws->cman_vram[i]->destroy(ws->cman_vram[i]);
- }
+ ws->cman->destroy(ws->cman);
ws->kman->destroy(ws->kman);
if (ws->gen >= DRV_R600) {
radeon_surface_manager_free(ws->surf_man);
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
struct radeon_drm_winsys *ws;
- int i;
pipe_mutex_lock(fd_tab_mutex);
if (!fd_tab) {
if (!ws->kman)
goto fail;
- for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
- ws->cman_vram[i] = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
- ws->info.vram_size / 8);
- if (!ws->cman_vram[i])
- goto fail;
-
- ws->cman_gtt[i] = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
- ws->info.gart_size / 8);
- if (!ws->cman_gtt[i])
- goto fail;
- }
+ ws->cman = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+ (ws->info.vram_size + ws->info.gart_size) / 8);
+ if (!ws->cman)
+ goto fail;
if (ws->gen >= DRV_R600) {
ws->surf_man = radeon_surface_manager_new(fd);
fail:
pipe_mutex_unlock(fd_tab_mutex);
- for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
- if (ws->cman_gtt[i])
- ws->cman_gtt[i]->destroy(ws->cman_gtt[i]);
- if (ws->cman_vram[i])
- ws->cman_vram[i]->destroy(ws->cman_vram[i]);
- }
+ if (ws->cman)
+ ws->cman->destroy(ws->cman);
if (ws->kman)
ws->kman->destroy(ws->kman);
if (ws->surf_man)
DRV_SI
};
-#define RADEON_NUM_CACHE_MANAGERS 8
-
struct radeon_drm_winsys {
struct radeon_winsys base;
struct pipe_reference reference;
uint32_t accel_working2;
struct pb_manager *kman;
- struct pb_manager *cman_vram[RADEON_NUM_CACHE_MANAGERS];
- struct pb_manager *cman_gtt[RADEON_NUM_CACHE_MANAGERS];
+ struct pb_manager *cman;
struct radeon_surface_manager *surf_man;
uint32_t num_cpus; /* Number of CPUs. */