 desc.initial_domains = domain;

 /* Assign a buffer manager. */
-if (use_reusable_pool)
-    provider = ws->cman;
+if (use_reusable_pool) {
+    if (domain == RADEON_DOMAIN_VRAM)
+        provider = ws->cman_vram;
+    else
+        provider = ws->cman_gtt;
+} else {
     provider = ws->kman;
+}

 buffer = provider->create_buffer(provider, size, &desc.base);
 if (!buffer)
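The branch above keys the reusable pool on the buffer's initial domain, so a recycled buffer's placement always matches the request: a VRAM allocation can no longer be satisfied by a cached GTT buffer, or vice versa. Note that the test is a strict equality, so a combined placement such as RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT falls through to the GTT cache. A minimal sketch of an alternative predicate that keys on the VRAM bit instead; illustrative only, not part of the patch:

    /* Hypothetical alternative: route any request whose domain includes
     * VRAM to the VRAM cache; pure-GTT requests go to the GTT cache. */
    if (domain & RADEON_DOMAIN_VRAM)
        provider = ws->cman_vram;
    else
        provider = ws->cman_gtt;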
@@ ... @@
 pipe_mutex_destroy(ws->cmask_owner_mutex);
 pipe_mutex_destroy(ws->cs_stack_lock);
-ws->cman->destroy(ws->cman);
+ws->cman_vram->destroy(ws->cman_vram);
+ws->cman_gtt->destroy(ws->cman_gtt);
 ws->kman->destroy(ws->kman);
 if (ws->gen >= DRV_R600) {
     radeon_surface_manager_free(ws->surf_man);
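In this teardown path the two caching managers are destroyed before the kernel BO manager they wrap. The order matters: destroying a pb_cache manager flushes its cached buffers back to the underlying provider, so ws->kman has to outlive both caches (rationale inferred from the pb_cache design, not stated in the patch):

    /* Destroy in reverse creation order: caches first, provider last. */
    ws->cman_vram->destroy(ws->cman_vram); /* flushes cached BOs to kman */
    ws->cman_gtt->destroy(ws->cman_gtt);
    ws->kman->destroy(ws->kman);           /* frees the underlying BOs */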
@@ ... @@
 ws->kman = radeon_bomgr_create(ws);
 if (!ws->kman)
     goto fail;
-ws->cman = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
-if (!ws->cman)
+ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+if (!ws->cman_vram)
+    goto fail;
+ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+if (!ws->cman_gtt)
     goto fail;
 if (ws->gen >= DRV_R600) {
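Both caches wrap the same kernel manager and are created with identical parameters. Going by the four-argument pb_cache_manager_create(provider, usecs, size_factor, bypass_usage) call used here, the constants mean roughly the following (semantics taken from Mesa's pb_bufmgr_cache; a hedged reading, not patch text):

    /* Assuming pb_cache_manager_create(provider, usecs, size_factor,
     * bypass_usage):
     *   1000000 -> an unused buffer may sit in the cache for ~1 second
     *   2.0f    -> a cached buffer counts as a hit only if it is at
     *              most 2x the requested size, limiting wasted memory
     *   0       -> no PB_USAGE flags bypass the cache
     */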
@@ ... @@
 fail:
     pipe_mutex_unlock(fd_tab_mutex);
-    if (ws->cman)
-        ws->cman->destroy(ws->cman);
+    if (ws->cman_gtt)
+        ws->cman_gtt->destroy(ws->cman_gtt);
+    if (ws->cman_vram)
+        ws->cman_vram->destroy(ws->cman_vram);
     if (ws->kman)
         ws->kman->destroy(ws->kman);
     if (ws->surf_man)
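The fail label is reachable before either cache has been created (for example when radeon_bomgr_create() fails), so each manager is NULL-checked before being destroyed, in the exact reverse of the creation order above.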
@@ ... @@
 uint32_t va_start;

 struct pb_manager *kman;
-struct pb_manager *cman;
+struct pb_manager *cman_vram;
+struct pb_manager *cman_gtt;
 struct radeon_surface_manager *surf_man;

 uint32_t num_cpus; /* Number of CPUs. */