    desc.flags = flags;

    /* Assign a buffer manager. */
+   assert(flags < RADEON_NUM_CACHE_MANAGERS);
    if (use_reusable_pool) {
-       if (domain == RADEON_DOMAIN_VRAM) {
-           if (flags & RADEON_FLAG_GTT_WC)
-               provider = ws->cman_vram_gtt_wc;
-           else
-               provider = ws->cman_vram;
-       } else if (flags & RADEON_FLAG_GTT_WC) {
-           provider = ws->cman_gtt_wc;
-       } else {
-           provider = ws->cman_gtt;
-       }
+       if (domain == RADEON_DOMAIN_VRAM)
+           provider = ws->cman_vram[flags];
+       else
+           provider = ws->cman_gtt[flags];
    } else {
        provider = ws->kman;
    }
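The if/else ladder collapses into a direct table lookup: each combination of buffer-flag bits selects its own cache manager, and the new assert guarantees flags stays inside the array. A minimal sketch of the mapping, assuming RADEON_FLAG_GTT_WC == 1 << 0 (the patch does not show the enum values, so the slot numbers are illustrative only):

    /* Hypothetical flag value -- not shown in the patch: */
    enum { EXAMPLE_RADEON_FLAG_GTT_WC = 1 << 0 };

    /* old named manager      new array slot
     * cman_vram           -> cman_vram[0]
     * cman_vram_gtt_wc    -> cman_vram[EXAMPLE_RADEON_FLAG_GTT_WC]  (slot 1)
     * cman_gtt            -> cman_gtt[0]
     * cman_gtt_wc         -> cman_gtt[EXAMPLE_RADEON_FLAG_GTT_WC]   (slot 1)
     */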
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
+   int i;
    if (ws->thread) {
        ws->kill_thread = 1;
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);
-   ws->cman_vram->destroy(ws->cman_vram);
-   ws->cman_vram_gtt_wc->destroy(ws->cman_vram_gtt_wc);
-   ws->cman_gtt->destroy(ws->cman_gtt);
-   ws->cman_gtt_wc->destroy(ws->cman_gtt_wc);
+   for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
+       ws->cman_gtt[i]->destroy(ws->cman_gtt[i]);
+       ws->cman_vram[i]->destroy(ws->cman_vram[i]);
+   }
    ws->kman->destroy(ws->kman);
    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
    struct radeon_drm_winsys *ws;
+   int i;
    pipe_mutex_lock(fd_tab_mutex);
    if (!fd_tab) {
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
-   ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
-                                           ws->info.vram_size / 8);
-   if (!ws->cman_vram)
-       goto fail;
-   ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+
+   for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
+       ws->cman_vram[i] = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
                                                   ws->info.vram_size / 8);
-   if (!ws->cman_vram_gtt_wc)
-       goto fail;
-   ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
-                                          ws->info.gart_size / 8);
-   if (!ws->cman_gtt)
-       goto fail;
-   ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
-                                              ws->info.gart_size / 8);
-   if (!ws->cman_gtt_wc)
-       goto fail;
+       if (!ws->cman_vram[i])
+           goto fail;
+
+       ws->cman_gtt[i] = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+                                                 ws->info.gart_size / 8);
+       if (!ws->cman_gtt[i])
+           goto fail;
+   }
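For reference, an annotated copy of one of the calls above; the roles given to each argument reflect my reading of gallium's pb_cache_manager_create and are not spelled out in the patch itself:

    /* Annotation sketch -- argument roles are my interpretation: */
    ws->cman_vram[i] =
        pb_cache_manager_create(ws->kman,  /* provider doing the real allocations */
                                1000000,   /* usecs a freed buffer may sit in the
                                              cache before eviction (1 second) */
                                2.0f,      /* reuse a cached buffer if it is at
                                              most 2x the requested size */
                                0,         /* no usage bits bypass the cache */
                                ws->info.vram_size / 8); /* cap cache at 1/8 VRAM */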
    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
fail:
    pipe_mutex_unlock(fd_tab_mutex);
-   if (ws->cman_gtt)
-       ws->cman_gtt->destroy(ws->cman_gtt);
-   if (ws->cman_gtt_wc)
-       ws->cman_gtt_wc->destroy(ws->cman_gtt_wc);
-   if (ws->cman_vram)
-       ws->cman_vram->destroy(ws->cman_vram);
-   if (ws->cman_vram_gtt_wc)
-       ws->cman_vram_gtt_wc->destroy(ws->cman_vram_gtt_wc);
+   for (i = 0; i < RADEON_NUM_CACHE_MANAGERS; i++) {
+       if (ws->cman_gtt[i])
+           ws->cman_gtt[i]->destroy(ws->cman_gtt[i]);
+       if (ws->cman_vram[i])
+           ws->cman_vram[i]->destroy(ws->cman_vram[i]);
+   }
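The per-slot NULL checks only work if slots the loop never reached start out zeroed. That holds if the winsys is allocated with a zeroing allocator; the allocation site is outside this excerpt, so the line below is an assumption about it rather than a quote:

    /* Assumed elsewhere in radeon_drm_winsys_create (not shown in this
     * patch): zero-initialized allocation, e.g.
     *     ws = CALLOC_STRUCT(radeon_drm_winsys);
     * so cman_vram[i]/cman_gtt[i] stay NULL until created and the
     * failure path can safely skip slots that were never filled in. */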
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
    DRV_SI
};

+#define RADEON_NUM_CACHE_MANAGERS 8
+
struct radeon_drm_winsys {
    struct radeon_winsys base;
    struct pipe_reference reference;
    uint32_t accel_working2;
    struct pb_manager *kman;
-   struct pb_manager *cman_vram;
-   struct pb_manager *cman_vram_gtt_wc;
-   struct pb_manager *cman_gtt;
-   struct pb_manager *cman_gtt_wc;
+   struct pb_manager *cman_vram[RADEON_NUM_CACHE_MANAGERS];
+   struct pb_manager *cman_gtt[RADEON_NUM_CACHE_MANAGERS];
    struct radeon_surface_manager *surf_man;
    uint32_t num_cpus; /* Number of CPUs. */
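Taken together with the allocator change above, the arrays turn manager selection into a pure lookup. A compact sketch of that selection as a helper; the function name, the RADEON_ALL_BO_FLAGS mask, and the enum radeon_bo_domain parameter type are hypothetical, not part of the patch:

    /* Hypothetical helper, not in the patch -- shows the invariant the
     * sizing relies on: every legal flags value (8 slots suggests three
     * flag bits) is a valid index into both arrays. */
    #define RADEON_ALL_BO_FLAGS (RADEON_NUM_CACHE_MANAGERS - 1)

    static inline struct pb_manager *
    radeon_get_cman(struct radeon_drm_winsys *ws,
                    enum radeon_bo_domain domain, unsigned flags)
    {
        assert((flags & ~RADEON_ALL_BO_FLAGS) == 0);
        return domain == RADEON_DOMAIN_VRAM ? ws->cman_vram[flags]
                                            : ws->cman_gtt[flags];
    }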