From cb446dc0fa5c68f681108f4613560543aa4cf553 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Michel=20D=C3=A4nzer?=
Date: Fri, 28 Jun 2019 16:06:23 +0200
Subject: [PATCH] winsys/amdgpu: Add amdgpu_screen_winsys
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

It extends pipe_screen / radeon_winsys and references amdgpu_winsys.
Multiple amdgpu_screen_winsys instances may reference the same
amdgpu_winsys instance, which corresponds to an amdgpu_device_handle.

The purpose of amdgpu_screen_winsys is to keep a duplicate of the DRM
file descriptor passed to amdgpu_winsys_create, which will be needed
in the next change.

v2:
* Add comment in amdgpu_winsys_unref explaining why it always returns
  true (Marek Olšák)

Reviewed-by: Marek Olšák
Tested-by: Pierre-Eric Pelloux-Prayer
---
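Not part of the patch: a minimal standalone sketch of the ownership model
described above. All names (device_winsys, screen_winsys, dev_singleton,
the main() flow) are invented stand-ins and error handling is elided; only
the fcntl(F_DUPFD_CLOEXEC) duplication and the per-screen/per-device split
mirror what the patch actually does.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Stand-in for amdgpu_winsys: per-device state, shared and refcounted. */
struct device_winsys {
   int refcount;
};

/* Stand-in for amdgpu_screen_winsys: one per screen, never shared. */
struct screen_winsys {
   struct device_winsys *dws; /* like amdgpu_screen_winsys::aws */
   int fd;                    /* private dup of the caller's DRM fd */
};

static struct device_winsys *dev_singleton; /* stand-in for dev_tab */

static struct screen_winsys *screen_winsys_create(int fd)
{
   struct screen_winsys *sws = calloc(1, sizeof(*sws));

   /* Same call the patch adds: keep our own copy of the fd. */
   sws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);

   if (!dev_singleton) {
      dev_singleton = calloc(1, sizeof(*dev_singleton));
      dev_singleton->refcount = 1;
   } else {
      dev_singleton->refcount++; /* another screen on the same device */
   }
   sws->dws = dev_singleton;
   return sws;
}

static void screen_winsys_destroy(struct screen_winsys *sws)
{
   /* Device-level state goes away only with the last screen. */
   if (--sws->dws->refcount == 0) {
      free(sws->dws);
      dev_singleton = NULL;
   }
   close(sws->fd); /* each screen closes only its own dup */
   free(sws);
}

int main(void)
{
   int fd = open("/dev/null", O_RDWR); /* stand-in for a DRM render node */
   struct screen_winsys *a = screen_winsys_create(fd);
   struct screen_winsys *b = screen_winsys_create(fd);

   printf("a->dws == b->dws: %d, refcount: %d\n",
          a->dws == b->dws, a->dws->refcount);

   screen_winsys_destroy(a);
   screen_winsys_destroy(b);
   close(fd);
   return 0;
}

Running the sketch prints a shared pointer and a refcount of 2 for the two
screens, which is the sharing behaviour amdgpu_winsys_destroy relies on below.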
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c     |  36 +--
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.h     |  10 +-
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c     |  36 +--
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.h     |   2 +-
 .../winsys/amdgpu/drm/amdgpu_surface.c        |   2 +-
 src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c | 222 ++++++++++--------
 src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h |  17 +-
 7 files changed, 183 insertions(+), 142 deletions(-)

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 37098ab305f..f1a6cc41af7 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -50,12 +50,6 @@ struct amdgpu_sparse_backing_chunk {
    uint32_t begin, end;
 };
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
-                 uint64_t size,
-                 unsigned alignment,
-                 enum radeon_bo_domain domain,
-                 enum radeon_bo_flag flags);
 static void amdgpu_bo_unmap(struct pb_buffer *buf);
 
 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
@@ -251,9 +245,9 @@ static bool amdgpu_bo_do_map(struct amdgpu_winsys_bo *bo, void **cpu)
    return true;
 }
 
-static void *amdgpu_bo_map(struct pb_buffer *buf,
-                           struct radeon_cmdbuf *rcs,
-                           enum pipe_transfer_usage usage)
+void *amdgpu_bo_map(struct pb_buffer *buf,
+                    struct radeon_cmdbuf *rcs,
+                    enum pipe_transfer_usage usage)
 {
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
    struct amdgpu_winsys_bo *real;
@@ -658,7 +652,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
    }
    assert(slab_size != 0);
 
-   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
+   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
                                                     slab_size, slab_size,
                                                     domains, flags));
    if (!slab->buffer)
@@ -833,7 +827,7 @@ sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_
                      bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
       size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
 
-      buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
+      buf = amdgpu_bo_create(bo->ws, size, RADEON_SPARSE_PAGE_SIZE,
                              bo->initial_domain,
                              bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
       if (!buf) {
@@ -1298,14 +1292,13 @@ static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
    amdgpu_bo_set_metadata(bo->bo, &metadata);
 }
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
+struct pb_buffer *
+amdgpu_bo_create(struct amdgpu_winsys *ws,
                  uint64_t size,
                  unsigned alignment,
                  enum radeon_bo_domain domain,
                  enum radeon_bo_flag flags)
 {
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo;
    int heap = -1;
 
@@ -1402,6 +1395,17 @@ no_slab:
    return &bo->base;
 }
 
+static struct pb_buffer *
+amdgpu_buffer_create(struct radeon_winsys *ws,
+                     uint64_t size,
+                     unsigned alignment,
+                     enum radeon_bo_domain domain,
+                     enum radeon_bo_flag flags)
+{
+   return amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
+                           flags);
+}
+
 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                                struct winsys_handle *whandle,
                                                unsigned vm_alignment,
@@ -1645,14 +1649,14 @@ static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
    return ((struct amdgpu_winsys_bo*)buf)->va;
 }
 
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
    ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
    ws->base.buffer_map = amdgpu_bo_map;
    ws->base.buffer_unmap = amdgpu_bo_unmap;
    ws->base.buffer_wait = amdgpu_bo_wait;
-   ws->base.buffer_create = amdgpu_bo_create;
+   ws->base.buffer_create = amdgpu_buffer_create;
    ws->base.buffer_from_handle = amdgpu_bo_from_handle;
    ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
    ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
index 88f4241327d..18ef7e1fa51 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
@@ -125,8 +125,16 @@ struct amdgpu_slab {
 };
 
 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
+struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
+                                   uint64_t size,
+                                   unsigned alignment,
+                                   enum radeon_bo_domain domain,
+                                   enum radeon_bo_flag flags);
 void amdgpu_bo_destroy(struct pb_buffer *_buf);
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws);
+void *amdgpu_bo_map(struct pb_buffer *buf,
+                    struct radeon_cmdbuf *rcs,
+                    enum pipe_transfer_usage usage);
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);
 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 024d6131621..976ec7770f0 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -672,7 +672,8 @@ static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
    return index;
 }
 
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
+                                 struct amdgpu_ib *ib,
                                  enum ring_type ring_type)
 {
    struct pb_buffer *pb;
@@ -696,18 +697,18 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
    buffer_size = MIN2(buffer_size, max_size);
    buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
 
-   pb = ws->base.buffer_create(&ws->base, buffer_size,
-                               ws->info.gart_page_size,
-                               RADEON_DOMAIN_GTT,
-                               RADEON_FLAG_NO_INTERPROCESS_SHARING |
-                               (ring_type == RING_GFX ||
-                                ring_type == RING_COMPUTE ||
-                                ring_type == RING_DMA ?
-                                RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
+   pb = amdgpu_bo_create(ws, buffer_size,
+                         ws->info.gart_page_size,
+                         RADEON_DOMAIN_GTT,
+                         RADEON_FLAG_NO_INTERPROCESS_SHARING |
+                         (ring_type == RING_GFX ||
+                          ring_type == RING_COMPUTE ||
+                          ring_type == RING_DMA ?
+                          RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
    if (!pb)
       return false;
 
-   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
+   mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
    if (!mapped) {
       pb_reference(&pb, NULL);
       return false;
@@ -740,10 +741,9 @@ static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
    }
 }
 
-static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
+static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws, struct amdgpu_cs *cs,
                               enum ib_type ib_type)
 {
-   struct amdgpu_winsys *aws = amdgpu_winsys(ws);
    /* Small IBs are better than big IBs, because the GPU goes idle quicker
     * and there is less waiting for buffers and fences. Proof:
     *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
@@ -785,7 +785,7 @@ static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
    /* Allocate a new buffer for IBs if the current buffer is all used. */
    if (!ib->big_ib_buffer ||
        ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
-      if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
+      if (!amdgpu_ib_new_buffer(ws, ib, cs->ring_type))
         return false;
    }
 
@@ -987,7 +987,7 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
 
-   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
+   if (!amdgpu_get_new_ib(ctx->ws, cs, IB_MAIN)) {
       amdgpu_destroy_cs_context(&cs->csc2);
       amdgpu_destroy_cs_context(&cs->csc1);
       FREE(cs);
@@ -1013,7 +1013,7 @@ amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *ib,
       return NULL;
 
    /* Allocate the compute IB. */
-   if (!amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE))
+   if (!amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE))
       return NULL;
 
    if (uses_gds_ordered_append) {
@@ -1768,9 +1768,9 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
       amdgpu_cs_context_cleanup(cs->csc);
    }
 
-   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
+   amdgpu_get_new_ib(ws, cs, IB_MAIN);
    if (cs->compute_ib.ib_mapped)
-      amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE);
+      amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE);
 
    cs->main.base.used_gart = 0;
    cs->main.base.used_vram = 0;
@@ -1810,7 +1810,7 @@ static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
    return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
 }
 
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.ctx_create = amdgpu_ctx_create;
    ws->base.ctx_destroy = amdgpu_ctx_destroy;
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
index 56788cdc27e..507859c2d59 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
@@ -276,7 +276,7 @@ void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                        unsigned num_fences,
                        struct pipe_fence_handle **fences);
 void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
 void amdgpu_cs_submit_ib(void *job, int thread_index);
 
 #endif
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c b/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
index aba365f0f49..1dff1b3447e 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_surface.c
@@ -104,7 +104,7 @@ static int amdgpu_surface_init(struct radeon_winsys *rws,
    return ac_compute_surface(ws->addrlib, &ws->info, &config, mode, surf);
 }
 
-void amdgpu_surface_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.surface_init = amdgpu_surface_init;
 }
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index cf1f79c0ec2..562759a9210 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -38,6 +38,7 @@
 #include <xf86drm.h>
 #include <stdio.h>
 #include <sys/stat.h>
+#include <fcntl.h>
 #include "amd/common/ac_llvm_util.h"
 #include "amd/common/sid.h"
@@ -119,14 +120,6 @@ fail:
 
 static void do_winsys_deinit(struct amdgpu_winsys *ws)
 {
-   AddrDestroy(ws->addrlib);
-   amdgpu_device_deinitialize(ws->dev);
-}
-
-static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
-{
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-
    if (ws->reserve_vmid)
       amdgpu_vm_unreserve_vmid(ws->dev, 0);
@@ -142,7 +135,41 @@ static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
    util_hash_table_destroy(ws->bo_export_table);
    simple_mtx_destroy(&ws->global_bo_list_lock);
    simple_mtx_destroy(&ws->bo_export_table_lock);
-   do_winsys_deinit(ws);
+
+   AddrDestroy(ws->addrlib);
+   amdgpu_device_deinitialize(ws->dev);
+   FREE(ws);
+}
+
+static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
+{
+   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
+   struct amdgpu_winsys *ws = sws->aws;
+   bool destroy;
+
+   /* When the reference counter drops to zero, remove the device pointer
+    * from the table.
+    * This must happen while the mutex is locked, so that
+    * amdgpu_winsys_create in another thread doesn't get the winsys
+    * from the table when the counter drops to 0.
+    */
+   simple_mtx_lock(&dev_tab_mutex);
+
+   destroy = pipe_reference(&ws->reference, NULL);
+   if (destroy && dev_tab) {
+      util_hash_table_remove(dev_tab, ws->dev);
+      if (util_hash_table_count(dev_tab) == 0) {
+         util_hash_table_destroy(dev_tab);
+         dev_tab = NULL;
+      }
+   }
+
+   simple_mtx_unlock(&dev_tab_mutex);
+
+   if (destroy)
+      do_winsys_deinit(ws);
+
+   close(sws->fd);
    FREE(rws);
 }
@@ -246,27 +273,11 @@ static int compare_pointers(void *key1, void *key2)
 
 static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
 {
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-   bool destroy;
-
-   /* When the reference counter drops to zero, remove the device pointer
-    * from the table.
-    * This must happen while the mutex is locked, so that
-    * amdgpu_winsys_create in another thread doesn't get the winsys
-    * from the table when the counter drops to 0. */
-   simple_mtx_lock(&dev_tab_mutex);
-
-   destroy = pipe_reference(&ws->reference, NULL);
-   if (destroy && dev_tab) {
-      util_hash_table_remove(dev_tab, ws->dev);
-      if (util_hash_table_count(dev_tab) == 0) {
-         util_hash_table_destroy(dev_tab);
-         dev_tab = NULL;
-      }
-   }
-
-   simple_mtx_unlock(&dev_tab_mutex);
-   return destroy;
+   /* radeon_winsys corresponds to amdgpu_screen_winsys, which is never
+    * referenced multiple times, so amdgpu_winsys_destroy always needs to be
+    * called. It handles reference counting for amdgpu_winsys.
+    */
+   return true;
 }
 
 static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
@@ -282,10 +293,17 @@ PUBLIC struct radeon_winsys *
 amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                      radeon_screen_create_t screen_create)
 {
-   struct amdgpu_winsys *ws;
+   struct amdgpu_screen_winsys *ws;
+   struct amdgpu_winsys *aws;
    amdgpu_device_handle dev;
    uint32_t drm_major, drm_minor, r;
 
+   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
+   if (!ws)
+      return NULL;
+
+   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+
    /* Look up the winsys from the dev table. */
    simple_mtx_lock(&dev_tab_mutex);
    if (!dev_tab)
@@ -295,15 +313,14 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
     * for the same fd. */
    r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
    if (r) {
-      simple_mtx_unlock(&dev_tab_mutex);
       fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
-      return NULL;
+      goto fail;
    }
 
    /* Lookup a winsys if we have already created one for this device. */
-   ws = util_hash_table_get(dev_tab, dev);
-   if (ws) {
-      pipe_reference(NULL, &ws->reference);
+   aws = util_hash_table_get(dev_tab, dev);
+   if (aws) {
+      pipe_reference(NULL, &aws->reference);
       simple_mtx_unlock(&dev_tab_mutex);
 
       /* Release the device handle, because we don't need it anymore.
@@ -311,57 +328,83 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
        * has its own device handle.
        */
       amdgpu_device_deinitialize(dev);
-      return &ws->base;
-   }
+   } else {
+      /* Create a new winsys. */
+      aws = CALLOC_STRUCT(amdgpu_winsys);
+      if (!aws)
+         goto fail;
+
+      aws->dev = dev;
+      aws->info.drm_major = drm_major;
+      aws->info.drm_minor = drm_minor;
+
+      if (!do_winsys_init(aws, config, fd))
+         goto fail_alloc;
+
+      /* Create managers. */
+      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
+                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
+                    (aws->info.vram_size + aws->info.gart_size) / 8,
+                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
+
+      unsigned min_slab_order = 9; /* 512 bytes */
+      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
+      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
+                                               NUM_SLAB_ALLOCATORS;
+
+      /* Divide the size order range among slab managers. */
+      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
+         unsigned min_order = min_slab_order;
+         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
+                                   max_slab_order);
+
+         if (!pb_slabs_init(&aws->bo_slabs[i],
+                            min_order, max_order,
+                            RADEON_MAX_SLAB_HEAPS,
+                            aws,
+                            amdgpu_bo_can_reclaim_slab,
+                            amdgpu_bo_slab_alloc,
+                            amdgpu_bo_slab_free)) {
+            amdgpu_winsys_destroy(&ws->base);
+            simple_mtx_unlock(&dev_tab_mutex);
+            return NULL;
+         }
 
-   /* Create a new winsys. */
-   ws = CALLOC_STRUCT(amdgpu_winsys);
-   if (!ws)
-      goto fail;
+         min_slab_order = max_order + 1;
+      }
 
-   ws->dev = dev;
-   ws->info.drm_major = drm_major;
-   ws->info.drm_minor = drm_minor;
+      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;
 
-   if (!do_winsys_init(ws, config, fd))
-      goto fail_alloc;
+      /* init reference */
+      pipe_reference_init(&aws->reference, 1);
 
-   /* Create managers. */
-   pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
-                 500000, ws->check_vm ? 1.0f : 2.0f, 0,
-                 (ws->info.vram_size + ws->info.gart_size) / 8,
-                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
+      LIST_INITHEAD(&aws->global_bo_list);
+      aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
 
-   unsigned min_slab_order = 9; /* 512 bytes */
-   unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
-   unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
-                                            NUM_SLAB_ALLOCATORS;
+      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
+      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
+      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);
 
-   /* Divide the size order range among slab managers. */
-   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
-      unsigned min_order = min_slab_order;
-      unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
-                                max_slab_order);
-
-      if (!pb_slabs_init(&ws->bo_slabs[i],
-                         min_order, max_order,
-                         RADEON_MAX_SLAB_HEAPS,
-                         ws,
-                         amdgpu_bo_can_reclaim_slab,
-                         amdgpu_bo_slab_alloc,
-                         amdgpu_bo_slab_free)) {
+      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
+                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
          amdgpu_winsys_destroy(&ws->base);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }
 
-      min_slab_order = max_order + 1;
-   }
+      util_hash_table_set(dev_tab, dev, aws);
 
-   ws->info.min_alloc_size = 1 << ws->bo_slabs[0].min_order;
+      if (aws->reserve_vmid) {
+         r = amdgpu_vm_reserve_vmid(dev, 0);
+         if (r) {
+            amdgpu_winsys_destroy(&ws->base);
+            simple_mtx_unlock(&dev_tab_mutex);
+            return NULL;
+         }
+      }
+   }
 
-   /* init reference */
-   pipe_reference_init(&ws->reference, 1);
+   ws->aws = aws;
 
    /* Set functions. */
    ws->base.unref = amdgpu_winsys_unref;
@@ -376,20 +419,6 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    amdgpu_cs_init_functions(ws);
    amdgpu_surface_init_functions(ws);
 
-   LIST_INITHEAD(&ws->global_bo_list);
-   ws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
-
-   (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
-   (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
-   (void) simple_mtx_init(&ws->bo_export_table_lock, mtx_plain);
-
-   if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
-                        UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
-      amdgpu_winsys_destroy(&ws->base);
-      simple_mtx_unlock(&dev_tab_mutex);
-      return NULL;
-   }
-
    /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
@@ -402,16 +431,6 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
       return NULL;
    }
 
-   util_hash_table_set(dev_tab, dev, ws);
-
-   if (ws->reserve_vmid) {
-      r = amdgpu_vm_reserve_vmid(dev, 0);
-      if (r) {
-         fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
-         goto fail_cache;
-      }
-   }
-
    /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
@@ -419,12 +438,11 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    simple_mtx_unlock(&dev_tab_mutex);
 
    return &ws->base;
 
-fail_cache:
-   pb_cache_deinit(&ws->bo_cache);
-   do_winsys_deinit(ws);
 fail_alloc:
-   FREE(ws);
+   FREE(aws);
 fail:
+   close(ws->fd);
+   FREE(ws);
    simple_mtx_unlock(&dev_tab_mutex);
    return NULL;
 }
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
index 4f0b1262e30..1ff16072829 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
@@ -41,7 +41,6 @@ struct amdgpu_cs;
 #define NUM_SLAB_ALLOCATORS 3
 
 struct amdgpu_winsys {
-   struct radeon_winsys base;
    struct pipe_reference reference;
 
    struct pb_cache bo_cache;
@@ -94,12 +93,24 @@ struct amdgpu_winsys {
    simple_mtx_t bo_export_table_lock;
 };
 
+struct amdgpu_screen_winsys {
+   struct radeon_winsys base;
+   struct amdgpu_winsys *aws;
+   int fd;
+};
+
+static inline struct amdgpu_screen_winsys *
+amdgpu_screen_winsys(struct radeon_winsys *base)
+{
+   return (struct amdgpu_screen_winsys*)base;
+}
+
 static inline struct amdgpu_winsys *
 amdgpu_winsys(struct radeon_winsys *base)
 {
-   return (struct amdgpu_winsys*)base;
+   return amdgpu_screen_winsys(base)->aws;
 }
 
-void amdgpu_surface_init_functions(struct amdgpu_winsys *ws);
+void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws);
 
 #endif
-- 
2.30.2
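A closing note on the v2 change, again not part of the patch: the reason
amdgpu_winsys_unref can now unconditionally return true is the contract
between unref and destroy on the caller side. radeon_winsys::unref is
documented to return "true if the winsys and screen should be destroyed";
everything else below (struct winsys, screen_teardown, the main() flow) is
a stand-in for the calling gallium code, not code from Mesa:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct winsys {
   /* Mirrors radeon_winsys::unref's documented contract: drop one
    * reference, return true if the caller should go on to destroy. */
   bool (*unref)(struct winsys *ws);
   void (*destroy)(struct winsys *ws);
};

/* With this patch the per-screen wrapper is never shared, so unref can
 * unconditionally say "yes, destroy me"... */
static bool screen_winsys_unref(struct winsys *ws)
{
   return true;
}

/* ...and destroy itself performs the device-level refcounting that unref
 * used to do (compare amdgpu_winsys_destroy in the patch above). */
static void screen_winsys_destroy(struct winsys *ws)
{
   printf("destroy reached unconditionally\n");
   free(ws);
}

/* Stand-in for the screen teardown pattern that consumes the two hooks. */
static void screen_teardown(struct winsys *ws)
{
   if (ws->unref(ws))
      ws->destroy(ws);
}

int main(void)
{
   struct winsys *ws = malloc(sizeof(*ws));
   ws->unref = screen_winsys_unref;
   ws->destroy = screen_winsys_destroy;
   screen_teardown(ws);
   return 0;
}

Before this change, one shared amdgpu_winsys served all screens on a
device, so unref returned false until the last screen let go; now the
refcount lives one level down, on the shared amdgpu_winsys.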