X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fwinsys%2Famdgpu%2Fdrm%2Famdgpu_bo.c;h=3ee38b8a79f8a19f5b11d142d1d35570d60912fd;hb=82aa07f81fcc5ed696eea16f48cec7e39c3cd3d1;hp=27652110f03588aad920269c0624e7a10104cbf6;hpb=529cdce799f30606ee857599f34de3ec9014dc09;p=mesa.git

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 27652110f03..3ee38b8a79f 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -27,7 +27,8 @@
 #include "amdgpu_cs.h"
 
-#include "os/os_time.h"
+#include "util/os_time.h"
+#include "util/u_hash_table.h"
 #include "state_tracker/drm_driver.h"
 #include 
 #include 
@@ -38,6 +39,10 @@
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
 #endif
 
+#ifndef AMDGPU_VA_RANGE_HIGH
+#define AMDGPU_VA_RANGE_HIGH 0x2
+#endif
+
 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
 #define DEBUG_SPARSE_COMMITS 0
 
@@ -90,7 +95,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
       unsigned idle_fences;
       bool buffer_idle;
 
-      mtx_lock(&ws->bo_fence_lock);
+      simple_mtx_lock(&ws->bo_fence_lock);
 
       for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
          if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
@@ -106,13 +111,13 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
       bo->num_fences -= idle_fences;
 
       buffer_idle = !bo->num_fences;
-      mtx_unlock(&ws->bo_fence_lock);
+      simple_mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    } else {
      bool buffer_idle = true;
 
-      mtx_lock(&ws->bo_fence_lock);
+      simple_mtx_lock(&ws->bo_fence_lock);
       while (bo->num_fences && buffer_idle) {
          struct pipe_fence_handle *fence = NULL;
          bool fence_idle = false;
@@ -120,12 +125,12 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
          amdgpu_fence_reference(&fence, bo->fences[0]);
 
          /* Wait for the fence. */
-         mtx_unlock(&ws->bo_fence_lock);
+         simple_mtx_unlock(&ws->bo_fence_lock);
          if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
-         mtx_lock(&ws->bo_fence_lock);
+         simple_mtx_lock(&ws->bo_fence_lock);
 
         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
@@ -139,7 +144,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
          amdgpu_fence_reference(&fence, NULL);
       }
 
-      mtx_unlock(&ws->bo_fence_lock);
+      simple_mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    }
@@ -164,16 +169,21 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
 void amdgpu_bo_destroy(struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
+   struct amdgpu_winsys *ws = bo->ws;
 
    assert(bo->bo && "must not be called for slab entries");
 
-   if (bo->ws->debug_all_bos) {
-      mtx_lock(&bo->ws->global_bo_list_lock);
+   if (ws->debug_all_bos) {
+      simple_mtx_lock(&ws->global_bo_list_lock);
       LIST_DEL(&bo->u.real.global_list_item);
-      bo->ws->num_buffers--;
-      mtx_unlock(&bo->ws->global_bo_list_lock);
+      ws->num_buffers--;
+      simple_mtx_unlock(&ws->global_bo_list_lock);
    }
 
+   simple_mtx_lock(&ws->bo_export_table_lock);
+   util_hash_table_remove(ws->bo_export_table, bo->bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+
    amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
    amdgpu_va_range_free(bo->u.real.va_handle);
    amdgpu_bo_free(bo->bo);
@@ -181,16 +191,16 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
    amdgpu_bo_remove_fences(bo);
 
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
+      ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
+      ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
 
    if (bo->u.real.map_count >= 1) {
       if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-         bo->ws->mapped_vram -= bo->base.size;
+         ws->mapped_vram -= bo->base.size;
       else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-         bo->ws->mapped_gtt -= bo->base.size;
-      bo->ws->num_mapped_buffers--;
+         ws->mapped_gtt -= bo->base.size;
+      ws->num_mapped_buffers--;
    }
 
    FREE(bo);
@@ -209,7 +219,7 @@ static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
 }
 
 static void *amdgpu_bo_map(struct pb_buffer *buf,
-                           struct radeon_winsys_cs *rcs,
+                           struct radeon_cmdbuf *rcs,
                            enum pipe_transfer_usage usage)
 {
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
@@ -235,7 +245,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
-              cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+              cs->flush_cs(cs->flush_data,
+                           RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }
 
@@ -245,7 +256,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-              cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+              cs->flush_cs(cs->flush_data,
+                           RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
               return NULL;
            }
 
@@ -268,7 +280,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
-                 cs->flush_cs(cs->flush_data, 0, NULL);
+                 cs->flush_cs(cs->flush_data,
+                              RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
@@ -282,7 +295,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
          /* Mapping for write. */
          if (cs) {
             if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, 0, NULL);
+               cs->flush_cs(cs->flush_data,
+                            RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
             } else {
                /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                if (p_atomic_read(&bo->num_active_ioctls))
@@ -363,20 +377,19 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
    assert(bo->bo);
 
    if (ws->debug_all_bos) {
-      mtx_lock(&ws->global_bo_list_lock);
+      simple_mtx_lock(&ws->global_bo_list_lock);
       LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
       ws->num_buffers++;
-      mtx_unlock(&ws->global_bo_list_lock);
+      simple_mtx_unlock(&ws->global_bo_list_lock);
    }
 }
 
 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                  uint64_t size,
                                                  unsigned alignment,
-                                                 unsigned usage,
                                                  enum radeon_bo_domain initial_domain,
                                                  unsigned flags,
-                                                 unsigned pb_cache_bucket)
+                                                 int heap)
 {
    struct amdgpu_bo_alloc_request request = {0};
    amdgpu_bo_handle buf_handle;
@@ -386,14 +399,18 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
    unsigned va_gap_size;
    int r;
 
-   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
+   /* VRAM or GTT must be specified, but not both at the same time. */
+   assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
+
    bo = CALLOC_STRUCT(amdgpu_winsys_bo);
    if (!bo) {
       return NULL;
    }
 
-   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
-                       pb_cache_bucket);
+   if (heap >= 0) {
+      pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
+                          heap);
+   }
    request.alloc_size = size;
    request.phys_alignment = alignment;
 
@@ -402,13 +419,24 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
    if (initial_domain & RADEON_DOMAIN_GTT)
       request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
 
+   /* Since VRAM and GTT have almost the same performance on APUs, we could
+    * just set GTT. However, in order to decrease GTT(RAM) usage, which is
+    * shared with the OS, allow VRAM placements too. The idea is not to use
+    * VRAM usefully, but to use it so that it's not unused and wasted.
+    */
+   if (!ws->info.has_dedicated_vram)
+      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
+
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
       request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
    if (flags & RADEON_FLAG_GTT_WC)
       request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
    if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
-       ws->info.drm_minor >= 20)
+       ws->info.has_local_buffers)
       request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
+   if (ws->zero_all_vram_allocs &&
+       (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
+      request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
    r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
    if (r) {
@@ -423,17 +451,26 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
    if (size > ws->info.pte_fragment_size)
       alignment = MAX2(alignment, ws->info.pte_fragment_size);
    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
-                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
+                             size + va_gap_size, alignment, 0, &va, &va_handle,
+                             (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
+                             AMDGPU_VA_RANGE_HIGH);
    if (r)
       goto error_va_alloc;
 
-   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
+   unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
+                       AMDGPU_VM_PAGE_EXECUTABLE;
+
+   if (!(flags & RADEON_FLAG_READ_ONLY))
+      vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+
+   r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
+                           AMDGPU_VA_OP_MAP);
    if (r)
       goto error_va_map;
 
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
-   bo->base.usage = usage;
+   bo->base.usage = 0;
    bo->base.size = size;
    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
    bo->ws = ws;
@@ -449,6 +486,8 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
    else if (initial_domain & RADEON_DOMAIN_GTT)
       ws->allocated_gtt += align64(size, ws->info.gart_page_size);
 
+   amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
+
    amdgpu_add_buffer_to_global_list(bo);
 
    return bo;
@@ -722,9 +761,9 @@ sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
 
    bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
 
-   mtx_lock(&ws->bo_fence_lock);
+   simple_mtx_lock(&ws->bo_fence_lock);
    amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
-   mtx_unlock(&ws->bo_fence_lock);
+   simple_mtx_unlock(&ws->bo_fence_lock);
 
    list_del(&backing->list);
    amdgpu_winsys_bo_reference(&backing->bo, NULL);
@@ -819,7 +858,7 @@ static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
    }
 
    amdgpu_va_range_free(bo->u.sparse.va_handle);
-   mtx_destroy(&bo->u.sparse.commit_lock);
+   simple_mtx_destroy(&bo->u.sparse.commit_lock);
    FREE(bo->u.sparse.commitments);
    FREE(bo);
 }
@@ -866,7 +905,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
    if (!bo->u.sparse.commitments)
       goto error_alloc_commitments;
 
-   mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
+   simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
    LIST_INITHEAD(&bo->u.sparse.backing);
 
    /* For simplicity, we always map a multiple of the page size. */
@@ -874,7 +913,8 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
    va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                              map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
-                             0, &bo->va, &bo->u.sparse.va_handle, 0);
+                             0, &bo->va, &bo->u.sparse.va_handle,
+                             AMDGPU_VA_RANGE_HIGH);
    if (r)
       goto error_va_alloc;
 
@@ -888,7 +928,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
 error_va_map:
    amdgpu_va_range_free(bo->u.sparse.va_handle);
 error_va_alloc:
-   mtx_destroy(&bo->u.sparse.commit_lock);
+   simple_mtx_destroy(&bo->u.sparse.commit_lock);
    FREE(bo->u.sparse.commitments);
 error_alloc_commitments:
    FREE(bo);
@@ -915,7 +955,7 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
 
-   mtx_lock(&bo->u.sparse.commit_lock);
+   simple_mtx_lock(&bo->u.sparse.commit_lock);
 
 #if DEBUG_SPARSE_COMMITS
    sparse_dump(bo, __func__);
@@ -1019,7 +1059,7 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
    }
 
 out:
-   mtx_unlock(&bo->u.sparse.commit_lock);
+   simple_mtx_unlock(&bo->u.sparse.commit_lock);
 
    return ok;
 }
@@ -1142,7 +1182,7 @@ amdgpu_bo_create(struct radeon_winsys *rws,
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo;
-   unsigned usage = 0, pb_cache_bucket = 0;
+   int heap = -1;
 
    /* VRAM implies WC. This is not optional. */
    assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
@@ -1150,6 +1190,9 @@ amdgpu_bo_create(struct radeon_winsys *rws,
    /* NO_CPU_ACCESS is valid with VRAM only. */
    assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
 
+   /* Sparse buffers must have NO_CPU_ACCESS set. */
+   assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
+
    /* Sub-allocate small buffers from slabs. */
    if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
        size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
@@ -1182,8 +1225,6 @@ no_slab:
    if (flags & RADEON_FLAG_SPARSE) {
       assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
 
-      flags |= RADEON_FLAG_NO_CPU_ACCESS;
-
      return amdgpu_bo_sparse_create(ws, size, domain, flags);
    }
 
@@ -1200,30 +1241,23 @@ no_slab:
    bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
 
    if (use_reusable_pool) {
-      int heap = radeon_get_heap_index(domain, flags);
+      heap = radeon_get_heap_index(domain, flags);
       assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
-      usage = 1 << heap; /* Only set one usage bit for each heap. */
-
-      pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
-      assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
 
       /* Get a buffer from the cache. */
       bo = (struct amdgpu_winsys_bo*)
-           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
-                                   pb_cache_bucket);
+           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
       if (bo)
          return &bo->base;
    }
 
    /* Create a new one. */
-   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
-                         pb_cache_bucket);
+   bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
    if (!bo) {
       /* Clear the cache and try again. */
       pb_slabs_reclaim(&ws->bo_slabs);
       pb_cache_release_all_buffers(&ws->bo_cache);
-      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
-                            pb_cache_bucket);
+      bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
       if (!bo)
          return NULL;
    }
@@ -1238,56 +1272,78 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                                unsigned *offset)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-   struct amdgpu_winsys_bo *bo;
+   struct amdgpu_winsys_bo *bo = NULL;
    enum amdgpu_bo_handle_type type;
    struct amdgpu_bo_import_result result = {0};
    uint64_t va;
-   amdgpu_va_handle va_handle;
+   amdgpu_va_handle va_handle = NULL;
    struct amdgpu_bo_info info = {0};
    enum radeon_bo_domain initial = 0;
    int r;
 
-   /* Initialize the structure. */
-   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
-   if (!bo) {
-      return NULL;
-   }
-
    switch (whandle->type) {
-   case DRM_API_HANDLE_TYPE_SHARED:
+   case WINSYS_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
-   case DRM_API_HANDLE_TYPE_FD:
+   case WINSYS_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }
 
+   if (stride)
+      *stride = whandle->stride;
+   if (offset)
+      *offset = whandle->offset;
+
    r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
    if (r)
-      goto error;
+      return NULL;
+
+   simple_mtx_lock(&ws->bo_export_table_lock);
+   bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);
+
+   /* If the amdgpu_winsys_bo instance already exists, bump the reference
+    * counter and return it.
+    */
+   if (bo) {
+      p_atomic_inc(&bo->base.reference.count);
+      simple_mtx_unlock(&ws->bo_export_table_lock);
+
+      /* Release the buffer handle, because we don't need it anymore.
+       * This function is returning an existing buffer, which has its own
+       * handle.
+       */
+      amdgpu_bo_free(result.buf_handle);
+      return &bo->base;
+   }
 
    /* Get initial domains. */
    r = amdgpu_bo_query_info(result.buf_handle, &info);
    if (r)
-      goto error_query;
+      goto error;
 
    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
-                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
+                             result.alloc_size, 1 << 20, 0, &va, &va_handle,
+                             AMDGPU_VA_RANGE_HIGH);
    if (r)
-      goto error_query;
+      goto error;
+
+   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
+   if (!bo)
+      goto error;
 
    r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0,
                        AMDGPU_VA_OP_MAP);
    if (r)
-      goto error_va_map;
+      goto error;
 
    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
       initial |= RADEON_DOMAIN_VRAM;
    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
       initial |= RADEON_DOMAIN_GTT;
-
+   /* Initialize the structure. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = info.phys_alignment;
    bo->bo = result.buf_handle;
@@ -1300,28 +1356,27 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
    bo->is_shared = true;
 
-   if (stride)
-      *stride = whandle->stride;
-   if (offset)
-      *offset = whandle->offset;
-
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
       ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
       ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
 
-   amdgpu_add_buffer_to_global_list(bo);
+   amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
 
-   return &bo->base;
+   amdgpu_add_buffer_to_global_list(bo);
 
-error_va_map:
-   amdgpu_va_range_free(va_handle);
+   util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
 
-error_query:
-   amdgpu_bo_free(result.buf_handle);
+   return &bo->base;
 
 error:
-   FREE(bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+   if (bo)
+      FREE(bo);
+   if (va_handle)
+      amdgpu_va_range_free(va_handle);
+   amdgpu_bo_free(result.buf_handle);
    return NULL;
 }
 
@@ -1331,6 +1386,7 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                  struct winsys_handle *whandle)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
+   struct amdgpu_winsys *ws = bo->ws;
    enum amdgpu_bo_handle_type type;
    int r;
 
@@ -1341,13 +1397,13 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
    bo->u.real.use_reusable_pool = false;
 
    switch (whandle->type) {
-   case DRM_API_HANDLE_TYPE_SHARED:
+   case WINSYS_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
-   case DRM_API_HANDLE_TYPE_FD:
+   case WINSYS_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
-   case DRM_API_HANDLE_TYPE_KMS:
+   case WINSYS_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
@@ -1358,6 +1414,10 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
    if (r)
       return false;
 
+   simple_mtx_lock(&ws->bo_export_table_lock);
+   util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+
    whandle->stride = stride;
    whandle->offset = offset;
    whandle->offset += slice_size * whandle->layer;
@@ -1373,19 +1433,23 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
     struct amdgpu_winsys_bo *bo;
     uint64_t va;
     amdgpu_va_handle va_handle;
+    /* Avoid failure when the size is not page aligned */
+    uint64_t aligned_size = align64(size, ws->info.gart_page_size);
 
     bo = CALLOC_STRUCT(amdgpu_winsys_bo);
     if (!bo)
         return NULL;
 
-    if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
+    if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
+                                       aligned_size, &buf_handle))
         goto error;
 
     if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
-                              size, 1 << 12, 0, &va, &va_handle, 0))
+                              aligned_size, 1 << 12, 0, &va, &va_handle,
+                              AMDGPU_VA_RANGE_HIGH))
         goto error_va_alloc;
 
-    if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
+    if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
         goto error_va_map;
 
     /* Initialize it. */
@@ -1401,10 +1465,12 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
     bo->initial_domain = RADEON_DOMAIN_GTT;
     bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
 
-    ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
+    ws->allocated_gtt += aligned_size;
 
     amdgpu_add_buffer_to_global_list(bo);
 
+    amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
+
     return (struct pb_buffer*)bo;
 
 error_va_map: