#include "amdgpu_cs.h"
-#include "os/os_time.h"
+#include "util/os_time.h"
+#include "util/u_hash_table.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
#endif
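+/* Fallback for older libdrm headers that don't define this flag. */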
+#ifndef AMDGPU_VA_RANGE_HIGH
+#define AMDGPU_VA_RANGE_HIGH 0x2
+#endif
+
/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0
unsigned idle_fences;
bool buffer_idle;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
bo->num_fences -= idle_fences;
buffer_idle = !bo->num_fences;
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
} else {
bool buffer_idle = true;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
amdgpu_fence_reference(&fence, bo->fences[0]);
/* Wait for the fence. */
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
if (amdgpu_fence_wait(fence, abs_timeout, true))
fence_idle = true;
else
buffer_idle = false;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
/* Release an idle fence to avoid checking it again later, keeping in
* mind that the fence array may have been modified by other threads.
amdgpu_fence_reference(&fence, NULL);
}
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
}
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
+ struct amdgpu_winsys *ws = bo->ws;
assert(bo->bo && "must not be called for slab entries");
- if (bo->ws->debug_all_bos) {
- mtx_lock(&bo->ws->global_bo_list_lock);
+ if (ws->debug_all_bos) {
+ simple_mtx_lock(&ws->global_bo_list_lock);
LIST_DEL(&bo->u.real.global_list_item);
- bo->ws->num_buffers--;
- mtx_unlock(&bo->ws->global_bo_list_lock);
+ ws->num_buffers--;
+ simple_mtx_unlock(&ws->global_bo_list_lock);
}
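+   /* The buffer may be tracked in the export table; remove it so a later
+    * import of the same kernel handle doesn't return a freed buffer.
+    */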
+ simple_mtx_lock(&ws->bo_export_table_lock);
+ util_hash_table_remove(ws->bo_export_table, bo->bo);
+ simple_mtx_unlock(&ws->bo_export_table_lock);
+
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->u.real.va_handle);
amdgpu_bo_free(bo->bo);
amdgpu_bo_remove_fences(bo);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
+ ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
+ ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
if (bo->u.real.map_count >= 1) {
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->mapped_vram -= bo->base.size;
+ ws->mapped_vram -= bo->base.size;
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->mapped_gtt -= bo->base.size;
- bo->ws->num_mapped_buffers--;
+ ws->mapped_gtt -= bo->base.size;
+ ws->num_mapped_buffers--;
}
FREE(bo);
}
static void *amdgpu_bo_map(struct pb_buffer *buf,
- struct radeon_winsys_cs *rcs,
+ struct radeon_cmdbuf *rcs,
enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
}
} else {
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
if (cs) {
if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
} else {
/* Try to avoid busy-waiting in amdgpu_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
/* Mapping for write. */
if (cs) {
if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
} else {
/* Try to avoid busy-waiting in amdgpu_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
assert(bo->bo);
if (ws->debug_all_bos) {
- mtx_lock(&ws->global_bo_list_lock);
+ simple_mtx_lock(&ws->global_bo_list_lock);
LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
ws->num_buffers++;
- mtx_unlock(&ws->global_bo_list_lock);
+ simple_mtx_unlock(&ws->global_bo_list_lock);
}
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
- unsigned usage,
enum radeon_bo_domain initial_domain,
unsigned flags,
- unsigned pb_cache_bucket)
+ int heap)
{
struct amdgpu_bo_alloc_request request = {0};
amdgpu_bo_handle buf_handle;
unsigned va_gap_size;
int r;
- assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
+ /* VRAM or GTT must be specified, but not both at the same time. */
+ assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
+
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
- pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
- pb_cache_bucket);
+ if (heap >= 0) {
+ pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
+ heap);
+ }
request.alloc_size = size;
request.phys_alignment = alignment;
if (initial_domain & RADEON_DOMAIN_GTT)
request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
+   /* Since VRAM and GTT have almost the same performance on APUs, we could
+    * just set GTT. However, to decrease GTT (system RAM) usage, which is
+    * shared with the OS, allow VRAM placements too. The idea is not that
+    * VRAM is particularly useful here, but that it would otherwise sit
+    * unused and wasted.
+    */
+ if (!ws->info.has_dedicated_vram)
+ request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
+
if (flags & RADEON_FLAG_NO_CPU_ACCESS)
request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (flags & RADEON_FLAG_GTT_WC)
request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
- ws->info.drm_minor >= 20)
+ ws->info.has_local_buffers)
request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
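+   /* If clearing of VRAM allocations was requested (ws->zero_all_vram_allocs),
+    * ask the kernel to zero the memory at allocation time.
+    */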
+ if (ws->zero_all_vram_allocs &&
+ (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
+ request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
if (size > ws->info.pte_fragment_size)
alignment = MAX2(alignment, ws->info.pte_fragment_size);
r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
- size + va_gap_size, alignment, 0, &va, &va_handle, 0);
+ size + va_gap_size, alignment, 0, &va, &va_handle,
+ (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
+ AMDGPU_VA_RANGE_HIGH);
if (r)
goto error_va_alloc;
- r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
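+   /* Map the buffer into the GPU virtual address space; read-only buffers
+    * are mapped without the writeable page flag.
+    */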
+ unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE;
+
+ if (!(flags & RADEON_FLAG_READ_ONLY))
+ vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+
+ r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
+ AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = alignment;
- bo->base.usage = usage;
+ bo->base.usage = 0;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->ws = ws;
else if (initial_domain & RADEON_DOMAIN_GTT)
ws->allocated_gtt += align64(size, ws->info.gart_page_size);
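+   /* Query and cache the KMS handle so it is available later without an
+    * extra export call.
+    */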
+ amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
+
amdgpu_add_buffer_to_global_list(bo);
return bo;
bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
list_del(&backing->list);
amdgpu_winsys_bo_reference(&backing->bo, NULL);
}
amdgpu_va_range_free(bo->u.sparse.va_handle);
- mtx_destroy(&bo->u.sparse.commit_lock);
+ simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
FREE(bo);
}
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
+ simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
LIST_INITHEAD(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
- 0, &bo->va, &bo->u.sparse.va_handle, 0);
+ 0, &bo->va, &bo->u.sparse.va_handle,
+ AMDGPU_VA_RANGE_HIGH);
if (r)
goto error_va_alloc;
error_va_map:
amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
- mtx_destroy(&bo->u.sparse.commit_lock);
+ simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
error_alloc_commitments:
FREE(bo);
va_page = offset / RADEON_SPARSE_PAGE_SIZE;
end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
- mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->u.sparse.commit_lock);
#if DEBUG_SPARSE_COMMITS
sparse_dump(bo, __func__);
}
out:
- mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->u.sparse.commit_lock);
return ok;
}
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
- unsigned usage = 0, pb_cache_bucket = 0;
+ int heap = -1;
/* VRAM implies WC. This is not optional. */
assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
/* NO_CPU_ACCESS is valid with VRAM only. */
assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
+ /* Sparse buffers must have NO_CPU_ACCESS set. */
+ assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
+
/* Sub-allocate small buffers from slabs. */
if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
if (flags & RADEON_FLAG_SPARSE) {
assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
- flags |= RADEON_FLAG_NO_CPU_ACCESS;
-
return amdgpu_bo_sparse_create(ws, size, domain, flags);
}
bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
if (use_reusable_pool) {
- int heap = radeon_get_heap_index(domain, flags);
+ heap = radeon_get_heap_index(domain, flags);
assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
- usage = 1 << heap; /* Only set one usage bit for each heap. */
-
- pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
- assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
/* Get a buffer from the cache. */
bo = (struct amdgpu_winsys_bo*)
- pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
- pb_cache_bucket);
+ pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
if (bo)
return &bo->base;
}
/* Create a new one. */
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
+ bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo) {
/* Clear the cache and try again. */
pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
+ bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo)
return NULL;
}
unsigned *offset)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
- struct amdgpu_winsys_bo *bo;
+ struct amdgpu_winsys_bo *bo = NULL;
enum amdgpu_bo_handle_type type;
struct amdgpu_bo_import_result result = {0};
uint64_t va;
- amdgpu_va_handle va_handle;
+ amdgpu_va_handle va_handle = NULL;
struct amdgpu_bo_info info = {0};
enum radeon_bo_domain initial = 0;
int r;
- /* Initialize the structure. */
- bo = CALLOC_STRUCT(amdgpu_winsys_bo);
- if (!bo) {
- return NULL;
- }
-
switch (whandle->type) {
- case DRM_API_HANDLE_TYPE_SHARED:
+ case WINSYS_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
- case DRM_API_HANDLE_TYPE_FD:
+ case WINSYS_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
default:
return NULL;
}
+ if (stride)
+ *stride = whandle->stride;
+ if (offset)
+ *offset = whandle->offset;
+
r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
if (r)
- goto error;
+ return NULL;
+
+ simple_mtx_lock(&ws->bo_export_table_lock);
+ bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);
+
+ /* If the amdgpu_winsys_bo instance already exists, bump the reference
+ * counter and return it.
+ */
+ if (bo) {
+ p_atomic_inc(&bo->base.reference.count);
+ simple_mtx_unlock(&ws->bo_export_table_lock);
+
+ /* Release the buffer handle, because we don't need it anymore.
+ * This function is returning an existing buffer, which has its own
+ * handle.
+ */
+ amdgpu_bo_free(result.buf_handle);
+ return &bo->base;
+ }
/* Get initial domains. */
r = amdgpu_bo_query_info(result.buf_handle, &info);
if (r)
- goto error_query;
+ goto error;
r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
- result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
+ result.alloc_size, 1 << 20, 0, &va, &va_handle,
+ AMDGPU_VA_RANGE_HIGH);
if (r)
- goto error_query;
+ goto error;
+
+ bo = CALLOC_STRUCT(amdgpu_winsys_bo);
+ if (!bo)
+ goto error;
r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
if (r)
- goto error_va_map;
+ goto error;
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
initial |= RADEON_DOMAIN_VRAM;
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
initial |= RADEON_DOMAIN_GTT;
-
+ /* Initialize the structure. */
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
bo->bo = result.buf_handle;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
bo->is_shared = true;
- if (stride)
- *stride = whandle->stride;
- if (offset)
- *offset = whandle->offset;
-
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
- amdgpu_add_buffer_to_global_list(bo);
+ amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
- return &bo->base;
+ amdgpu_add_buffer_to_global_list(bo);
-error_va_map:
- amdgpu_va_range_free(va_handle);
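+   /* Remember the imported buffer so a later import of the same kernel
+    * handle returns this amdgpu_winsys_bo instead of creating a duplicate.
+    */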
+ util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+ simple_mtx_unlock(&ws->bo_export_table_lock);
-error_query:
- amdgpu_bo_free(result.buf_handle);
+ return &bo->base;
error:
- FREE(bo);
+ simple_mtx_unlock(&ws->bo_export_table_lock);
+ if (bo)
+ FREE(bo);
+ if (va_handle)
+ amdgpu_va_range_free(va_handle);
+ amdgpu_bo_free(result.buf_handle);
return NULL;
}
struct winsys_handle *whandle)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
+ struct amdgpu_winsys *ws = bo->ws;
enum amdgpu_bo_handle_type type;
int r;
bo->u.real.use_reusable_pool = false;
switch (whandle->type) {
- case DRM_API_HANDLE_TYPE_SHARED:
+ case WINSYS_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
- case DRM_API_HANDLE_TYPE_FD:
+ case WINSYS_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
- case DRM_API_HANDLE_TYPE_KMS:
+ case WINSYS_HANDLE_TYPE_KMS:
type = amdgpu_bo_handle_type_kms;
break;
default:
if (r)
return false;
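+   /* Exported buffers can be re-imported via amdgpu_bo_from_handle; register
+    * this one so such an import returns the same winsys buffer.
+    */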
+ simple_mtx_lock(&ws->bo_export_table_lock);
+ util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+ simple_mtx_unlock(&ws->bo_export_table_lock);
+
whandle->stride = stride;
whandle->offset = offset;
whandle->offset += slice_size * whandle->layer;
struct amdgpu_winsys_bo *bo;
uint64_t va;
amdgpu_va_handle va_handle;
+   /* Avoid failure when the size is not page-aligned. */
+ uint64_t aligned_size = align64(size, ws->info.gart_page_size);
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo)
return NULL;
- if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
+ if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
+ aligned_size, &buf_handle))
goto error;
if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
- size, 1 << 12, 0, &va, &va_handle, 0))
+ aligned_size, 1 << 12, 0, &va, &va_handle,
+ AMDGPU_VA_RANGE_HIGH))
goto error_va_alloc;
- if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
+ if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
goto error_va_map;
/* Initialize it. */
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
+ ws->allocated_gtt += aligned_size;
amdgpu_add_buffer_to_global_list(bo);
+ amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->u.real.kms_handle);
+
return (struct pb_buffer*)bo;
error_va_map: