* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
-/*
- * Authors:
- * Marek Olšák <maraeo@gmail.com>
- */
#include "amdgpu_cs.h"
-#include "os/os_time.h"
+#include "util/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>
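+/* Define this flag locally in case the installed amdgpu_drm.h is too old to
+ * provide it; the value matches the kernel UAPI definition. */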
+#ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
+#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
+#endif
+
/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0
unsigned idle_fences;
bool buffer_idle;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
   for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
      if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
         break;
   }

   bo->num_fences -= idle_fences;
buffer_idle = !bo->num_fences;
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
} else {
bool buffer_idle = true;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
while (bo->num_fences && buffer_idle) {
struct pipe_fence_handle *fence = NULL;
bool fence_idle = false;
amdgpu_fence_reference(&fence, bo->fences[0]);
/* Wait for the fence. */
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
if (amdgpu_fence_wait(fence, abs_timeout, true))
fence_idle = true;
else
buffer_idle = false;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
      /* Release an idle fence to avoid checking it again later, keeping in
       * mind that the fence array may have been modified by other threads.
       */
amdgpu_fence_reference(&fence, NULL);
}
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
return buffer_idle;
}
assert(bo->bo && "must not be called for slab entries");
- mtx_lock(&bo->ws->global_bo_list_lock);
- LIST_DEL(&bo->u.real.global_list_item);
- bo->ws->num_buffers--;
- mtx_unlock(&bo->ws->global_bo_list_lock);
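+   /* The global BO list is only maintained when the debug_all_bos option is
+    * set, so only update it in that case. */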
+ if (bo->ws->debug_all_bos) {
+ simple_mtx_lock(&bo->ws->global_bo_list_lock);
+ LIST_DEL(&bo->u.real.global_list_item);
+ bo->ws->num_buffers--;
+ simple_mtx_unlock(&bo->ws->global_bo_list_lock);
+ }
amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_va_range_free(bo->u.real.va_handle);
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
}
} else {
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
assert(bo->bo);
- mtx_lock(&ws->global_bo_list_lock);
- LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
- ws->num_buffers++;
- mtx_unlock(&ws->global_bo_list_lock);
+ if (ws->debug_all_bos) {
+ simple_mtx_lock(&ws->global_bo_list_lock);
+ LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
+ ws->num_buffers++;
+ simple_mtx_unlock(&ws->global_bo_list_lock);
+ }
}
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
- unsigned usage,
enum radeon_bo_domain initial_domain,
unsigned flags,
- unsigned pb_cache_bucket)
+ int heap)
{
struct amdgpu_bo_alloc_request request = {0};
amdgpu_bo_handle buf_handle;
unsigned va_gap_size;
int r;
- assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
+ /* VRAM or GTT must be specified, but not both at the same time. */
+ assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
+
bo = CALLOC_STRUCT(amdgpu_winsys_bo);
if (!bo) {
return NULL;
}
- pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
- pb_cache_bucket);
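+   /* A negative heap index means this buffer will not go through the
+    * reusable cache, so no cache entry needs to be initialized. */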
+ if (heap >= 0) {
+ pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
+ heap);
+ }
request.alloc_size = size;
request.phys_alignment = alignment;
if (initial_domain & RADEON_DOMAIN_GTT)
request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
- if (flags & RADEON_FLAG_CPU_ACCESS)
- request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ /* If VRAM is just stolen system memory, allow both VRAM and
+ * GTT, whichever has free space. If a buffer is evicted from
+ * VRAM to GTT, it will stay there.
+ *
+ * DRM 3.6.0 has good BO move throttling, so we can allow VRAM-only
+ * placements even with a low amount of stolen VRAM.
+ */
+ if (!ws->info.has_dedicated_vram && ws->info.drm_minor < 6)
+ request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
+
if (flags & RADEON_FLAG_NO_CPU_ACCESS)
request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (flags & RADEON_FLAG_GTT_WC)
request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ /* TODO: Enable this once the kernel handles it efficiently. */
+ /*if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ ws->info.drm_minor >= 20)
+ request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;*/
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
if (r)
goto error_va_alloc;
- r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
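+   /* Map the VA range as readable and executable; add the writeable bit
+    * unless the buffer was requested as read-only. */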
+ unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE;
+
+ if (!(flags & RADEON_FLAG_READ_ONLY))
+ vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+
+ r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
+ AMDGPU_VA_OP_MAP);
if (r)
goto error_va_map;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = alignment;
- bo->base.usage = usage;
+ bo->base.usage = 0;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
bo->ws = ws;
bo->u.real.va_handle = va_handle;
bo->initial_domain = initial_domain;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
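+   /* Remember whether the kernel treats this BO as always valid in the VM
+    * (currently never, since the flag above is still disabled). */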
+ bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
if (initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align64(size, ws->info.gart_page_size);
{
struct amdgpu_winsys *ws = priv;
struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
- enum radeon_bo_domain domains;
- enum radeon_bo_flag flags = 0;
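+   /* The slab heap index encodes a (domain, flags) pair; decode it to get
+    * the allocation parameters for the slab's backing buffer. */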
+ enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
+ enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
uint32_t base_id;
if (!slab)
return NULL;
- if (heap & 1)
- flags |= RADEON_FLAG_GTT_WC;
- if (heap & 2)
- flags |= RADEON_FLAG_CPU_ACCESS;
-
- switch (heap >> 2) {
- case 0:
- domains = RADEON_DOMAIN_VRAM;
- break;
- default:
- case 1:
- domains = RADEON_DOMAIN_VRAM_GTT;
- break;
- case 2:
- domains = RADEON_DOMAIN_GTT;
- break;
- }
-
+ unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
- 64 * 1024, 64 * 1024,
+ slab_size, slab_size,
domains, flags));
if (!slab->buffer)
goto fail;
buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
bo->initial_domain,
- bo->u.sparse.flags | RADEON_FLAG_HANDLE);
+ bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
if (!buf) {
FREE(best_backing->chunks);
FREE(best_backing);
bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
list_del(&backing->list);
amdgpu_winsys_bo_reference(&backing->bo, NULL);
}
amdgpu_va_range_free(bo->u.sparse.va_handle);
- mtx_destroy(&bo->u.sparse.commit_lock);
+ simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
FREE(bo);
}
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
+ simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
LIST_INITHEAD(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
error_va_map:
amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
- mtx_destroy(&bo->u.sparse.commit_lock);
+ simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
error_alloc_commitments:
FREE(bo);
va_page = offset / RADEON_SPARSE_PAGE_SIZE;
end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
- mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->u.sparse.commit_lock);
#if DEBUG_SPARSE_COMMITS
sparse_dump(bo, __func__);
}
out:
- mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->u.sparse.commit_lock);
return ok;
}
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
- unsigned usage = 0, pb_cache_bucket;
+ int heap = -1;
+
+ /* VRAM implies WC. This is not optional. */
+ assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
+
+ /* NO_CPU_ACCESS is valid with VRAM only. */
+ assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
+
+ /* Sparse buffers must have NO_CPU_ACCESS set. */
+ assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
/* Sub-allocate small buffers from slabs. */
- if (!(flags & (RADEON_FLAG_HANDLE | RADEON_FLAG_SPARSE)) &&
+ if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
struct pb_slab_entry *entry;
- unsigned heap = 0;
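+      /* Map (domain, flags) to a slab heap index; combinations that cannot
+       * be sub-allocated fall back to a normal allocation below. */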
+ int heap = radeon_get_heap_index(domain, flags);
- if (flags & RADEON_FLAG_GTT_WC)
- heap |= 1;
- if (flags & RADEON_FLAG_CPU_ACCESS)
- heap |= 2;
- if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
+ if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
goto no_slab;
- switch (domain) {
- case RADEON_DOMAIN_VRAM:
- heap |= 0 * 4;
- break;
- case RADEON_DOMAIN_VRAM_GTT:
- heap |= 1 * 4;
- break;
- case RADEON_DOMAIN_GTT:
- heap |= 2 * 4;
- break;
- default:
- goto no_slab;
- }
-
entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
if (!entry) {
/* Clear the cache and try again. */
if (flags & RADEON_FLAG_SPARSE) {
assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
- assert(!(flags & RADEON_FLAG_CPU_ACCESS));
-
- flags |= RADEON_FLAG_NO_CPU_ACCESS;
return amdgpu_bo_sparse_create(ws, size, domain, flags);
}
/* This flag is irrelevant for the cache. */
- flags &= ~RADEON_FLAG_HANDLE;
+ flags &= ~RADEON_FLAG_NO_SUBALLOC;
   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
size = align64(size, ws->info.gart_page_size);
alignment = align(alignment, ws->info.gart_page_size);
- /* Only set one usage bit each for domains and flags, or the cache manager
- * might consider different sets of domains / flags compatible
- */
- if (domain == RADEON_DOMAIN_VRAM_GTT)
- usage = 1 << 2;
- else
- usage = domain >> 1;
- assert(flags < sizeof(usage) * 8 - 3);
- usage |= 1 << (flags + 3);
-
- /* Determine the pb_cache bucket for minimizing pb_cache misses. */
- pb_cache_bucket = 0;
- if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
- pb_cache_bucket += 1;
- if (flags == RADEON_FLAG_GTT_WC) /* WC */
- pb_cache_bucket += 2;
- assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
-
- /* Get a buffer from the cache. */
- bo = (struct amdgpu_winsys_bo*)
- pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
- pb_cache_bucket);
- if (bo)
- return &bo->base;
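+   /* Only buffers that are never shared with other processes may go through
+    * the reusable cache; shared buffers are always freshly allocated. */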
+ bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
+ if (use_reusable_pool) {
+ heap = radeon_get_heap_index(domain, flags);
+ assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
+
+ /* Get a buffer from the cache. */
+ bo = (struct amdgpu_winsys_bo*)
+ pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
+ if (bo)
+ return &bo->base;
+ }
/* Create a new one. */
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
+ bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo) {
/* Clear the cache and try again. */
pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
- bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
- pb_cache_bucket);
+ bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo)
return NULL;
}
- bo->u.real.use_reusable_pool = true;
+ bo->u.real.use_reusable_pool = use_reusable_pool;
return &bo->base;
}
enum amdgpu_bo_handle_type type;
int r;
- if (!bo->bo) {
- offset += bo->va - bo->u.slab.real->va;
- bo = bo->u.slab.real;
- }
+ /* Don't allow exports of slab entries and sparse buffers. */
+ if (!bo->bo)
+ return false;
bo->u.real.use_reusable_pool = false;
return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}
+static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
+{
+ struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
+
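+   /* Suballocated (slab) buffers have no kernel BO of their own and are
+    * not sparse. */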
+ return !bo->bo && !bo->sparse;
+}
+
static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->va;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
+ ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
ws->base.buffer_get_handle = amdgpu_bo_get_handle;
ws->base.buffer_commit = amdgpu_bo_sparse_commit;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;