* of the Software.
*/
-#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"
#include "util/u_hash_table.h"
#include "util/u_memory.h"
-#include "util/u_simple_list.h"
-#include "util/u_double_list.h"
+#include "util/simple_list.h"
+#include "util/list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"
#include <fcntl.h>
#include <stdio.h>
-/*
- * this are copy from radeon_drm, once an updated libdrm is released
- * we should bump configure.ac requirement for it and remove the following
- * field
- */
-#define RADEON_BO_FLAGS_MACRO_TILE 1
-#define RADEON_BO_FLAGS_MICRO_TILE 2
-#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
-
-#ifndef DRM_RADEON_GEM_WAIT
-#define DRM_RADEON_GEM_WAIT 0x2b
-
-#define RADEON_GEM_NO_WAIT 0x1
-#define RADEON_GEM_USAGE_READ 0x2
-#define RADEON_GEM_USAGE_WRITE 0x4
-
-struct drm_radeon_gem_wait {
- uint32_t handle;
- uint32_t flags; /* one of RADEON_GEM_* */
-};
-
-#endif
-
-#ifndef RADEON_VA_MAP
-
-#define RADEON_VA_MAP 1
-#define RADEON_VA_UNMAP 2
-
-#define RADEON_VA_RESULT_OK 0
-#define RADEON_VA_RESULT_ERROR 1
-#define RADEON_VA_RESULT_VA_EXIST 2
-
-#define RADEON_VM_PAGE_VALID (1 << 0)
-#define RADEON_VM_PAGE_READABLE (1 << 1)
-#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
-#define RADEON_VM_PAGE_SYSTEM (1 << 3)
-#define RADEON_VM_PAGE_SNOOPED (1 << 4)
-
-struct drm_radeon_gem_va {
- uint32_t handle;
- uint32_t operation;
- uint32_t vm_id;
- uint32_t flags;
- uint64_t offset;
-};
-
-#define DRM_RADEON_GEM_VA 0x2b
-#endif
-
-#ifndef DRM_RADEON_GEM_OP
-#define DRM_RADEON_GEM_OP 0x2c
-
-/* Sets or returns a value associated with a buffer. */
-struct drm_radeon_gem_op {
- uint32_t handle; /* buffer */
- uint32_t op; /* RADEON_GEM_OP_* */
- uint64_t value; /* input or return value */
-};
-
-#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
-#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1
-#endif
-
-
-extern const struct pb_vtbl radeon_bo_vtbl;
+static const struct pb_vtbl radeon_bo_vtbl;
-
-static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
+static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
assert(bo->vtbl == &radeon_bo_vtbl);
return (struct radeon_bo *)bo;
bool va;
uint64_t va_offset;
struct list_head va_holes;
+
+ /* BO size alignment */
+ unsigned size_align;
};
-static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
+static inline struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
return (struct radeon_bomgr *)mgr;
}
return bo;
}
-static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
+static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
- struct radeon_bo *bo = get_radeon_bo(_buf);
+ struct drm_radeon_gem_busy args = {0};
- while (p_atomic_read(&bo->num_active_ioctls)) {
- sched_yield();
- }
+ args.handle = bo->handle;
+ return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
+ &args, sizeof(args)) != 0;
+}
- /* XXX use this when it's ready */
- /*if (bo->rws->info.drm_minor >= 12) {
- struct drm_radeon_gem_wait args = {};
- args.handle = bo->handle;
- args.flags = usage;
- while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
- &args, sizeof(args)) == -EBUSY);
- } else*/ {
- struct drm_radeon_gem_wait_idle args;
- memset(&args, 0, sizeof(args));
- args.handle = bo->handle;
- while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
- &args, sizeof(args)) == -EBUSY);
- }
+static void radeon_bo_wait_idle(struct radeon_bo *bo)
+{
+ struct drm_radeon_gem_wait_idle args = {0};
+
+ args.handle = bo->handle;
+ while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
+ &args, sizeof(args)) == -EBUSY);
}
-static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
- enum radeon_bo_usage usage)
+static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+ enum radeon_bo_usage usage)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
+ int64_t abs_timeout;
+
+ /* No timeout. Just query. */
+ if (timeout == 0)
+ return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);
+
+ abs_timeout = os_time_get_absolute_timeout(timeout);
+
+ /* Wait if any ioctl is being submitted with this buffer. */
+ if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
+ return false;
- if (p_atomic_read(&bo->num_active_ioctls)) {
- return TRUE;
+ /* Infinite timeout. */
+ if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
+ radeon_bo_wait_idle(bo);
+ return true;
}
- /* XXX use this when it's ready */
- /*if (bo->rws->info.drm_minor >= 12) {
- struct drm_radeon_gem_wait args = {};
- args.handle = bo->handle;
- args.flags = usage | RADEON_GEM_NO_WAIT;
- return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
- &args, sizeof(args)) != 0;
- } else*/ {
- struct drm_radeon_gem_busy args;
- memset(&args, 0, sizeof(args));
- args.handle = bo->handle;
- return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
- &args, sizeof(args)) != 0;
+ /* Other timeouts need to be emulated with a loop. */
+ while (radeon_bo_is_busy(bo)) {
+ if (os_time_get_nano() >= abs_timeout)
+ return false;
+ os_time_sleep(10);
}
+
+ return true;
}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
struct radeon_bo_va_hole *hole, *n;
uint64_t offset = 0, waste = 0;
- alignment = MAX2(alignment, 4096);
- size = align(size, 4096);
+ /* All VM address space holes will implicitly start aligned to the
+ * size alignment, so we don't need to sanitize the alignment here
+ */
+ size = align(size, mgr->size_align);
pipe_mutex_lock(mgr->bo_va_mutex);
/* first look for a hole */
{
struct radeon_bo_va_hole *hole;
- size = align(size, 4096);
+ size = align(size, mgr->size_align);
pipe_mutex_lock(mgr->bo_va_mutex);
if ((va + size) == mgr->va_offset) {
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
- if (bo->name) {
+ if (bo->flink_name) {
util_hash_table_remove(bo->mgr->bo_names,
- (void*)(uintptr_t)bo->name);
+ (void*)(uintptr_t)bo->flink_name);
}
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
if (bo->ptr)
os_munmap(bo->ptr, bo->base.size);
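+    /* Release the buffer's virtual address range before closing the GEM
+     * handle.
+     */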
+ if (mgr->va) {
+ if (bo->rws->va_unmap_working) {
+ struct drm_radeon_gem_va va;
+
+ va.handle = bo->handle;
+ va.vm_id = 0;
+ va.operation = RADEON_VA_UNMAP;
+ va.flags = RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_WRITEABLE |
+ RADEON_VM_PAGE_SNOOPED;
+ va.offset = bo->va;
+
+ if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_VA, &va,
+ sizeof(va)) != 0 &&
+ va.operation == RADEON_VA_RESULT_ERROR) {
+ fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
+ fprintf(stderr, "radeon: size : %d bytes\n", bo->base.size);
+ fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
+ }
+ }
+
+ radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
+ }
+
/* Close object. */
args.handle = bo->handle;
drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
- if (mgr->va) {
- radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
- }
-
pipe_mutex_destroy(bo->map_mutex);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->rws->allocated_vram -= align(bo->base.size, 4096);
+ bo->rws->allocated_vram -= align(bo->base.size, mgr->size_align);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->rws->allocated_gtt -= align(bo->base.size, 4096);
+ bo->rws->allocated_gtt -= align(bo->base.size, mgr->size_align);
FREE(bo);
}
struct drm_radeon_gem_mmap args = {0};
void *ptr;
- /* Return the pointer if it's already mapped. */
- if (bo->ptr)
- return bo->ptr;
+ /* If the buffer is created from user memory, return the user pointer. */
+ if (bo->user_ptr)
+ return bo->user_ptr;
/* Map the buffer. */
pipe_mutex_lock(bo->map_mutex);
- /* Return the pointer if it's already mapped (in case of a race). */
+ /* Return the pointer if it's already mapped. */
if (bo->ptr) {
+ bo->map_count++;
pipe_mutex_unlock(bo->map_mutex);
return bo->ptr;
}
return NULL;
}
bo->ptr = ptr;
+ bo->map_count = 1;
pipe_mutex_unlock(bo->map_mutex);
return bo->ptr;
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
- if (radeon_bo_is_busy((struct pb_buffer*)bo,
- RADEON_USAGE_WRITE)) {
+ if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+ RADEON_USAGE_WRITE)) {
return NULL;
}
} else {
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
+ cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
- if (radeon_bo_is_busy((struct pb_buffer*)bo,
- RADEON_USAGE_READWRITE)) {
+ if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+ RADEON_USAGE_READWRITE)) {
return NULL;
}
}
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0);
+ cs->flush_cs(cs->flush_data, 0, NULL);
}
- radeon_bo_wait((struct pb_buffer*)bo,
+ radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
if (cs) {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0);
+ cs->flush_cs(cs->flush_data, 0, NULL);
} else {
/* Try to avoid busy-waiting in radeon_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
}
}
- radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
+ radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+ RADEON_USAGE_READWRITE);
}
bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
- /* NOP */
+ struct radeon_bo *bo = (struct radeon_bo*)_buf;
+
+ if (bo->user_ptr)
+ return;
+
+ pipe_mutex_lock(bo->map_mutex);
+ if (!bo->ptr) {
+ pipe_mutex_unlock(bo->map_mutex);
+ return; /* it's not been mapped */
+ }
+
+ assert(bo->map_count);
+ if (--bo->map_count) {
+ pipe_mutex_unlock(bo->map_mutex);
+ return; /* it's been mapped multiple times */
+ }
+
+ os_munmap(bo->ptr, bo->base.size);
+ bo->ptr = NULL;
+ pipe_mutex_unlock(bo->map_mutex);
}
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
{
}
-const struct pb_vtbl radeon_bo_vtbl = {
+static const struct pb_vtbl radeon_bo_vtbl = {
radeon_bo_destroy,
NULL, /* never called */
NULL, /* never called */
radeon_bo_get_base_buffer,
};
+#ifndef RADEON_GEM_GTT_WC
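+/* Use write-combined CPU mappings of the BO in GTT */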
+#define RADEON_GEM_GTT_WC (1 << 2)
+#endif
+#ifndef RADEON_GEM_CPU_ACCESS
+/* BO is expected to be accessed by the CPU */
+#define RADEON_GEM_CPU_ACCESS (1 << 3)
+#endif
+#ifndef RADEON_GEM_NO_CPU_ACCESS
+/* CPU access is not expected to work for this BO */
+#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
+#endif
+
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
args.size = size;
args.alignment = desc->alignment;
args.initial_domain = rdesc->initial_domains;
+ args.flags = 0;
+
+ if (rdesc->flags & RADEON_FLAG_GTT_WC)
+ args.flags |= RADEON_GEM_GTT_WC;
+ if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
+ args.flags |= RADEON_GEM_CPU_ACCESS;
+ if (rdesc->flags & RADEON_FLAG_NO_CPU_ACCESS)
+ args.flags |= RADEON_GEM_NO_CPU_ACCESS;
if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args))) {
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
+ fprintf(stderr, "radeon: flags : %d\n", args.flags);
return NULL;
}
}
if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
- rws->allocated_vram += align(size, 4096);
+ rws->allocated_vram += align(size, mgr->size_align);
else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
- rws->allocated_gtt += align(size, 4096);
+ rws->allocated_gtt += align(size, mgr->size_align);
return &bo->base;
}
return TRUE;
}
- if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
+ if (!radeon_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
return TRUE;
}
pipe_mutex_init(mgr->bo_va_mutex);
mgr->va = rws->info.r600_virtual_address;
- mgr->va_offset = rws->info.r600_va_start;
+ mgr->va_offset = rws->va_start;
list_inithead(&mgr->va_holes);
+ /* TTM aligns the BO size to the CPU page size */
+ mgr->size_align = sysconf(_SC_PAGESIZE);
+
return &mgr->base;
}
*microtiled = RADEON_LAYOUT_LINEAR;
*macrotiled = RADEON_LAYOUT_LINEAR;
- if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
+ if (args.tiling_flags & RADEON_TILING_MICRO)
*microtiled = RADEON_LAYOUT_TILED;
else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
*microtiled = RADEON_LAYOUT_SQUARETILED;
- if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
+ if (args.tiling_flags & RADEON_TILING_MACRO)
*macrotiled = RADEON_LAYOUT_TILED;
if (bankw && tile_split && stencil_tile_split && mtilea && tile_split) {
*bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
struct radeon_winsys_cs *rcs,
enum radeon_bo_layout microtiled,
enum radeon_bo_layout macrotiled,
+ unsigned pipe_config,
unsigned bankw, unsigned bankh,
unsigned tile_split,
unsigned stencil_tile_split,
- unsigned mtilea,
+ unsigned mtilea, unsigned num_banks,
uint32_t pitch,
bool scanout)
{
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0);
+ cs->flush_cs(cs->flush_data, 0, NULL);
}
- while (p_atomic_read(&bo->num_active_ioctls)) {
- sched_yield();
- }
+ os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);
if (microtiled == RADEON_LAYOUT_TILED)
- args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
+ args.tiling_flags |= RADEON_TILING_MICRO;
else if (microtiled == RADEON_LAYOUT_SQUARETILED)
- args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
+ args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;
if (macrotiled == RADEON_LAYOUT_TILED)
- args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;
+ args.tiling_flags |= RADEON_TILING_MACRO;
args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
RADEON_TILING_EG_BANKW_SHIFT;
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
- enum radeon_bo_domain domain)
+ enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
memset(&desc, 0, sizeof(desc));
desc.base.alignment = alignment;
- /* Additional criteria for the cache manager. */
- desc.base.usage = domain;
+    /* Align size to page size. This is the minimum alignment for normal
+     * BOs. Aligning this here helps the cached bufmgr; small BOs in
+     * particular, like constant/uniform buffers, benefit from better reuse.
+     */
+ size = align(size, mgr->size_align);
+
+ /* Only set one usage bit each for domains and flags, or the cache manager
+ * might consider different sets of domains / flags compatible
+ */
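+    /* The domain ends up in usage bits 0-2 (bit 2 for VRAM_GTT, otherwise
+     * domain >> 1); each distinct flags combination gets its own bit
+     * starting at bit 3.
+     */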
+ if (domain == RADEON_DOMAIN_VRAM_GTT)
+ desc.base.usage = 1 << 2;
+ else
+ desc.base.usage = domain >> 1;
+ assert(flags < sizeof(desc.base.usage) * 8 - 3);
+ desc.base.usage |= 1 << (flags + 3);
+
desc.initial_domains = domain;
+ desc.flags = flags;
/* Assign a buffer manager. */
if (use_reusable_pool)
return (struct pb_buffer*)buffer;
}
+static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
+ void *pointer, unsigned size)
+{
+ struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
+ struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
+ struct drm_radeon_gem_userptr args;
+ struct radeon_bo *bo;
+ int r;
+
+ bo = CALLOC_STRUCT(radeon_bo);
+ if (!bo)
+ return NULL;
+
+ memset(&args, 0, sizeof(args));
+ args.addr = (uintptr_t)pointer;
+ args.size = align(size, sysconf(_SC_PAGE_SIZE));
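+    /* Accept anonymous memory only, have the kernel validate the address
+     * range up front, and register an MMU notifier so changes to the user
+     * mapping are tracked.
+     */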
+ args.flags = RADEON_GEM_USERPTR_ANONONLY |
+ RADEON_GEM_USERPTR_VALIDATE |
+ RADEON_GEM_USERPTR_REGISTER;
+ if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
+ &args, sizeof(args))) {
+ FREE(bo);
+ return NULL;
+ }
+
+ pipe_mutex_lock(mgr->bo_handles_mutex);
+
+ /* Initialize it. */
+ pipe_reference_init(&bo->base.reference, 1);
+ bo->handle = args.handle;
+ bo->base.alignment = 0;
+ bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
+ bo->base.size = size;
+ bo->base.vtbl = &radeon_bo_vtbl;
+ bo->mgr = mgr;
+ bo->rws = mgr->rws;
+ bo->user_ptr = pointer;
+ bo->va = 0;
+ bo->initial_domain = RADEON_DOMAIN_GTT;
+ pipe_mutex_init(bo->map_mutex);
+
+ util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
+
+ pipe_mutex_unlock(mgr->bo_handles_mutex);
+
+ if (mgr->va) {
+ struct drm_radeon_gem_va va;
+
+ bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);
+
+ va.handle = bo->handle;
+ va.operation = RADEON_VA_MAP;
+ va.vm_id = 0;
+ va.offset = bo->va;
+ va.flags = RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_WRITEABLE |
+ RADEON_VM_PAGE_SNOOPED;
+ r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
+ if (r && va.operation == RADEON_VA_RESULT_ERROR) {
+ fprintf(stderr, "radeon: Failed to assign virtual address space\n");
+ radeon_bo_destroy(&bo->base);
+ return NULL;
+ }
+ pipe_mutex_lock(mgr->bo_handles_mutex);
+ if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
+ struct pb_buffer *b = &bo->base;
+ struct radeon_bo *old_bo =
+ util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
+
+ pipe_mutex_unlock(mgr->bo_handles_mutex);
+ pb_reference(&b, &old_bo->base);
+ return b;
+ }
+
+ util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
+ pipe_mutex_unlock(mgr->bo_handles_mutex);
+ }
+
+ ws->allocated_gtt += align(bo->base.size, mgr->size_align);
+
+ return (struct pb_buffer*)bo;
+}
+
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned *stride)
}
handle = open_arg.handle;
size = open_arg.size;
- bo->name = whandle->handle;
+ bo->flink_name = whandle->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
size = lseek(whandle->handle, 0, SEEK_END);
/*
bo->va = 0;
pipe_mutex_init(bo->map_mutex);
- if (bo->name)
- util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->name, bo);
+ if (bo->flink_name)
+ util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align(bo->base.size, 4096);
+ ws->allocated_vram += align(bo->base.size, mgr->size_align);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align(bo->base.size, 4096);
+ ws->allocated_gtt += align(bo->base.size, mgr->size_align);
return (struct pb_buffer*)bo;
memset(&flink, 0, sizeof(flink));
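+    /* An exported buffer must not be returned to the cache, since another
+     * process may keep using it after we stop referencing it.
+     */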
+ if ((void*)bo != (void*)buffer)
+ pb_cache_manager_remove_buffer(buffer);
+
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
- if (!bo->flinked) {
+ if (!bo->flink_name) {
flink.handle = bo->handle;
if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return FALSE;
}
- bo->flinked = TRUE;
- bo->flink = flink.name;
+ bo->flink_name = flink.name;
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
- util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink, bo);
+ util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
}
- whandle->handle = bo->flink;
+ whandle->handle = bo->flink_name;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = bo->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
ws->base.buffer_map = radeon_bo_map;
ws->base.buffer_unmap = radeon_bo_unmap;
ws->base.buffer_wait = radeon_bo_wait;
- ws->base.buffer_is_busy = radeon_bo_is_busy;
ws->base.buffer_create = radeon_winsys_bo_create;
ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
+ ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;