#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
+#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include <xf86drm.h>
#include <errno.h>
+/*
+ * These are copied from radeon_drm.h; once an updated libdrm is released
+ * we should bump the configure.ac requirement for it and remove the
+ * following definitions.
+ */
#define RADEON_BO_FLAGS_MACRO_TILE 1
#define RADEON_BO_FLAGS_MICRO_TILE 2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
#ifndef DRM_RADEON_GEM_WAIT
-#define DRM_RADEON_GEM_WAIT 0x2b
+#define DRM_RADEON_GEM_WAIT 0x2b
-#define RADEON_GEM_NO_WAIT 0x1
-#define RADEON_GEM_USAGE_READ 0x2
-#define RADEON_GEM_USAGE_WRITE 0x4
+#define RADEON_GEM_NO_WAIT 0x1
+#define RADEON_GEM_USAGE_READ 0x2
+#define RADEON_GEM_USAGE_WRITE 0x4
struct drm_radeon_gem_wait {
- uint32_t handle;
- uint32_t flags; /* one of RADEON_GEM_* */
+ uint32_t handle;
+ uint32_t flags; /* one of RADEON_GEM_* */
};
#endif
+#ifndef RADEON_VA_MAP
+
+#define RADEON_VA_MAP 1
+#define RADEON_VA_UNMAP 2
+
+#define RADEON_VA_RESULT_OK 0
+#define RADEON_VA_RESULT_ERROR 1
+#define RADEON_VA_RESULT_VA_EXIST 2
+
+#define RADEON_VM_PAGE_VALID (1 << 0)
+#define RADEON_VM_PAGE_READABLE (1 << 1)
+#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
+#define RADEON_VM_PAGE_SYSTEM (1 << 3)
+#define RADEON_VM_PAGE_SNOOPED (1 << 4)
+
+struct drm_radeon_gem_va {
+ uint32_t handle;
+ uint32_t operation;
+ uint32_t vm_id;
+ uint32_t flags;
+ uint64_t offset;
+};
+
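+/* Provisional ioctl number; note that it shares the value 0x2b with the
+ * provisional DRM_RADEON_GEM_WAIT define above. */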
+#define DRM_RADEON_GEM_VA 0x2b
+#endif
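+/*
+ * Assumed DRM_RADEON_GEM_VA contract, mirrored from the call sites below
+ * rather than from kernel documentation: the kernel writes a
+ * RADEON_VA_RESULT_* code back into 'operation', and on
+ * RADEON_VA_RESULT_VA_EXIST it returns the address already assigned to
+ * the buffer in 'offset'.
+ */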
+
+
extern const struct pb_vtbl radeon_bo_vtbl;
return (struct radeon_bo *)bo;
}
+struct radeon_bo_va_hole {
+ struct list_head list;
+ uint64_t offset;
+ uint64_t size;
+};
+
struct radeon_bomgr {
/* Base class. */
struct pb_manager base;
/* List of buffer handles and its mutex. */
struct util_hash_table *bo_handles;
pipe_mutex bo_handles_mutex;
+ pipe_mutex bo_va_mutex;
+
+    /* Is virtual address space supported? */
+    bool va;
+    /* Next never-allocated offset; freed ranges below it are tracked in
+     * va_holes.  Must be 64-bit since the address space can exceed 4GB. */
+    uint64_t va_offset;
+    struct list_head va_holes;
};
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
if (_buf->vtbl == &radeon_bo_vtbl) {
bo = radeon_bo(_buf);
} else {
- struct pb_buffer *base_buf;
- pb_size offset;
- pb_get_base_buffer(_buf, &base_buf, &offset);
+ struct pb_buffer *base_buf;
+ pb_size offset;
+ pb_get_base_buffer(_buf, &base_buf, &offset);
if (base_buf->vtbl == &radeon_bo_vtbl)
bo = radeon_bo(base_buf);
sched_yield();
}
- if (bo->rws->info.drm_minor >= 12) {
+ /* XXX use this when it's ready */
+ /*if (bo->rws->info.drm_minor >= 12) {
struct drm_radeon_gem_wait args = {};
args.handle = bo->handle;
args.flags = usage;
while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) == -EBUSY);
- } else {
- struct drm_radeon_gem_wait_idle args = {};
+ } else*/ {
+ struct drm_radeon_gem_wait_idle args;
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
&args, sizeof(args)) == -EBUSY);
return TRUE;
}
- if (bo->rws->info.drm_minor >= 12) {
+ /* XXX use this when it's ready */
+ /*if (bo->rws->info.drm_minor >= 12) {
struct drm_radeon_gem_wait args = {};
args.handle = bo->handle;
args.flags = usage | RADEON_GEM_NO_WAIT;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) != 0;
- } else {
- struct drm_radeon_gem_busy args = {};
+ } else*/ {
+ struct drm_radeon_gem_busy args;
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
&args, sizeof(args)) != 0;
}
}
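+
+/* First-fit allocator over the hole list, falling back to bumping
+ * va_offset.  Worked example with hypothetical numbers: for a hole at
+ * offset 0x3000 of size 0x3000, a request of size 0x1000 with alignment
+ * 0x2000 wastes 0x1000 bytes to alignment and returns 0x4000, leaving
+ * holes [0x3000,0x4000) and [0x5000,0x6000). */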
+static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
+{
+ struct radeon_bo_va_hole *hole, *n;
+ uint64_t offset = 0, waste = 0;
+
+ pipe_mutex_lock(mgr->bo_va_mutex);
+ /* first look for a hole */
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ offset = hole->offset;
+ waste = 0;
+ if (alignment) {
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ }
+ offset += waste;
+ if (offset >= (hole->offset + hole->size)) {
+ continue;
+ }
+ if (!waste && hole->size == size) {
+ offset = hole->offset;
+ list_del(&hole->list);
+ FREE(hole);
+ pipe_mutex_unlock(mgr->bo_va_mutex);
+ return offset;
+ }
+        if ((hole->size - waste) >= size) {
+            if (waste) {
+                n = CALLOC_STRUCT(radeon_bo_va_hole);
+                /* on allocation failure the waste bytes are simply lost */
+                if (n) {
+                    n->size = waste;
+                    n->offset = hole->offset;
+                    list_add(&n->list, &mgr->va_holes);
+                }
+            }
+            hole->size -= (size + waste);
+            hole->offset += size + waste;
+            if (!hole->size) {
+                /* Exact fit after alignment: don't leave a zero-sized
+                 * hole in the list. */
+                list_del(&hole->list);
+                FREE(hole);
+            }
+            pipe_mutex_unlock(mgr->bo_va_mutex);
+            return offset;
+        }
+ }
+
+ offset = mgr->va_offset;
+ waste = 0;
+ if (alignment) {
+ waste = offset % alignment;
+ waste = waste ? alignment - waste : 0;
+ }
+ offset += waste;
+ mgr->va_offset += size + waste;
+ pipe_mutex_unlock(mgr->bo_va_mutex);
+ return offset;
+}
+
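+/* Reserve the caller-chosen range [va, va+size).  Used when the kernel
+ * reports RADEON_VA_RESULT_VA_EXIST and we must adopt the address a
+ * buffer is already mapped at: either bump va_offset past the range or
+ * prune the holes it overlaps. */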
+static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
+{
+ pipe_mutex_lock(mgr->bo_va_mutex);
+ if (va >= mgr->va_offset) {
+ if (va > mgr->va_offset) {
+ struct radeon_bo_va_hole *hole;
+ hole = CALLOC_STRUCT(radeon_bo_va_hole);
+ if (hole) {
+ hole->size = va - mgr->va_offset;
+ hole->offset = mgr->va_offset;
+ list_add(&hole->list, &mgr->va_holes);
+ }
+ }
+ mgr->va_offset = va + size;
+ } else {
+ struct radeon_bo_va_hole *hole, *n;
+ uint64_t stmp, etmp;
+
+ /* free all holes that fall into the range
+ * NOTE that we might lose virtual address space
+ */
+ LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
+ stmp = hole->offset;
+ etmp = stmp + hole->size;
+            if (va < etmp && (va + size) > stmp) {
+ list_del(&hole->list);
+ FREE(hole);
+ }
+ }
+ }
+ pipe_mutex_unlock(mgr->bo_va_mutex);
+}
+
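+/* Return [va, va+size) to the allocator.  Only the topmost range shrinks
+ * va_offset; everything else becomes a hole.  Adjacent holes are never
+ * merged, so churn-heavy workloads fragment the list over time. */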
+static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
+{
+ pipe_mutex_lock(mgr->bo_va_mutex);
+ if ((va + size) == mgr->va_offset) {
+ mgr->va_offset = va;
+ } else {
+ struct radeon_bo_va_hole *hole;
+
+ /* FIXME on allocation failure we just lose virtual address space
+ * maybe print a warning
+ */
+ hole = CALLOC_STRUCT(radeon_bo_va_hole);
+ if (hole) {
+ hole->size = size;
+ hole->offset = va;
+ list_add(&hole->list, &mgr->va_holes);
+ }
+ }
+ pipe_mutex_unlock(mgr->bo_va_mutex);
+}
+
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
- struct drm_gem_close args = {};
+ struct radeon_bomgr *mgr = bo->mgr;
+ struct drm_gem_close args;
+
+ memset(&args, 0, sizeof(args));
if (bo->name) {
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_remove(bo->mgr->bo_handles,
- (void*)(uintptr_t)bo->name);
+ (void*)(uintptr_t)bo->name);
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
}
if (bo->ptr)
- os_munmap(bo->ptr, bo->size);
+ os_munmap(bo->ptr, bo->base.size);
+
+ if (mgr->va) {
+ radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
+ }
/* Close object. */
args.handle = bo->handle;
{
struct radeon_bo *bo = radeon_bo(_buf);
struct radeon_drm_cs *cs = flush_ctx;
- struct drm_radeon_gem_mmap args = {};
+ struct drm_radeon_gem_mmap args;
void *ptr;
+ memset(&args, 0, sizeof(args));
+
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
}
args.handle = bo->handle;
args.offset = 0;
- args.size = (uint64_t)bo->size;
+ args.size = (uint64_t)bo->base.size;
if (drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_MMAP,
&args,
}
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
- struct pb_buffer **base_buf,
- unsigned *offset)
+ struct pb_buffer **base_buf,
+ unsigned *offset)
{
*base_buf = buf;
*offset = 0;
}
static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
- struct pb_validate *vl,
- unsigned flags)
+ struct pb_validate *vl,
+ unsigned flags)
{
/* Always pinned */
return PIPE_OK;
};
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
- pb_size size,
- const struct pb_desc *desc)
+ pb_size size,
+ const struct pb_desc *desc)
{
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
struct radeon_drm_winsys *rws = mgr->rws;
struct radeon_bo *bo;
- struct drm_radeon_gem_create args = {};
+ struct drm_radeon_gem_create args;
+ struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+
+ assert(rdesc->initial_domains);
+ assert((rdesc->initial_domains &
+ ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
args.size = size;
args.alignment = desc->alignment;
- args.initial_domain =
- (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ?
- RADEON_GEM_DOMAIN_GTT : 0) |
- (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ?
- RADEON_GEM_DOMAIN_VRAM : 0);
+ args.initial_domain = rdesc->initial_domains;
if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args))) {
bo = CALLOC_STRUCT(radeon_bo);
if (!bo)
- return NULL;
+ return NULL;
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = desc->alignment;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->handle = args.handle;
- bo->size = size;
+ bo->va = 0;
pipe_mutex_init(bo->map_mutex);
+ if (mgr->va) {
+ struct drm_radeon_gem_va va;
+
+ bo->va_size = align(size, 4096);
+ bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);
+
+ va.handle = bo->handle;
+ va.vm_id = 0;
+ va.operation = RADEON_VA_MAP;
+ va.flags = RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_WRITEABLE |
+ RADEON_VM_PAGE_SNOOPED;
+ va.offset = bo->va;
+ r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
+ if (r && va.operation == RADEON_VA_RESULT_ERROR) {
+ fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
+            fprintf(stderr, "radeon: size : %u bytes\n", size);
+            fprintf(stderr, "radeon: alignment : %u bytes\n", desc->alignment);
+            fprintf(stderr, "radeon: domains : %u\n", args.initial_domain);
+ radeon_bo_destroy(&bo->base);
+ return NULL;
+ }
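+        /* The kernel may already have a VA bound to this bo (e.g. the
+         * handle is shared); drop our speculative range and adopt the
+         * existing mapping instead. */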
+ if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
+ radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
+ bo->va = va.offset;
+ radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
+ }
+ }
+
return &bo->base;
}
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
util_hash_table_destroy(mgr->bo_handles);
pipe_mutex_destroy(mgr->bo_handles_mutex);
+ pipe_mutex_destroy(mgr->bo_va_mutex);
FREE(mgr);
}
mgr = CALLOC_STRUCT(radeon_bomgr);
if (!mgr)
- return NULL;
+ return NULL;
mgr->base.destroy = radeon_bomgr_destroy;
mgr->base.create_buffer = radeon_bomgr_create_bo;
mgr->rws = rws;
mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
pipe_mutex_init(mgr->bo_handles_mutex);
+ pipe_mutex_init(mgr->bo_va_mutex);
+
+ mgr->va = rws->info.r600_virtual_address;
+ mgr->va_offset = rws->info.r600_va_start;
+ list_inithead(&mgr->va_holes);
+
return &mgr->base;
}
return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}
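+/* Decode the Evergreen tile-split field: index n maps to 64 << n bytes
+ * (64..4096), with out-of-range values falling back to 1024. */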
+static unsigned eg_tile_split(unsigned tile_split)
+{
+ switch (tile_split) {
+ case 0: tile_split = 64; break;
+ case 1: tile_split = 128; break;
+ case 2: tile_split = 256; break;
+ case 3: tile_split = 512; break;
+ default:
+ case 4: tile_split = 1024; break;
+ case 5: tile_split = 2048; break;
+ case 6: tile_split = 4096; break;
+ }
+ return tile_split;
+}
+
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
enum radeon_bo_layout *microtiled,
- enum radeon_bo_layout *macrotiled)
+ enum radeon_bo_layout *macrotiled,
+ unsigned *bankw, unsigned *bankh,
+ unsigned *tile_split,
+ unsigned *stencil_tile_split,
+ unsigned *mtilea)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
- struct drm_radeon_gem_set_tiling args = {};
+ struct drm_radeon_gem_set_tiling args;
+
+ memset(&args, 0, sizeof(args));
args.handle = bo->handle;
*microtiled = RADEON_LAYOUT_LINEAR;
*macrotiled = RADEON_LAYOUT_LINEAR;
if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
- *microtiled = RADEON_LAYOUT_TILED;
+ *microtiled = RADEON_LAYOUT_TILED;
if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
- *macrotiled = RADEON_LAYOUT_TILED;
+ *macrotiled = RADEON_LAYOUT_TILED;
+    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
+ *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+ *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+ *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+ *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+ *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+ *tile_split = eg_tile_split(*tile_split);
+ }
}
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
- struct drm_radeon_gem_set_tiling args = {};
+ struct drm_radeon_gem_set_tiling args;
+
+ memset(&args, 0, sizeof(args));
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
-static unsigned get_pb_usage_from_create_flags(enum radeon_bo_domain domain)
-{
- unsigned res = 0;
-
- if (domain & RADEON_DOMAIN_GTT)
- res |= RADEON_PB_USAGE_DOMAIN_GTT;
-
- if (domain & RADEON_DOMAIN_VRAM)
- res |= RADEON_PB_USAGE_DOMAIN_VRAM;
-
- return res;
-}
-
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
unsigned size,
enum radeon_bo_domain domain)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
- struct pb_desc desc;
+ struct radeon_bo_desc desc;
struct pb_manager *provider;
struct pb_buffer *buffer;
memset(&desc, 0, sizeof(desc));
- desc.alignment = alignment;
- desc.usage = get_pb_usage_from_create_flags(domain);
+ desc.base.alignment = alignment;
+
+ /* Additional criteria for the cache manager. */
+ desc.base.usage = domain;
+ desc.initial_domains = domain;
/* Assign a buffer manager. */
if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
- provider = ws->cman;
+ provider = ws->cman;
else
provider = ws->kman;
- buffer = provider->create_buffer(provider, size, &desc);
+ buffer = provider->create_buffer(provider, size, &desc.base);
if (!buffer)
- return NULL;
+ return NULL;
return (struct pb_buffer*)buffer;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
- struct winsys_handle *whandle,
- unsigned *stride,
- unsigned *size)
+ struct winsys_handle *whandle,
+ unsigned *stride)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bo *bo;
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
-    struct drm_gem_open open_arg = {};
+    struct drm_gem_open open_arg;
+    int r;
+
+    memset(&open_arg, 0, sizeof(open_arg));
/* We must maintain a list of pairs <handle, bo>, so that we always return
* the same BO for one particular handle. If we didn't do that and created
goto fail;
}
bo->handle = open_arg.handle;
- bo->size = open_arg.size;
bo->name = whandle->handle;
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = 0;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
- bo->base.size = bo->size;
+ bo->base.size = open_arg.size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
+ bo->va = 0;
pipe_mutex_init(bo->map_mutex);
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);
if (stride)
*stride = whandle->stride;
- if (size)
- *size = bo->base.size;
+
+ if (mgr->va) {
+ struct drm_radeon_gem_va va;
+
+        bo->va_size = align(bo->base.size, 4096);
+ bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);
+
+        va.handle = bo->handle;
+        va.operation = RADEON_VA_MAP;
+        va.vm_id = 0;
+        va.offset = bo->va;
+        va.flags = RADEON_VM_PAGE_READABLE |
+                   RADEON_VM_PAGE_WRITEABLE |
+                   RADEON_VM_PAGE_SNOOPED;
+ r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
+ if (r && va.operation == RADEON_VA_RESULT_ERROR) {
+ fprintf(stderr, "radeon: Failed to assign virtual address space\n");
+ radeon_bo_destroy(&bo->base);
+ return NULL;
+ }
+ if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
+ radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
+ bo->va = va.offset;
+ radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
+ }
+ }
return (struct pb_buffer*)bo;
unsigned stride,
struct winsys_handle *whandle)
{
- struct drm_gem_flink flink = {};
+ struct drm_gem_flink flink;
struct radeon_bo *bo = get_radeon_bo(buffer);
+ memset(&flink, 0, sizeof(flink));
+
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
if (!bo->flinked) {
flink.handle = bo->handle;
return TRUE;
}
+static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
+{
+ return ((struct radeon_bo*)buf)->va;
+}
+
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
ws->base.buffer_create = radeon_winsys_bo_create;
ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
+ ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}