uint32_t begin, end;
};
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
- uint64_t size,
- unsigned alignment,
- enum radeon_bo_domain domain,
- enum radeon_bo_flag flags);
static void amdgpu_bo_unmap(struct pb_buffer *buf);
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
return true;
}
-static void *amdgpu_bo_map(struct pb_buffer *buf,
- struct radeon_cmdbuf *rcs,
- enum pipe_transfer_usage usage)
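+/* No longer static: exported so it can be called directly from other
+ * winsys source files instead of only through the vtable. */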
+void *amdgpu_bo_map(struct pb_buffer *buf,
+ struct radeon_cmdbuf *rcs,
+ enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
struct amdgpu_winsys_bo *real;
}
assert(slab_size != 0);
- slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
+ slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
slab_size, slab_size,
domains, flags));
if (!slab->buffer)
if (!slab->entries)
goto fail_buffer;
- LIST_INITHEAD(&slab->base.free);
+ list_inithead(&slab->base.free);
base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
- buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
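+   /* Sparse backing BOs must be real allocations; NO_SUBALLOC keeps them
+    * out of the suballocator. */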
+ buf = amdgpu_bo_create(bo->ws, size, RADEON_SPARSE_PAGE_SIZE,
bo->initial_domain,
bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
if (!buf) {
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- LIST_INITHEAD(&bo->u.sparse.backing);
+ list_inithead(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
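+/* Exported, and now takes the device-level amdgpu_winsys directly; the
+ * radeon_winsys vtable goes through the amdgpu_buffer_create wrapper below. */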
+struct pb_buffer *
+amdgpu_bo_create(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags)
{
- struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
int heap = -1;
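+
+   /* GDS and OA can't be CPU-mapped and don't support suballocation. */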
+ if (domain & (RADEON_DOMAIN_GDS | RADEON_DOMAIN_OA))
+ flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_SUBALLOC;
+
/* VRAM implies WC. This is not optional. */
assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
- /* NO_CPU_ACCESS is valid with VRAM only. */
- assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
+ /* NO_CPU_ACCESS is not valid with GTT. */
+ assert(!(domain & RADEON_DOMAIN_GTT) || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
/* Sparse buffers must have NO_CPU_ACCESS set. */
assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
return &bo->base;
}
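+/* radeon_winsys entry point: unwrap the generic winsys and forward to the
+ * amdgpu-specific amdgpu_bo_create. */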
+static struct pb_buffer *
+amdgpu_buffer_create(struct radeon_winsys *ws,
+ uint64_t size,
+ unsigned alignment,
+ enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags)
+{
+ return amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
+ flags);
+}
+
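+/* stride/offset are carried inside struct winsys_handle itself, so they
+ * are no longer returned through separate out-parameters. */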
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
- unsigned vm_alignment,
- unsigned *stride,
- unsigned *offset)
+ unsigned vm_alignment)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo = NULL;
return NULL;
}
- if (stride)
- *stride = whandle->stride;
- if (offset)
- *offset = whandle->offset;
-
r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
if (r)
return NULL;
return NULL;
}
-static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
- unsigned stride, unsigned offset,
- unsigned slice_size,
+static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
+ struct pb_buffer *buffer,
struct winsys_handle *whandle)
{
+ struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
struct amdgpu_winsys *ws = bo->ws;
enum amdgpu_bo_handle_type type;
case WINSYS_HANDLE_TYPE_SHARED:
type = amdgpu_bo_handle_type_gem_flink_name;
break;
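+   /* KMS handles are produced by exporting a dma-buf fd and then importing
+    * it into this screen's DRM fd below. */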
+ case WINSYS_HANDLE_TYPE_KMS:
case WINSYS_HANDLE_TYPE_FD:
type = amdgpu_bo_handle_type_dma_buf_fd;
break;
- case WINSYS_HANDLE_TYPE_KMS:
- type = amdgpu_bo_handle_type_kms;
- break;
default:
return false;
}
if (r)
return false;
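+   /* The export above produced a dma-buf fd; for KMS, import it into this
+    * screen's DRM fd to get a GEM handle that is valid for that fd. */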
+ if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
+ int dma_fd = whandle->handle;
+
+ r = drmPrimeFDToHandle(sws->fd, dma_fd, &whandle->handle);
+ close(dma_fd);
+
+ if (r)
+ return false;
+ }
+
simple_mtx_lock(&ws->bo_export_table_lock);
util_hash_table_set(ws->bo_export_table, bo->bo, bo);
simple_mtx_unlock(&ws->bo_export_table_lock);
- whandle->stride = stride;
- whandle->offset = offset;
- whandle->offset += slice_size * whandle->layer;
bo->is_shared = true;
return true;
}
return ((struct amdgpu_winsys_bo*)buf)->va;
}
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
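+/* Buffer functions are installed per screen: each amdgpu_screen_winsys has
+ * its own DRM fd (used above for the KMS handle import). */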
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
{
ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
ws->base.buffer_map = amdgpu_bo_map;
ws->base.buffer_unmap = amdgpu_bo_unmap;
ws->base.buffer_wait = amdgpu_bo_wait;
- ws->base.buffer_create = amdgpu_bo_create;
+ ws->base.buffer_create = amdgpu_buffer_create;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;