*/
enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);
+ /**
+ * Query the flags used for creation of this buffer.
+ *
+ * Note that for imported buffers this may be lossy, since not all flags
+ * are passed 1:1.
+ */
+ enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);
+
/**************************************************************************
* Command submission.
*
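A minimal usage sketch of the new query, not part of the change itself: assuming buffer_get_flags is reached through the struct radeon_winsys vtable as the wiring at the end of this patch suggests, a driver could use it to decide whether direct CPU reads from a buffer would be slow. The helper name below is hypothetical.

static bool bo_needs_staging_for_cpu_read(struct radeon_winsys *ws,
                                          struct pb_buffer *buf)
{
   enum radeon_bo_flag flags = ws->buffer_get_flags(buf);

   /* Write-combined or CPU-inaccessible memory is slow (or impossible) to
    * read directly, so readback is better done through a staging copy. */
   return (flags & (RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS)) != 0;
}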
return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
+static enum radeon_bo_flag amdgpu_bo_get_flags(struct pb_buffer *buf)
+{
+ return ((struct amdgpu_winsys_bo*)buf)->flags;
+}
+
static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
for (unsigned i = 0; i < bo->num_fences; ++i)
bo->va = va;
bo->u.real.va_handle = va_handle;
bo->initial_domain = initial_domain;
+ bo->flags = flags;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
if (initial_domain & RADEON_DOMAIN_VRAM)
amdgpu_va_handle va_handle = NULL;
struct amdgpu_bo_info info = {0};
enum radeon_bo_domain initial = 0;
+ enum radeon_bo_flag flags = 0;
int r;
switch (whandle->type) {
initial |= RADEON_DOMAIN_VRAM;
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
initial |= RADEON_DOMAIN_GTT;
+ if (info.alloc_flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ flags |= RADEON_FLAG_NO_CPU_ACCESS;
+ if (info.alloc_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ flags |= RADEON_FLAG_GTT_WC;
/* Initialize the structure. */
simple_mtx_init(&bo->lock, mtx_plain);
bo->va = va;
bo->u.real.va_handle = va_handle;
bo->initial_domain = initial;
+ bo->flags = flags;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
bo->is_shared = true;
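For context on why the flags of an imported buffer are described as lossy: only bits the kernel records in alloc_flags can be recovered, so the reconstruction above is effectively the inverse of the creation-time mapping. A rough sketch of that forward mapping is shown below, under the assumption that it mirrors the two flags handled here; the helper name is hypothetical, and any flag without an AMDGPU_GEM_CREATE_* counterpart is simply dropped on import.

static uint64_t radeon_to_amdgpu_alloc_flags(enum radeon_bo_flag flags)
{
   uint64_t alloc_flags = 0;

   /* Only these two bits survive the round trip through the kernel;
    * anything else set at creation time cannot be queried back on import. */
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      alloc_flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      alloc_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   return alloc_flags;
}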
ws->base.buffer_commit = amdgpu_bo_sparse_commit;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
+ ws->base.buffer_get_flags = amdgpu_bo_get_flags;
}