util: remove LIST_INITHEAD macro
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 09cf924775514e232dbab63e4dafdffd56925249..edef12ccf23ea93fbe5cb1fb913e78174d9b6993 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -50,12 +50,6 @@ struct amdgpu_sparse_backing_chunk {
    uint32_t begin, end;
 };
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
-                 uint64_t size,
-                 unsigned alignment,
-                 enum radeon_bo_domain domain,
-                 enum radeon_bo_flag flags);
 static void amdgpu_bo_unmap(struct pb_buffer *buf);
 
 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
@@ -251,9 +245,9 @@ static bool amdgpu_bo_do_map(struct amdgpu_winsys_bo *bo, void **cpu)
    return true;
 }
 
-static void *amdgpu_bo_map(struct pb_buffer *buf,
-                           struct radeon_cmdbuf *rcs,
-                           enum pipe_transfer_usage usage)
+void *amdgpu_bo_map(struct pb_buffer *buf,
+                    struct radeon_cmdbuf *rcs,
+                    enum pipe_transfer_usage usage)
 {
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
    struct amdgpu_winsys_bo *real;
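
Dropping `static` here exports the mapping entry point to the rest of the winsys, so other files no longer have to go through the radeon_winsys vtable. A minimal sketch of the matching prototype, assuming it is published in amdgpu_bo.h (the header name is an assumption; it is not shown in this diff):

    /* Assumed declaration in amdgpu_bo.h, so other winsys files (e.g. the
     * command-submission code) can call the now-exported function directly. */
    void *amdgpu_bo_map(struct pb_buffer *buf,
                        struct radeon_cmdbuf *rcs,
                        enum pipe_transfer_usage usage);
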
@@ -658,7 +652,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
    }
    assert(slab_size != 0);
 
-   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
+   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
                                                     slab_size, slab_size,
                                                     domains, flags));
    if (!slab->buffer)
@@ -670,7 +664,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
    if (!slab->entries)
       goto fail_buffer;
 
-   LIST_INITHEAD(&slab->base.free);
+   list_inithead(&slab->base.free);
 
    base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
 
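
LIST_INITHEAD was a thin macro over list_inithead() from util/list.h, so call sites switch to the function spelling with no behavioral change. A minimal sketch of what the initializer establishes, assuming mesa's circular doubly-linked list where an empty head is self-linked:

    #include <assert.h>
    #include "util/list.h"

    static void init_example(void)
    {
       struct list_head head;

       /* list_inithead() makes an empty circular list: both links of the
        * head point back at the head itself. */
       list_inithead(&head);
       assert(head.next == &head && head.prev == &head);
    }
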
@@ -833,7 +827,7 @@ sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_
                   bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
       size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
 
-      buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
+      buf = amdgpu_bo_create(bo->ws, size, RADEON_SPARSE_PAGE_SIZE,
                              bo->initial_domain,
                              bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
       if (!buf) {
@@ -1023,7 +1017,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
    if (!bo->u.sparse.commitments)
       goto error_alloc_commitments;
 
-   LIST_INITHEAD(&bo->u.sparse.backing);
+   list_inithead(&bo->u.sparse.backing);
 
    /* For simplicity, we always map a multiple of the page size. */
    map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
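
Mapping a sparse buffer always covers a whole number of sparse pages, so the requested size is rounded up with align64(). A worked sketch of the round-up, assuming align64() is the usual power-of-two bit trick from util/u_math.h and a 64 KiB sparse page size:

    #include <stdint.h>

    /* align64(1, 65536) -> 65536, align64(65537, 65536) -> 131072.
     * Valid only for power-of-two alignments. */
    static inline uint64_t align64_sketch(uint64_t value, uint64_t alignment)
    {
       return (value + alignment - 1) & ~(alignment - 1);
    }
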
@@ -1298,22 +1292,24 @@ static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
    amdgpu_bo_set_metadata(bo->bo, &metadata);
 }
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
+struct pb_buffer *
+amdgpu_bo_create(struct amdgpu_winsys *ws,
                  uint64_t size,
                  unsigned alignment,
                  enum radeon_bo_domain domain,
                  enum radeon_bo_flag flags)
 {
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo;
    int heap = -1;
 
+   if (domain & (RADEON_DOMAIN_GDS | RADEON_DOMAIN_OA))
+      flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_SUBALLOC;
+
    /* VRAM implies WC. This is not optional. */
    assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
 
-   /* NO_CPU_ACCESS is valid with VRAM only. */
-   assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
+   /* NO_CPU_ACCESS is not valid with GTT. */
+   assert(!(domain & RADEON_DOMAIN_GTT) || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
 
    /* Sparse buffers must have NO_CPU_ACCESS set. */
    assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
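
GDS and OA are on-chip memories with no CPU mapping and no suballocation support, so the create path now forces NO_CPU_ACCESS and NO_SUBALLOC for them. That is why the old assertion "NO_CPU_ACCESS implies VRAM" had to be relaxed to "NO_CPU_ACCESS excludes GTT". A sketch of the relaxed invariant, assuming plain bitmask semantics for the domain and flag enums (both from gallium's radeon_winsys.h):

    #include <stdbool.h>

    /* NO_CPU_ACCESS is now acceptable for any domain except GTT, so VRAM,
     * GDS and OA allocations all pass this check. */
    static bool no_cpu_access_ok(enum radeon_bo_domain domain,
                                 enum radeon_bo_flag flags)
    {
       if (!(flags & RADEON_FLAG_NO_CPU_ACCESS))
          return true;                        /* flag unset: always valid */
       return !(domain & RADEON_DOMAIN_GTT);  /* flag set: GTT disallowed */
    }
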
@@ -1399,11 +1395,20 @@ no_slab:
    return &bo->base;
 }
 
+static struct pb_buffer *
+amdgpu_buffer_create(struct radeon_winsys *ws,
+                     uint64_t size,
+                     unsigned alignment,
+                     enum radeon_bo_domain domain,
+                     enum radeon_bo_flag flags)
+{
+   return amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
+                           flags);
+}
+
 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                                struct winsys_handle *whandle,
-                                               unsigned vm_alignment,
-                                               unsigned *stride,
-                                               unsigned *offset)
+                                               unsigned vm_alignment)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo = NULL;
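
amdgpu_buffer_create() is a thin vtable adapter: the public radeon_winsys interface still passes a `struct radeon_winsys *`, while internal callers (the slab and sparse paths above) now hand the derived `struct amdgpu_winsys *` to amdgpu_bo_create() directly and skip the downcast. A sketch of the downcast this relies on, assuming the per-screen split visible in amdgpu_bo_init_functions() below; the field name `aws` is an assumption:

    /* Assumed shape of amdgpu_winsys(): each per-screen radeon_winsys is
     * embedded in an amdgpu_screen_winsys that points at the shared
     * device-wide amdgpu_winsys. */
    static inline struct amdgpu_winsys *
    amdgpu_winsys(struct radeon_winsys *base)
    {
       return ((struct amdgpu_screen_winsys *)base)->aws;
    }
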
@@ -1426,11 +1431,6 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
       return NULL;
    }
 
-   if (stride)
-      *stride = whandle->stride;
-   if (offset)
-      *offset = whandle->offset;
-
    r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
    if (r)
       return NULL;
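
The stride/offset out-parameters disappear because struct winsys_handle already carries those fields; importers are expected to read whandle->stride and whandle->offset themselves after the call. A caller-side sketch under that assumption (the helper name is hypothetical; types come from radeon_winsys.h):

    /* Hypothetical caller: layout information now stays inside the handle
     * instead of being copied out through extra pointer arguments. */
    static struct pb_buffer *
    import_with_layout(struct radeon_winsys *ws, struct winsys_handle *whandle,
                       unsigned vm_alignment, unsigned *stride, unsigned *offset)
    {
       struct pb_buffer *buf = ws->buffer_from_handle(ws, whandle, vm_alignment);
       if (buf) {
          *stride = whandle->stride;  /* previously returned via *stride */
          *offset = whandle->offset;  /* previously returned via *offset */
       }
       return buf;
    }
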
@@ -1517,11 +1517,11 @@ error:
    return NULL;
 }
 
-static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
-                                 unsigned stride, unsigned offset,
-                                 unsigned slice_size,
+static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
+                                 struct pb_buffer *buffer,
                                  struct winsys_handle *whandle)
 {
+   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
    struct amdgpu_winsys *ws = bo->ws;
    enum amdgpu_bo_handle_type type;
@@ -1537,12 +1537,10 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
    case WINSYS_HANDLE_TYPE_SHARED:
       type = amdgpu_bo_handle_type_gem_flink_name;
       break;
+   case WINSYS_HANDLE_TYPE_KMS:
    case WINSYS_HANDLE_TYPE_FD:
       type = amdgpu_bo_handle_type_dma_buf_fd;
       break;
-   case WINSYS_HANDLE_TYPE_KMS:
-      type = amdgpu_bo_handle_type_kms;
-      break;
    default:
       return false;
    }
@@ -1551,13 +1549,20 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
    if (r)
       return false;
 
+   if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
+      int dma_fd = whandle->handle;
+
+      r = drmPrimeFDToHandle(sws->fd, dma_fd, &whandle->handle);
+      close(dma_fd);
+
+      if (r)
+         return false;
+   }
+
    simple_mtx_lock(&ws->bo_export_table_lock);
    util_hash_table_set(ws->bo_export_table, bo->bo, bo);
    simple_mtx_unlock(&ws->bo_export_table_lock);
 
-   whandle->stride = stride;
-   whandle->offset = offset;
-   whandle->offset += slice_size * whandle->layer;
    bo->is_shared = true;
    return true;
 }
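
This is the interesting part of the handle rework: a GEM/KMS handle is only meaningful relative to one DRM file description, and with several screens sharing one amdgpu_winsys the winsys' own fd may not be the caller's. KMS requests therefore fall through to the dma-buf path, and the resulting fd is re-imported on the requesting screen's fd so the returned handle is local to that fd. The stride/offset/layer writes move out of this function for the same reason as on the import side. A standalone sketch of the fd-to-handle conversion using libdrm's prime API:

    #include <stdint.h>
    #include <unistd.h>
    #include <xf86drm.h>

    /* Turn a dma-buf fd into a GEM handle valid on `drm_fd`, mirroring the
     * WINSYS_HANDLE_TYPE_KMS path above. Consumes dmabuf_fd. */
    static int dmabuf_to_gem_handle(int drm_fd, int dmabuf_fd, uint32_t *handle)
    {
       int r = drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
       close(dmabuf_fd);  /* imported (or failed); the fd is no longer needed */
       return r;
    }
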
@@ -1642,14 +1647,14 @@ static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
    return ((struct amdgpu_winsys_bo*)buf)->va;
 }
 
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
    ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
    ws->base.buffer_map = amdgpu_bo_map;
    ws->base.buffer_unmap = amdgpu_bo_unmap;
    ws->base.buffer_wait = amdgpu_bo_wait;
-   ws->base.buffer_create = amdgpu_bo_create;
+   ws->base.buffer_create = amdgpu_buffer_create;
    ws->base.buffer_from_handle = amdgpu_bo_from_handle;
    ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
    ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
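
The vtable is now installed on the per-screen winsys, which is what makes the per-fd handle translation above possible in the first place. A hedged sketch of the implied layering; only `base` and the fd are visible in this diff, the other fields are assumptions:

    /* Implied structure: one radeon_winsys vtable per DRM fd (per screen),
     * all sharing a single device-wide amdgpu_winsys. */
    struct amdgpu_screen_winsys {
       struct radeon_winsys base;  /* vtable filled in above */
       struct amdgpu_winsys *aws;  /* assumed: shared device-wide state */
       int fd;                     /* this screen's DRM file descriptor */
    };
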