#include <stdio.h>
#include <inttypes.h>
+
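+/* A contiguous range of free pages within a sparse backing buffer; the
+ * free-chunk list in struct amdgpu_sparse_backing is a sorted array of these. */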
+struct amdgpu_sparse_backing_chunk {
+   uint32_t begin, end;
+};
+
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,

   void *cpu = NULL;
   uint64_t offset = 0;
+   assert(!bo->sparse);
+
   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */

   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
+   assert(!bo->sparse);
+
   if (bo->user_ptr)
      return;

#include "pipebuffer/pb_slab.h"
+struct amdgpu_sparse_backing_chunk;
+
+/*
+ * Sub-allocation information for a real buffer used as backing memory of a
+ * sparse buffer.
+ */
+struct amdgpu_sparse_backing {
+   struct list_head list;
+
+   struct amdgpu_winsys_bo *bo;
+
+   /* Sorted list of free chunks. */
+   struct amdgpu_sparse_backing_chunk *chunks;
+   uint32_t max_chunks;
+   uint32_t num_chunks;
+};
+
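+/* Maps one page of a sparse buffer's virtual address range to the backing
+ * buffer and the page within it that currently provide its memory. */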
+struct amdgpu_sparse_commitment {
+   struct amdgpu_sparse_backing *backing;
+   uint32_t page;
+};
+
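/*
 * A minimal sketch, not part of the patch itself: one way the sorted
 * free-chunk list above could be searched when carving num_pages of backing
 * memory out of an existing backing buffer. The helper name, the first-fit
 * policy and the half-open [begin, end) reading of a chunk are assumptions
 * made for illustration.
 */
static int32_t
sparse_backing_find_free_chunk(struct amdgpu_sparse_backing *backing,
                               uint32_t num_pages)
{
   /* chunks[] is sorted by page index; each entry is one free page range. */
   for (uint32_t i = 0; i < backing->num_chunks; ++i) {
      if (backing->chunks[i].end - backing->chunks[i].begin >= num_pages)
         return (int32_t)i;
   }
   return -1; /* no free chunk is large enough; a new backing buffer is needed */
}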
struct amdgpu_winsys_bo {
   struct pb_buffer base;
   union {
      struct {
         struct pb_slab_entry entry;
         struct amdgpu_winsys_bo *real;
      } slab;
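+      /* A sparse buffer reserves a page-aligned virtual address range up
+       * front; physical memory is committed to individual pages of that
+       * range through separate backing buffers. */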
+      struct {
+         /* Protects the commitment state below. */
+         mtx_t commit_lock;
+         amdgpu_va_handle va_handle;
+         enum radeon_bo_flag flags;
+
+         /* Size of the virtual address range in pages, and how many of those
+          * pages are currently backed by physical memory. */
+         uint32_t num_va_pages;
+         uint32_t num_backing_pages;
+
+         /* List of amdgpu_sparse_backing buffers that provide the physical
+          * memory committed to this buffer. */
+         struct list_head backing;
+
+         /* Commitment information for each page of the virtual memory area. */
+         struct amdgpu_sparse_commitment *commitments;
+      } sparse;
   } u;
   struct amdgpu_winsys *ws;
   void *user_ptr; /* from buffer_from_ptr */
-   amdgpu_bo_handle bo; /* NULL for slab entries */
+   amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
+   bool sparse;
   uint32_t unique_id;
   uint64_t va;
   enum radeon_bo_domain initial_domain;