Add a per-buffer mutex, amdgpu_winsys_bo::lock. We'll use it in the
upcoming mapping change. Sparse buffers have always had one (the former
u.sparse.commit_lock), which this replaces.
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
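
(Note, not part of the patch: the hunks below all follow the usual simple_mtx
lifecycle, init at buffer creation, lock/unlock around per-buffer state,
destroy at teardown. A minimal sketch of that pattern follows; the struct and
the map-count helper are hypothetical stand-ins for illustration only, not the
real amdgpu_winsys_bo or the follow-up mapping change.)

#include "util/simple_mtx.h"

struct example_bo {
   simple_mtx_t lock;   /* protects per-buffer state shared between threads */
   int map_count;       /* hypothetical field guarded by the lock */
};

static void example_bo_create(struct example_bo *bo)
{
   simple_mtx_init(&bo->lock, mtx_plain);   /* pair with simple_mtx_destroy() */
   bo->map_count = 0;
}

static void example_bo_note_map(struct example_bo *bo)
{
   /* Per-buffer lock: unrelated buffers never contend with each other. */
   simple_mtx_lock(&bo->lock);
   bo->map_count++;
   simple_mtx_unlock(&bo->lock);
}

static void example_bo_destroy(struct example_bo *bo)
{
   simple_mtx_destroy(&bo->lock);
}
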
ws->num_mapped_buffers--;
}
+ simple_mtx_destroy(&bo->lock);
if (r)
goto error_va_map;
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = alignment;
bo->base.usage = 0;
for (unsigned i = 0; i < slab->base.num_entries; ++i) {
struct amdgpu_winsys_bo *bo = &slab->entries[i];
+ simple_mtx_init(&bo->lock, mtx_plain);
bo->base.alignment = entry_size;
bo->base.usage = slab->buffer->base.usage;
bo->base.size = entry_size;
{
struct amdgpu_slab *slab = amdgpu_slab(pslab);
- for (unsigned i = 0; i < slab->base.num_entries; ++i)
+ for (unsigned i = 0; i < slab->base.num_entries; ++i) {
amdgpu_bo_remove_fences(&slab->entries[i]);
+ simple_mtx_destroy(&slab->entries[i].lock);
+ }
FREE(slab->entries);
amdgpu_winsys_bo_reference(&slab->buffer, NULL);
}
amdgpu_va_range_free(bo->u.sparse.va_handle);
- simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
+ simple_mtx_destroy(&bo->lock);
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
bo->base.size = size;
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
LIST_INITHEAD(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
error_va_map:
amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
- simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
error_alloc_commitments:
+ simple_mtx_destroy(&bo->lock);
va_page = offset / RADEON_SPARSE_PAGE_SIZE;
end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
#if DEBUG_SPARSE_COMMITS
sparse_dump(bo, __func__);
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
initial |= RADEON_DOMAIN_GTT;
/* Initialize the structure. */
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
bo->bo = result.buf_handle;
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
+ simple_mtx_init(&bo->lock, mtx_plain);
bo->bo = buf_handle;
bo->base.alignment = 0;
bo->base.size = size;
struct amdgpu_winsys_bo *real;
} slab;
struct {
- simple_mtx_t commit_lock;
amdgpu_va_handle va_handle;
enum radeon_bo_flag flags;
amdgpu_bo_handle bo; /* NULL for slab entries and sparse buffers */
bool sparse;
uint32_t unique_id;
uint64_t va;
enum radeon_bo_domain initial_domain;
unsigned max_fences;
struct pipe_fence_handle **fences;
+ simple_mtx_t lock;
/* We delay adding the backing buffers until we really have to. However,
* we cannot delay accounting for memory use.
*/
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
acs->main.base.used_gart += backing->bo->base.size;
}
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
struct amdgpu_winsys_bo *bo = buffer->bo;
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
/* We can directly add the buffer here, because we know that each
int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
if (idx < 0) {
fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
p_atomic_inc(&backing->bo->num_active_ioctls);
}
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
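
(Also not part of the patch: the amdgpu_cs.c hunks above share one shape, take
the buffer's lock, walk u.sparse.backing, unlock. A sketch of that pattern,
assuming the winsys headers for amdgpu_winsys_bo and amdgpu_sparse_backing; the
'visit' callback is hypothetical and stands in for the real per-backing work
such as memory-use accounting or amdgpu_do_add_real_buffer().)

static void visit_sparse_backings(struct amdgpu_winsys_bo *bo,
                                  void (*visit)(struct amdgpu_sparse_backing *backing,
                                                void *data),
                                  void *data)
{
   /* bo->lock now guards the sparse backing list, as commit_lock did before. */
   simple_mtx_lock(&bo->lock);
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      visit(backing, data);
   }
   simple_mtx_unlock(&bo->lock);
}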