* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
-/*
- * Authors:
- * Marek Olšák <maraeo@gmail.com>
- */
#include "amdgpu_cs.h"
#include "os/os_time.h"
/* We delay adding the backing buffers until we really have to. However,
* we cannot delay accounting for memory use.
*/
- mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->u.sparse.commit_lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         acs->main.base.used_vram += backing->bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         acs->main.base.used_gart += backing->bo->base.size;
}
- mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->u.sparse.commit_lock);
return idx;
}
return index;
}
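The accounting walk above holds the sparse commit lock only for the duration of the list traversal. A minimal standalone sketch of the same pattern, using a pthread mutex and hypothetical struct and field names (sparse_bo, backing_page) in place of the winsys types:

/* Eager memory accounting over a locked backing list. Only the
 * lock-walk-unlock shape mirrors the patch; everything else is
 * illustrative. */
#include <pthread.h>
#include <stdint.h>

struct backing_page {
   uint64_t size;
   int is_vram;                 /* placement of this backing allocation */
   struct backing_page *next;
};

struct sparse_bo {
   pthread_mutex_t commit_lock; /* guards the backing list */
   struct backing_page *backing;
};

static void account_backing(struct sparse_bo *bo,
                            uint64_t *used_vram, uint64_t *used_gart)
{
   pthread_mutex_lock(&bo->commit_lock);
   for (struct backing_page *p = bo->backing; p; p = p->next) {
      if (p->is_vram)
         *used_vram += p->size;
      else
         *used_gart += p->size;
   }
   pthread_mutex_unlock(&bo->commit_lock);
}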
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
+ enum ring_type ring_type)
{
struct pb_buffer *pb;
uint8_t *mapped;
pb = ws->base.buffer_create(&ws->base, buffer_size,
ws->info.gart_page_size,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ (ring_type == RING_GFX ||
+ ring_type == RING_COMPUTE ||
+ ring_type == RING_DMA ?
+ RADEON_FLAG_GTT_WC : 0));
if (!pb)
return false;
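The new ring_type parameter lets the flag computation request write-combined GTT only for GFX/compute/SDMA IBs, which the CPU writes once and never reads back; UVD/VCE IBs are left cacheable, presumably because the kernel parses them on the CPU, where reads from write-combined memory are slow. A sketch of the same flag derivation with illustrative enum values and flag bits, not the real winsys definitions:

/* Deriving buffer flags from the ring type. */
#include <stdint.h>

enum ring_kind { RING_KIND_GFX, RING_KIND_COMPUTE, RING_KIND_DMA,
                 RING_KIND_UVD, RING_KIND_VCE };

#define FLAG_NO_SHARING (1u << 0)
#define FLAG_GTT_WC     (1u << 1)

static uint32_t ib_buffer_flags(enum ring_kind ring)
{
   uint32_t flags = FLAG_NO_SHARING;
   /* CPU-write-once, GPU-read IBs benefit from write-combining;
    * rings whose IBs the kernel reads back on the CPU do not. */
   if (ring == RING_KIND_GFX || ring == RING_KIND_COMPUTE ||
       ring == RING_KIND_DMA)
      flags |= FLAG_GTT_WC;
   return flags;
}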
/* Allocate a new buffer for IBs if the current buffer is all used. */
if (!ib->big_ib_buffer ||
ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- if (!amdgpu_ib_new_buffer(aws, ib))
+ if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
return false;
}
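This is the usual bump-allocation check: IBs are carved out of one large buffer, and a fresh buffer is allocated only when the next IB would overflow it. A hedged sketch with hypothetical names and an arbitrary 1 MiB default size; the real winsys sizes and reference-counts the old buffer differently:

#include <stdbool.h>
#include <stdint.h>

struct big_buf { uint64_t size; };

static bool ensure_ib_space(struct big_buf **buf, uint64_t *used,
                            uint64_t request,
                            struct big_buf *(*alloc)(uint64_t size))
{
   if (!*buf || *used + request > (*buf)->size) {
      struct big_buf *fresh = alloc(request < (1u << 20) ? (1u << 20)
                                                         : request);
      if (!fresh)
         return false;
      *buf = fresh;   /* the old buffer must stay alive until its IBs retire */
      *used = 0;
   }
   return true;
}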
rcs->max_prev = new_max_prev;
}
- if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
+ if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
return false;
assert(ib->used_ib_space == 0);
struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
struct amdgpu_winsys_bo *bo = buffer->bo;
- mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->u.sparse.commit_lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      /* We can directly add the buffer here, because we know that each
       * backing buffer occurs only once.
       */
int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
if (idx < 0) {
fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
- mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->u.sparse.commit_lock);
return false;
}
p_atomic_inc(&backing->bo->num_active_ioctls);
}
- mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->u.sparse.commit_lock);
}
return true;
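The p_atomic_inc on num_active_ioctls records that a submission referencing the backing buffer is in flight. A standalone sketch of that counting pattern using C11 atomics; the struct and the idle check are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

struct gpu_buf { atomic_int num_active_ioctls; };

static void submit_begin(struct gpu_buf *b)
{
   atomic_fetch_add(&b->num_active_ioctls, 1);
}

static void submit_end(struct gpu_buf *b)
{
   atomic_fetch_sub(&b->num_active_ioctls, 1);
}

/* A buffer can be reclaimed or re-validated cheaply only when no
 * submission that references it is still being processed. */
static bool is_idle(struct gpu_buf *b)
{
   return atomic_load(&b->num_active_ioctls) == 0;
}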
amdgpu_bo_handle *handles;
unsigned num = 0;
- mtx_lock(&ws->global_bo_list_lock);
+ simple_mtx_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
- mtx_unlock(&ws->global_bo_list_lock);
+ simple_mtx_unlock(&ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
cs->error_code = -ENOMEM;
return;
r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
handles, NULL, &bo_list);
free(handles);
- mtx_unlock(&ws->global_bo_list_lock);
+ simple_mtx_unlock(&ws->global_bo_list_lock);
} else {
unsigned num_handles;
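The handle array is built and handed to amdgpu_bo_list_create while global_bo_list_lock is still held, so no buffer can be added or destroyed between the snapshot and the kernel call. A generic sketch of the pattern, with illustrative types and a callback standing in for the ioctl:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct bo_registry {
   pthread_mutex_t lock;
   unsigned num;
   void **bos;              /* registered buffer handles */
};

static int snapshot_and_create(struct bo_registry *reg,
                               int (*create_list)(void **handles,
                                                  unsigned n))
{
   pthread_mutex_lock(&reg->lock);
   void **handles = malloc(sizeof(handles[0]) * reg->num);
   if (!handles) {
      pthread_mutex_unlock(&reg->lock);
      return -ENOMEM;
   }
   for (unsigned i = 0; i < reg->num; i++)
      handles[i] = reg->bos[i];
   int r = create_list(handles, reg->num);
   free(handles);
   pthread_mutex_unlock(&reg->lock);
   return r;
}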
while (rcs->current.cdw & 7)
radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
}
+ ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
break;
case RING_UVD:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
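Each ring pads its IB to a ring-specific dword alignment with that ring's NOP encoding (gfx pads to 8 dwords with the type-3 NOP 0xffff1000, UVD to 16 dwords with the type-2 NOP 0x80000000), and the new gfx_ib_size_counter converts the final dword count to bytes by multiplying by 4. A sketch of the padding loop, with a hypothetical cmd_stream type and no bounds checking:

#include <stdint.h>

struct cmd_stream { uint32_t *buf; unsigned cdw; };

static void emit(struct cmd_stream *cs, uint32_t dw)
{
   cs->buf[cs->cdw++] = dw;  /* assumes capacity was reserved */
}

static void pad_to(struct cmd_stream *cs, unsigned align_dw, uint32_t nop)
{
   /* align_dw must be a power of two for the mask trick to work */
   while (cs->cdw & (align_dw - 1))
      emit(cs, nop);
}

/* usage: pad_to(cs, 8, 0xffff1000) for gfx,
 *        pad_to(cs, 16, 0x80000000) for UVD */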
* that the order of fence dependency updates matches the order of
* submissions.
*/
- mtx_lock(&ws->bo_fence_lock);
+ simple_mtx_lock(&ws->bo_fence_lock);
amdgpu_add_fence_dependencies_bo_lists(cs);
/* Swap command streams. "cst" is going to be submitted. */
util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
amdgpu_cs_submit_ib, NULL);
/* The submission has been queued, unlock the fence now. */
- mtx_unlock(&ws->bo_fence_lock);
+ simple_mtx_unlock(&ws->bo_fence_lock);
if (!(flags & RADEON_FLUSH_ASYNC)) {
amdgpu_cs_sync_flush(rcs);
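Keeping bo_fence_lock held from amdgpu_add_fence_dependencies_bo_lists through util_queue_add_job is what makes the comment above hold: no other flush can slip its fence updates between this CS's bookkeeping and its enqueue. A condensed sketch of that ordering guarantee, with all names hypothetical:

#include <pthread.h>

struct submit_ctx {
   pthread_mutex_t bo_fence_lock;
};

static void flush_cs(struct submit_ctx *ctx,
                     void (*add_fence_deps)(void *cs),
                     void (*queue_job)(void *cs),
                     void *cs)
{
   pthread_mutex_lock(&ctx->bo_fence_lock);
   add_fence_deps(cs);   /* reads/writes per-buffer fences */
   queue_job(cs);        /* async submit; order is now fixed */
   /* Unlocking only after enqueue ensures no later flush can insert
    * its fence updates between our bookkeeping and our submission. */
   pthread_mutex_unlock(&ctx->bo_fence_lock);
}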