* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
-/*
- * Authors:
- * Marek Olšák <maraeo@gmail.com>
- */
#include "amdgpu_cs.h"
-#include "os/os_time.h"
+#include "util/os_time.h"
+#include <inttypes.h>
#include <stdio.h>
-#include <amdgpu_drm.h>
#include "amd/common/sid.h"
struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
fence->reference.count = 1;
+ fence->ws = ctx->ws;
fence->ctx = ctx;
fence->fence.context = ctx->ctx;
fence->fence.ip_type = ip_type;
fence->fence.ip_instance = ip_instance;
fence->fence.ring = ring;
- fence->submission_in_progress = true;
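+   /* Start out unsignalled: the fence only counts as submitted once the
+    * CS ioctl has run and assigned it a sequence number. */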
+ util_queue_fence_init(&fence->submitted);
+ util_queue_fence_reset(&fence->submitted);
p_atomic_inc(&ctx->refcount);
return (struct pipe_fence_handle *)fence;
}
+static struct pipe_fence_handle *
+amdgpu_fence_import_syncobj(struct radeon_winsys *rws, int fd)
+{
+ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
+ struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
+ int r;
+
+ if (!fence)
+ return NULL;
+
+ pipe_reference_init(&fence->reference, 1);
+ fence->ws = ws;
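+   /* fence->ctx == NULL means that the fence is syncobj-based. */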
+
+ r = amdgpu_cs_import_syncobj(ws->dev, fd, &fence->syncobj);
+ if (r) {
+ FREE(fence);
+ return NULL;
+ }
+
+ util_queue_fence_init(&fence->submitted);
+
+ assert(amdgpu_fence_is_syncobj(fence));
+ return (struct pipe_fence_handle*)fence;
+}
+
+static struct pipe_fence_handle *
+amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
+{
+ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
+ struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
+
+ if (!fence)
+ return NULL;
+
+ pipe_reference_init(&fence->reference, 1);
+ fence->ws = ws;
+ /* fence->ctx == NULL means that the fence is syncobj-based. */
+
+ /* Convert sync_file into syncobj. */
+ int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
+ if (r) {
+ FREE(fence);
+ return NULL;
+ }
+
+ r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
+ if (r) {
+ amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
+ FREE(fence);
+ return NULL;
+ }
+
+ util_queue_fence_init(&fence->submitted);
+
+ return (struct pipe_fence_handle*)fence;
+}
+
+static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
+ struct pipe_fence_handle *pfence)
+{
+ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
+ struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
+
+ if (amdgpu_fence_is_syncobj(fence)) {
+ int fd, r;
+
+ /* Convert syncobj into sync_file. */
+ r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
+ return r ? -1 : fd;
+ }
+
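+   /* The fence must be submitted first, so that it has a sequence number
+    * that amdgpu_cs_fence_to_handle can convert. */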
+ util_queue_fence_wait(&fence->submitted);
+
+ /* Convert the amdgpu fence into a fence FD. */
+ int fd;
+ if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
+ AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
+ (uint32_t*)&fd))
+ return -1;
+
+ return fd;
+}
+
+static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
+{
+ struct amdgpu_winsys *ws = amdgpu_winsys(rws);
+ uint32_t syncobj;
+ int fd = -1;
+
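+   /* Create a temporary syncobj in the signalled state, export it as a
+    * sync_file FD, then destroy the syncobj again. */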
+ int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,
+ &syncobj);
+ if (r) {
+ return -1;
+ }
+
+ r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);
+ if (r) {
+ fd = -1;
+ }
+
+ amdgpu_cs_destroy_syncobj(ws->dev, syncobj);
+ return fd;
+}
+
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
- struct amdgpu_cs_request* request,
- uint64_t *user_fence_cpu_address)
+ uint64_t seq_no,
+ uint64_t *user_fence_cpu_address)
{
- struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
+ struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
- rfence->fence.fence = request->seq_no;
- rfence->user_fence_cpu_address = user_fence_cpu_address;
- rfence->submission_in_progress = false;
+ afence->fence.fence = seq_no;
+ afence->user_fence_cpu_address = user_fence_cpu_address;
+ util_queue_fence_signal(&afence->submitted);
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
- struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
+ struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
- rfence->signalled = true;
- rfence->submission_in_progress = false;
+ afence->signalled = true;
+ util_queue_fence_signal(&afence->submitted);
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
bool absolute)
{
- struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
+ struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
uint32_t expired;
int64_t abs_timeout;
uint64_t *user_fence_cpu;
int r;
- if (rfence->signalled)
+ if (afence->signalled)
return true;
   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);
+ /* Handle syncobjs. */
+ if (amdgpu_fence_is_syncobj(afence)) {
+ if (amdgpu_cs_syncobj_wait(afence->ws->dev, &afence->syncobj, 1,
+ abs_timeout, 0, NULL))
+ return false;
+
+ afence->signalled = true;
+ return true;
+ }
+
/* The fence might not have a number assigned if its IB is being
* submitted in the other thread right now. Wait until the submission
* is done. */
- if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
- abs_timeout))
+ if (!util_queue_fence_wait_timeout(&afence->submitted, abs_timeout))
return false;
- user_fence_cpu = rfence->user_fence_cpu_address;
+ user_fence_cpu = afence->user_fence_cpu_address;
if (user_fence_cpu) {
- if (*user_fence_cpu >= rfence->fence.fence) {
- rfence->signalled = true;
+ if (*user_fence_cpu >= afence->fence.fence) {
+ afence->signalled = true;
return true;
}
}
/* Now use the libdrm query. */
- r = amdgpu_cs_query_fence_status(&rfence->fence,
+ r = amdgpu_cs_query_fence_status(&afence->fence,
abs_timeout,
AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
&expired);
if (expired) {
/* This variable can only transition from false to true, so it doesn't
* matter if threads race for it. */
- rfence->signalled = true;
+ afence->signalled = true;
return true;
}
return false;
}
static struct pipe_fence_handle *
-amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
+amdgpu_cs_get_next_fence(struct radeon_cmdbuf *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct pipe_fence_handle *fence = NULL;
}
fence = amdgpu_fence_create(cs->ctx,
- cs->csc->request.ip_type,
- cs->csc->request.ip_instance,
- cs->csc->request.ring);
+ cs->csc->ib[IB_MAIN].ip_type,
+ cs->csc->ib[IB_MAIN].ip_instance,
+ cs->csc->ib[IB_MAIN].ring);
if (!fence)
return NULL;
ctx->ws = amdgpu_winsys(ws);
ctx->refcount = 1;
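+   /* Remember the global rejected-CS count at context creation, so that
+    * amdgpu_ctx_query_reset_status can tell whether any CS was rejected
+    * during the lifetime of this context. */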
+ ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
if (r) {
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
- uint32_t result, hangs;
int r;
- r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
- return PIPE_NO_RESET;
+ /* Return a failure due to a GPU hang. */
+ if (ctx->ws->info.drm_minor >= 24) {
+ uint64_t flags;
+
+ r = amdgpu_cs_query_reset_state2(ctx->ctx, &flags);
+ if (r) {
+ fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
+ return PIPE_NO_RESET;
+ }
+
+ if (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET) {
+ if (flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY)
+ return PIPE_GUILTY_CONTEXT_RESET;
+ else
+ return PIPE_INNOCENT_CONTEXT_RESET;
+ }
+ } else {
+ uint32_t result, hangs;
+
+ r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
+ if (r) {
+ fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
+ return PIPE_NO_RESET;
+ }
+
+ switch (result) {
+ case AMDGPU_CTX_GUILTY_RESET:
+ return PIPE_GUILTY_CONTEXT_RESET;
+ case AMDGPU_CTX_INNOCENT_RESET:
+ return PIPE_INNOCENT_CONTEXT_RESET;
+ case AMDGPU_CTX_UNKNOWN_RESET:
+ return PIPE_UNKNOWN_CONTEXT_RESET;
+ }
}
- switch (result) {
- case AMDGPU_CTX_GUILTY_RESET:
- return PIPE_GUILTY_CONTEXT_RESET;
- case AMDGPU_CTX_INNOCENT_RESET:
- return PIPE_INNOCENT_CONTEXT_RESET;
- case AMDGPU_CTX_UNKNOWN_RESET:
- return PIPE_UNKNOWN_CONTEXT_RESET;
- case AMDGPU_CTX_NO_RESET:
- default:
- return PIPE_NO_RESET;
+ /* Return a failure due to a rejected command submission. */
+ if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
+ return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
+ PIPE_INNOCENT_CONTEXT_RESET;
}
+ return PIPE_NO_RESET;
}
/* COMMAND SUBMISSION */
static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
- return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
- cs->request.ip_type != AMDGPU_HW_IP_VCE;
+ return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
+ cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
+ cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD_ENC &&
+ cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&
+ cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC &&
+ cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_JPEG;
}
static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
- return cs->ctx->ws->info.chip_class >= CIK &&
- cs->ring_type == RING_GFX;
+ return cs->ctx->ws->info.chip_class >= GFX7 &&
+ (cs->ring_type == RING_GFX || cs->ring_type == RING_COMPUTE);
}
-static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
+static unsigned amdgpu_cs_epilog_dws(struct amdgpu_cs *cs)
{
- if (ring_type == RING_GFX)
+ if (amdgpu_cs_has_chaining(cs))
return 4; /* for chaining */
return 0;
if (bo->bo) {
buffers = cs->real_buffers;
num_buffers = cs->num_real_buffers;
- } else {
+ } else if (!bo->sparse) {
buffers = cs->slab_buffers;
num_buffers = cs->num_slab_buffers;
+ } else {
+ buffers = cs->sparse_buffers;
+ num_buffers = cs->num_sparse_buffers;
}
/* not found or found */
}
static int
-amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
+amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
- struct amdgpu_cs_context *cs = acs->csc;
struct amdgpu_cs_buffer *buffer;
- unsigned hash;
- int idx = amdgpu_lookup_buffer(cs, bo);
-
- if (idx >= 0)
- return idx;
+ int idx;
/* New buffer, check if the backing array is large enough. */
if (cs->num_real_buffers >= cs->max_real_buffers) {
unsigned new_max =
MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
struct amdgpu_cs_buffer *new_buffers;
- amdgpu_bo_handle *new_handles;
- uint8_t *new_flags;
new_buffers = MALLOC(new_max * sizeof(*new_buffers));
- new_handles = MALLOC(new_max * sizeof(*new_handles));
- new_flags = MALLOC(new_max * sizeof(*new_flags));
- if (!new_buffers || !new_handles || !new_flags) {
- fprintf(stderr, "amdgpu_lookup_or_add_buffer: allocation failed\n");
+ if (!new_buffers) {
+ fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
FREE(new_buffers);
- FREE(new_handles);
- FREE(new_flags);
return -1;
}
memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
- memcpy(new_handles, cs->handles, cs->num_real_buffers * sizeof(*new_handles));
- memcpy(new_flags, cs->flags, cs->num_real_buffers * sizeof(*new_flags));
FREE(cs->real_buffers);
- FREE(cs->handles);
- FREE(cs->flags);
cs->max_real_buffers = new_max;
cs->real_buffers = new_buffers;
- cs->handles = new_handles;
- cs->flags = new_flags;
}
   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
amdgpu_winsys_bo_reference(&buffer->bo, bo);
- cs->handles[idx] = bo->bo;
- cs->flags[idx] = 0;
p_atomic_inc(&bo->num_cs_references);
cs->num_real_buffers++;
+ return idx;
+}
+
+static int
+amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
+{
+ struct amdgpu_cs_context *cs = acs->csc;
+ unsigned hash;
+ int idx = amdgpu_lookup_buffer(cs, bo);
+
+ if (idx >= 0)
+ return idx;
+
+ idx = amdgpu_do_add_real_buffer(cs, bo);
+
hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
cs->buffer_indices_hashlist[hash] = idx;
return idx;
}
-static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
+static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
+ struct amdgpu_winsys_bo *bo)
+{
+ struct amdgpu_cs_context *cs = acs->csc;
+ struct amdgpu_cs_buffer *buffer;
+ unsigned hash;
+ int idx = amdgpu_lookup_buffer(cs, bo);
+
+ if (idx >= 0)
+ return idx;
+
+ /* New buffer, check if the backing array is large enough. */
+ if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
+ unsigned new_max =
+ MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
+ struct amdgpu_cs_buffer *new_buffers;
+
+ new_buffers = REALLOC(cs->sparse_buffers,
+ cs->max_sparse_buffers * sizeof(*new_buffers),
+ new_max * sizeof(*new_buffers));
+ if (!new_buffers) {
+ fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
+ return -1;
+ }
+
+ cs->max_sparse_buffers = new_max;
+ cs->sparse_buffers = new_buffers;
+ }
+
+ idx = cs->num_sparse_buffers;
+ buffer = &cs->sparse_buffers[idx];
+
+ memset(buffer, 0, sizeof(*buffer));
+ amdgpu_winsys_bo_reference(&buffer->bo, bo);
+ p_atomic_inc(&bo->num_cs_references);
+ cs->num_sparse_buffers++;
+
+ hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+ cs->buffer_indices_hashlist[hash] = idx;
+
+ /* We delay adding the backing buffers until we really have to. However,
+ * we cannot delay accounting for memory use.
+ */
+ simple_mtx_lock(&bo->lock);
+
+ list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
+ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
+ acs->main.base.used_vram += backing->bo->base.size;
+ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
+ acs->main.base.used_gart += backing->bo->base.size;
+ }
+
+ simple_mtx_unlock(&bo->lock);
+
+ return idx;
+}
+
+static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
struct pb_buffer *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
struct amdgpu_cs_buffer *buffer;
int index;
- if (!bo->bo) {
- index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
- if (index < 0)
- return 0;
-
- buffer = &cs->slab_buffers[index];
- buffer->usage |= usage;
+ /* Fast exit for no-op calls.
+ * This is very effective with suballocators and linear uploaders that
+ * are outside of the winsys.
+ */
+ if (bo == cs->last_added_bo &&
+ (usage & cs->last_added_bo_usage) == usage &&
+ (1u << priority) & cs->last_added_bo_priority_usage)
+ return cs->last_added_bo_index;
+
+ if (!bo->sparse) {
+ if (!bo->bo) {
+ index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
+ if (index < 0)
+ return 0;
+
+ buffer = &cs->slab_buffers[index];
+ buffer->usage |= usage;
+
+ usage &= ~RADEON_USAGE_SYNCHRONIZED;
+ index = buffer->u.slab.real_idx;
+ } else {
+ index = amdgpu_lookup_or_add_real_buffer(acs, bo);
+ if (index < 0)
+ return 0;
+ }
- usage &= ~RADEON_USAGE_SYNCHRONIZED;
- index = buffer->u.slab.real_idx;
+ buffer = &cs->real_buffers[index];
} else {
- index = amdgpu_lookup_or_add_real_buffer(acs, bo);
+ index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
if (index < 0)
return 0;
+
+ buffer = &cs->sparse_buffers[index];
}
- buffer = &cs->real_buffers[index];
- buffer->u.real.priority_usage |= 1llu << priority;
+ buffer->u.real.priority_usage |= 1u << priority;
buffer->usage |= usage;
- cs->flags[index] = MAX2(cs->flags[index], priority / 4);
+
+ cs->last_added_bo = bo;
+ cs->last_added_bo_index = index;
+ cs->last_added_bo_usage = buffer->usage;
+ cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
return index;
}
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
+ struct amdgpu_ib *ib,
+ enum ring_type ring_type)
{
struct pb_buffer *pb;
uint8_t *mapped;
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
- buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
+ const unsigned min_size = MAX2(ib->max_check_space_size, 8 * 1024 * 4);
+ const unsigned max_size = 512 * 1024 * 4;
- switch (ib->ib_type) {
- case IB_CONST_PREAMBLE:
- buffer_size = MAX2(buffer_size, 4 * 1024);
- break;
- case IB_CONST:
- buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
- break;
- case IB_MAIN:
- buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
- break;
- default:
- unreachable("unhandled IB type");
- }
+ buffer_size = MIN2(buffer_size, max_size);
+ buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
- pb = ws->base.buffer_create(&ws->base, buffer_size,
- ws->info.gart_page_size,
- RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
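+   /* GFX, compute and SDMA rings get write-combined memory in the 32-bit
+    * address range for their IBs; the multimedia rings use plain GTT. */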
+ pb = amdgpu_bo_create(ws, buffer_size,
+ ws->info.gart_page_size,
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ (ring_type == RING_GFX ||
+ ring_type == RING_COMPUTE ||
+ ring_type == RING_DMA ?
+ RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
if (!pb)
return false;
- mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
+ mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
if (!mapped) {
pb_reference(&pb, NULL);
return false;
static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
+ /* The maximum IB size including all chained IBs. */
switch (ib_type) {
case IB_MAIN:
      /* Smaller submits mean the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
return 20 * 1024;
- case IB_CONST_PREAMBLE:
- case IB_CONST:
- /* There isn't really any reason to limit CE IB size beyond the natural
- * limit implied by the main IB, except perhaps GTT size. Just return
- * an extremely large value that we never get anywhere close to.
- */
- return 16 * 1024 * 1024;
+ case IB_PARALLEL_COMPUTE:
+ /* Always chain this IB. */
+ return UINT_MAX;
default:
unreachable("bad ib_type");
}
}
-static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
+static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws, struct amdgpu_cs *cs,
enum ib_type ib_type)
{
- struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
/* Small IBs are better than big IBs, because the GPU goes idle quicker
* and there is less waiting for buffers and fences. Proof:
* http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
*/
struct amdgpu_ib *ib = NULL;
- struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
- unsigned ib_size = 0;
+ struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
+ /* This is the minimum size of a contiguous IB. */
+ unsigned ib_size = 4 * 1024 * 4;
switch (ib_type) {
- case IB_CONST_PREAMBLE:
- ib = &cs->const_preamble_ib;
- ib_size = 256 * 4;
- break;
- case IB_CONST:
- ib = &cs->const_ib;
- ib_size = 8 * 1024 * 4;
+ case IB_PARALLEL_COMPUTE:
+ ib = &cs->compute_ib;
break;
case IB_MAIN:
ib = &cs->main;
- ib_size = 4 * 1024 * 4;
break;
default:
unreachable("unhandled IB type");
}
+ /* Always allocate at least the size of the biggest cs_check_space call,
+ * because precisely the last call might have requested this size.
+ */
+ ib_size = MAX2(ib_size, ib->max_check_space_size);
+
if (!amdgpu_cs_has_chaining(cs)) {
ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }
/* Allocate a new buffer for IBs if the current buffer is all used. */
if (!ib->big_ib_buffer ||
ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- if (!amdgpu_ib_new_buffer(aws, ib))
+ if (!amdgpu_ib_new_buffer(ws, ib, cs->ring_type))
return false;
}
- info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
- ib->used_ib_space;
- info->size = 0;
- ib->ptr_ib_size = &info->size;
+ info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
+ info->ib_bytes = 0;
+ /* ib_bytes is in dwords and the conversion to bytes will be done before
+ * the CS ioctl. */
+ ib->ptr_ib_size = &info->ib_bytes;
+ ib->ptr_ib_size_inside_ib = false;
amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
- ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
+ ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs);
+ assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
+ ib->base.gpu_address = info->va_start;
return true;
}
-static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
+static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
{
- *ib->ptr_ib_size |= ib->base.current.cdw;
+ if (ib->ptr_ib_size_inside_ib) {
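+      /* The size dword is part of a chaining INDIRECT_BUFFER packet, so it
+       * must also carry the CHAIN and VALID bits. */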
+ *ib->ptr_ib_size = ib->base.current.cdw |
+ S_3F2_CHAIN(1) | S_3F2_VALID(1);
+ } else {
+ *ib->ptr_ib_size = ib->base.current.cdw;
+ }
+}
+
+static void amdgpu_ib_finalize(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
+{
+ amdgpu_set_ib_size(ib);
ib->used_ib_space += ib->base.current.cdw * 4;
+ ib->used_ib_space = align(ib->used_ib_space, ws->info.ib_alignment);
ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}
-static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
+static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
+ struct amdgpu_cs_context *cs,
enum ring_type ring_type)
{
- int i;
-
switch (ring_type) {
case RING_DMA:
- cs->request.ip_type = AMDGPU_HW_IP_DMA;
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
break;
case RING_UVD:
- cs->request.ip_type = AMDGPU_HW_IP_UVD;
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
+ break;
+
+ case RING_UVD_ENC:
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
break;
case RING_VCE:
- cs->request.ip_type = AMDGPU_HW_IP_VCE;
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
break;
- case RING_COMPUTE:
- cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
+ case RING_VCN_DEC:
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
break;
- default:
+ case RING_VCN_ENC:
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
+ break;
+
+ case RING_VCN_JPEG:
+ cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_JPEG;
+ break;
+
+ case RING_COMPUTE:
case RING_GFX:
- cs->request.ip_type = AMDGPU_HW_IP_GFX;
+ cs->ib[IB_MAIN].ip_type = ring_type == RING_GFX ? AMDGPU_HW_IP_GFX :
+ AMDGPU_HW_IP_COMPUTE;
+
+ /* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
+ * invalidation is the beginning of IBs (the previous commit does that),
+ * because completion of an IB doesn't care about the state of GPU caches,
+ * but the beginning of an IB does. Draw calls from multiple IBs can be
+ * executed in parallel, so draw calls from the current IB can finish after
+ * the next IB starts drawing, and so the cache flush at the end of IB
+ * is always late.
+ */
+ if (ws->info.drm_minor >= 26)
+ cs->ib[IB_MAIN].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
break;
- }
- for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
- cs->buffer_indices_hashlist[i] = -1;
+ default:
+ assert(0);
}
- cs->request.number_of_ibs = 1;
- cs->request.ibs = &cs->ib[IB_MAIN];
-
- cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
- cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
- AMDGPU_IB_FLAG_PREAMBLE;
+ cs->ib[IB_PARALLEL_COMPUTE].ip_type = AMDGPU_HW_IP_COMPUTE;
+ cs->ib[IB_PARALLEL_COMPUTE].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
+ memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
+ cs->last_added_bo = NULL;
return true;
}
+static void cleanup_fence_list(struct amdgpu_fence_list *fences)
+{
+ for (unsigned i = 0; i < fences->num; i++)
+ amdgpu_fence_reference(&fences->list[i], NULL);
+ fences->num = 0;
+}
+
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
unsigned i;
p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
}
+ for (i = 0; i < cs->num_sparse_buffers; i++) {
+ p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
+ amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
+ }
+ cleanup_fence_list(&cs->fence_dependencies);
+ cleanup_fence_list(&cs->syncobj_dependencies);
+ cleanup_fence_list(&cs->syncobj_to_signal);
+ cleanup_fence_list(&cs->compute_fence_dependencies);
+ cleanup_fence_list(&cs->compute_start_fence_dependencies);
cs->num_real_buffers = 0;
cs->num_slab_buffers = 0;
+ cs->num_sparse_buffers = 0;
amdgpu_fence_reference(&cs->fence, NULL);
- for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
- cs->buffer_indices_hashlist[i] = -1;
- }
+ memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
+ cs->last_added_bo = NULL;
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
amdgpu_cs_context_cleanup(cs);
- FREE(cs->flags);
FREE(cs->real_buffers);
- FREE(cs->handles);
FREE(cs->slab_buffers);
- FREE(cs->request.dependencies);
+ FREE(cs->sparse_buffers);
+ FREE(cs->fence_dependencies.list);
+ FREE(cs->syncobj_dependencies.list);
+ FREE(cs->syncobj_to_signal.list);
+ FREE(cs->compute_fence_dependencies.list);
+ FREE(cs->compute_start_fence_dependencies.list);
}
-static struct radeon_winsys_cs *
+static struct radeon_cmdbuf *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
enum ring_type ring_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
- void *flush_ctx)
+ void *flush_ctx,
+ bool stop_exec_on_failure)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
struct amdgpu_cs *cs;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
cs->ring_type = ring_type;
+ cs->stop_exec_on_failure = stop_exec_on_failure;
+
+ struct amdgpu_cs_fence_info fence_info;
+ fence_info.handle = cs->ctx->user_fence_bo;
+ fence_info.offset = cs->ring_type;
+ amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);
cs->main.ib_type = IB_MAIN;
- cs->const_ib.ib_type = IB_CONST;
- cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;
+ cs->compute_ib.ib_type = IB_PARALLEL_COMPUTE;
- if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
+ if (!amdgpu_init_cs_context(ctx->ws, &cs->csc1, ring_type)) {
FREE(cs);
return NULL;
}
- if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
+ if (!amdgpu_init_cs_context(ctx->ws, &cs->csc2, ring_type)) {
amdgpu_destroy_cs_context(&cs->csc1);
FREE(cs);
return NULL;
cs->csc = &cs->csc1;
cs->cst = &cs->csc2;
- if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
+ if (!amdgpu_get_new_ib(ctx->ws, cs, IB_MAIN)) {
amdgpu_destroy_cs_context(&cs->csc2);
amdgpu_destroy_cs_context(&cs->csc1);
FREE(cs);
return &cs->main.base;
}
-static struct radeon_winsys_cs *
-amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
+static struct radeon_cmdbuf *
+amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *ib,
+ bool uses_gds_ordered_append)
{
- struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
+ struct amdgpu_cs *cs = (struct amdgpu_cs*)ib;
struct amdgpu_winsys *ws = cs->ctx->ws;
- /* only one const IB can be added */
- if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
+ if (cs->ring_type != RING_GFX)
return NULL;
- if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
+ /* only one secondary IB can be added */
+ if (cs->compute_ib.ib_mapped)
return NULL;
- cs->csc->request.number_of_ibs = 2;
- cs->csc->request.ibs = &cs->csc->ib[IB_CONST];
-
- cs->cst->request.number_of_ibs = 2;
- cs->cst->request.ibs = &cs->cst->ib[IB_CONST];
+ /* Allocate the compute IB. */
+ if (!amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE))
+ return NULL;
- return &cs->const_ib.base;
+ if (uses_gds_ordered_append) {
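+      /* Ask the kernel to reset the GDS MAX_WAVE_ID counter before the IB,
+       * which GDS ordered append relies on. */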
+ cs->csc1.ib[IB_PARALLEL_COMPUTE].flags |=
+ AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
+ cs->csc2.ib[IB_PARALLEL_COMPUTE].flags |=
+ AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
+ }
+ return &cs->compute_ib.base;
}
-static struct radeon_winsys_cs *
-amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
+static bool
+amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_ib,
+ unsigned preamble_num_dw)
{
- struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
+ struct amdgpu_ib *ib = amdgpu_ib(rcs);
+ struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
struct amdgpu_winsys *ws = cs->ctx->ws;
+ struct amdgpu_cs_context *csc[2] = {&cs->csc1, &cs->csc2};
+ unsigned size = align(preamble_num_dw * 4, ws->info.ib_alignment);
+ struct pb_buffer *preamble_bo;
+ uint32_t *map;
+
+ /* Create the preamble IB buffer. */
+ preamble_bo = amdgpu_bo_create(ws, size, ws->info.ib_alignment,
+ RADEON_DOMAIN_VRAM,
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_GTT_WC |
+ RADEON_FLAG_READ_ONLY);
+ if (!preamble_bo)
+ return false;
- /* only one const preamble IB can be added and only when the const IB has
- * also been mapped */
- if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
- cs->const_preamble_ib.ib_mapped)
- return NULL;
+ map = (uint32_t*)amdgpu_bo_map(preamble_bo, NULL,
+ PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
+ if (!map) {
+ pb_reference(&preamble_bo, NULL);
+ return false;
+ }
- if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
- return NULL;
+ /* Upload the preamble IB. */
+ memcpy(map, preamble_ib, preamble_num_dw * 4);
- cs->csc->request.number_of_ibs = 3;
- cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];
+ /* Pad the IB. */
+ uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ring_type];
+ while (preamble_num_dw & ib_pad_dw_mask)
+ map[preamble_num_dw++] = PKT3_NOP_PAD;
+ amdgpu_bo_unmap(preamble_bo);
- cs->cst->request.number_of_ibs = 3;
- cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];
+ for (unsigned i = 0; i < 2; i++) {
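+      /* Point the preamble IB chunk of both CS contexts at the uploaded
+       * buffer and mark the main IB as preemptible. */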
+ csc[i]->ib[IB_PREAMBLE] = csc[i]->ib[IB_MAIN];
+ csc[i]->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAG_PREAMBLE;
+ csc[i]->ib[IB_PREAMBLE].va_start = amdgpu_winsys_bo(preamble_bo)->va;
+ csc[i]->ib[IB_PREAMBLE].ib_bytes = preamble_num_dw * 4;
- return &cs->const_preamble_ib.base;
+ csc[i]->ib[IB_MAIN].flags |= AMDGPU_IB_FLAG_PREEMPT;
+ }
+
+ assert(!cs->preamble_ib_bo);
+ cs->preamble_ib_bo = preamble_bo;
+
+ amdgpu_cs_add_buffer(rcs, cs->preamble_ib_bo, RADEON_USAGE_READ, 0,
+ RADEON_PRIO_IB1);
+ return true;
}
-static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
+static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
{
return true;
}
-static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
+static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
+ bool force_chaining)
{
struct amdgpu_ib *ib = amdgpu_ib(rcs);
struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
+ unsigned cs_epilog_dw = amdgpu_cs_epilog_dws(cs);
+ unsigned need_byte_size = (dw + cs_epilog_dw) * 4;
uint64_t va;
uint32_t *new_ptr_ib_size;
assert(rcs->current.cdw <= rcs->current.max_dw);
- if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
- return false;
+ /* 125% of the size for IB epilog. */
+ unsigned safe_byte_size = need_byte_size + need_byte_size / 4;
+ ib->max_check_space_size = MAX2(ib->max_check_space_size,
+ safe_byte_size);
- ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
+ /* If force_chaining is true, we can't return. We have to chain. */
+ if (!force_chaining) {
+ if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
+ return false;
- if (rcs->current.max_dw - rcs->current.cdw >= dw)
- return true;
+ ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
+
+ if (rcs->current.max_dw - rcs->current.cdw >= dw)
+ return true;
+ }
- if (!amdgpu_cs_has_chaining(cs))
+ if (!amdgpu_cs_has_chaining(cs)) {
+ assert(!force_chaining);
return false;
+ }
/* Allocate a new chunk */
if (rcs->num_prev >= rcs->max_prev) {
unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
- struct radeon_winsys_cs_chunk *new_prev;
+ struct radeon_cmdbuf_chunk *new_prev;
      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
rcs->max_prev = new_max_prev;
}
- if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
+ if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
return false;
assert(ib->used_ib_space == 0);
va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
/* This space was originally reserved. */
- rcs->current.max_dw += 4;
- assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);
+ rcs->current.max_dw += cs_epilog_dw;
- /* Pad with NOPs and add INDIRECT_BUFFER packet */
- while ((rcs->current.cdw & 7) != 4)
- radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ /* Pad with NOPs but leave 4 dwords for INDIRECT_BUFFER. */
+ uint32_t ib_pad_dw_mask = cs->ctx->ws->info.ib_pad_dw_mask[cs->ring_type];
+ while ((rcs->current.cdw & ib_pad_dw_mask) != ib_pad_dw_mask - 3)
+ radeon_emit(rcs, PKT3_NOP_PAD);
- radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
- : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
+ radeon_emit(rcs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
radeon_emit(rcs, va);
radeon_emit(rcs, va >> 32);
- new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
- radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));
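+   /* Reserve the dword that will hold the size of the next IB; it is
+    * patched by amdgpu_set_ib_size once that IB's final size is known. */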
+ new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];
+ assert((rcs->current.cdw & ib_pad_dw_mask) == 0);
assert((rcs->current.cdw & 7) == 0);
assert(rcs->current.cdw <= rcs->current.max_dw);
- *ib->ptr_ib_size |= rcs->current.cdw;
+ amdgpu_set_ib_size(ib);
ib->ptr_ib_size = new_ptr_ib_size;
+ ib->ptr_ib_size_inside_ib = true;
/* Hook up the new chunk */
rcs->prev[rcs->num_prev].buf = rcs->current.buf;
ib->base.current.cdw = 0;
ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
- ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
+ ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - cs_epilog_dw;
+ assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
+ ib->base.gpu_address = va;
amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
return true;
}
-static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
+static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
struct radeon_bo_list_item *list)
{
struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
return cs->num_real_buffers;
}
-DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
+static void add_fence_to_list(struct amdgpu_fence_list *fences,
+ struct amdgpu_fence *fence)
+{
+ unsigned idx = fences->num++;
+
+ if (idx >= fences->max) {
+ unsigned size;
+ const unsigned increment = 8;
+
+ fences->max = idx + increment;
+ size = fences->max * sizeof(fences->list[0]);
+ fences->list = realloc(fences->list, size);
+ /* Clear the newly-allocated elements. */
+ memset(fences->list + idx, 0,
+ increment * sizeof(fences->list[0]));
+ }
+ amdgpu_fence_reference(&fences->list[idx], (struct pipe_fence_handle*)fence);
+}
+
+static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
+ struct amdgpu_fence *fence)
+{
+ struct amdgpu_cs_context *cs = acs->csc;
+
+ /* Detect no-op dependencies only when there is only 1 ring,
+ * because IBs on one ring are always executed one at a time.
+ *
+ * We always want no dependency between back-to-back gfx IBs, because
+ * we need the parallelism between IBs for good performance.
+ */
+ if ((acs->ring_type == RING_GFX ||
+ acs->ctx->ws->info.num_rings[acs->ring_type] == 1) &&
+ !amdgpu_fence_is_syncobj(fence) &&
+ fence->ctx == acs->ctx &&
+ fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
+ fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
+ fence->fence.ring == cs->ib[IB_MAIN].ring)
+ return true;
+
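+   /* A fence that has already signalled is a no-op dependency, too. */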
+ return amdgpu_fence_wait((void *)fence, 0, false);
+}
+
+static void amdgpu_cs_add_fence_dependency(struct radeon_cmdbuf *rws,
+ struct pipe_fence_handle *pfence,
+ unsigned dependency_flags)
+{
+ struct amdgpu_cs *acs = amdgpu_cs(rws);
+ struct amdgpu_cs_context *cs = acs->csc;
+ struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
+
+ util_queue_fence_wait(&fence->submitted);
+
+ if (dependency_flags & RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY) {
+ /* Syncobjs are not needed here. */
+ assert(!amdgpu_fence_is_syncobj(fence));
+
+ if (acs->ctx->ws->info.has_scheduled_fence_dependency &&
+ dependency_flags & RADEON_DEPENDENCY_START_FENCE)
+ add_fence_to_list(&cs->compute_start_fence_dependencies, fence);
+ else
+ add_fence_to_list(&cs->compute_fence_dependencies, fence);
+ return;
+ }
+
+ /* Start fences are not needed here. */
+ assert(!(dependency_flags & RADEON_DEPENDENCY_START_FENCE));
-static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
- struct amdgpu_cs_buffer *buffer)
+ if (is_noop_fence_dependency(acs, fence))
+ return;
+
+ if (amdgpu_fence_is_syncobj(fence))
+ add_fence_to_list(&cs->syncobj_dependencies, fence);
+ else
+ add_fence_to_list(&cs->fence_dependencies, fence);
+}
+
+static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
+ struct amdgpu_cs_buffer *buffer)
{
struct amdgpu_cs_context *cs = acs->csc;
struct amdgpu_winsys_bo *bo = buffer->bo;
- struct amdgpu_cs_fence *dep;
unsigned new_num_fences = 0;
for (unsigned j = 0; j < bo->num_fences; ++j) {
struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
- unsigned idx;
-
- if (bo_fence->ctx == acs->ctx &&
- bo_fence->fence.ip_type == cs->request.ip_type &&
- bo_fence->fence.ip_instance == cs->request.ip_instance &&
- bo_fence->fence.ring == cs->request.ring)
- continue;
- if (amdgpu_fence_wait((void *)bo_fence, 0, false))
+ if (is_noop_fence_dependency(acs, bo_fence))
continue;
      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;
if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
continue;
- if (bo_fence->submission_in_progress)
- os_wait_until_zero(&bo_fence->submission_in_progress,
- PIPE_TIMEOUT_INFINITE);
-
- idx = cs->request.number_of_dependencies++;
- if (idx >= cs->max_dependencies) {
- unsigned size;
-
- cs->max_dependencies = idx + 8;
- size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
- cs->request.dependencies = realloc(cs->request.dependencies, size);
- }
-
- dep = &cs->request.dependencies[idx];
- memcpy(dep, &bo_fence->fence, sizeof(*dep));
+ add_fence_to_list(&cs->fence_dependencies, bo_fence);
}
   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}
-/* Since the kernel driver doesn't synchronize execution between different
- * rings automatically, we have to add fence dependencies manually.
+/* Add the given list of fences to the buffer's fence list.
+ *
+ * Must be called with the winsys bo_fence_lock held.
*/
-static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
+void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
+ unsigned num_fences,
+ struct pipe_fence_handle **fences)
{
- struct amdgpu_cs_context *cs = acs->csc;
- int i;
-
- cs->request.number_of_dependencies = 0;
-
- for (i = 0; i < cs->num_real_buffers; i++)
- amdgpu_add_fence_dependency(acs, &cs->real_buffers[i]);
- for (i = 0; i < cs->num_slab_buffers; i++)
- amdgpu_add_fence_dependency(acs, &cs->slab_buffers[i]);
-}
-
-static void amdgpu_add_fence(struct amdgpu_winsys_bo *bo,
- struct pipe_fence_handle *fence)
-{
- if (bo->num_fences >= bo->max_fences) {
- unsigned new_max_fences = MAX2(1, bo->max_fences * 2);
+ if (bo->num_fences + num_fences > bo->max_fences) {
+ unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
struct pipe_fence_handle **new_fences =
REALLOC(bo->fences,
bo->num_fences * sizeof(*new_fences),
new_max_fences * sizeof(*new_fences));
- if (new_fences) {
+ if (likely(new_fences)) {
bo->fences = new_fences;
bo->max_fences = new_max_fences;
} else {
- fprintf(stderr, "amdgpu_add_fence: allocation failure, dropping fence\n");
+ unsigned drop;
+
+ fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
if (!bo->num_fences)
return;
- bo->num_fences--; /* prefer to keep a more recent fence if possible */
+ bo->num_fences--; /* prefer to keep the most recent fence if possible */
amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
+
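+         /* Still not enough room: drop the first incoming fences so that
+          * the remainder fits into max_fences. */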
+ drop = bo->num_fences + num_fences - bo->max_fences;
+ num_fences -= drop;
+ fences += drop;
}
}
- bo->fences[bo->num_fences] = NULL;
- amdgpu_fence_reference(&bo->fences[bo->num_fences], fence);
- bo->num_fences++;
+ for (unsigned i = 0; i < num_fences; ++i) {
+ bo->fences[bo->num_fences] = NULL;
+ amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
+ bo->num_fences++;
+ }
+}
+
+static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
+ struct pipe_fence_handle *fence,
+ unsigned num_buffers,
+ struct amdgpu_cs_buffer *buffers)
+{
+ for (unsigned i = 0; i < num_buffers; i++) {
+ struct amdgpu_cs_buffer *buffer = &buffers[i];
+ struct amdgpu_winsys_bo *bo = buffer->bo;
+
+ amdgpu_add_bo_fence_dependencies(acs, buffer);
+ p_atomic_inc(&bo->num_active_ioctls);
+ amdgpu_add_fences(bo, 1, &fence);
+ }
+}
+
+/* Since the kernel driver doesn't synchronize execution between different
+ * rings automatically, we have to add fence dependencies manually.
+ */
+static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
+{
+ struct amdgpu_cs_context *cs = acs->csc;
+
+ amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
+ amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
+ amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
+}
+
+static void amdgpu_cs_add_syncobj_signal(struct radeon_cmdbuf *rws,
+ struct pipe_fence_handle *fence)
+{
+ struct amdgpu_cs *acs = amdgpu_cs(rws);
+ struct amdgpu_cs_context *cs = acs->csc;
+
+ assert(amdgpu_fence_is_syncobj((struct amdgpu_fence *)fence));
+
+ add_fence_to_list(&cs->syncobj_to_signal, (struct amdgpu_fence*)fence);
+}
+
+/* Add backing of sparse buffers to the buffer list.
+ *
+ * This is done late, during submission, to keep the buffer list short before
+ * submit, and to avoid managing fences for the backing buffers.
+ */
+static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
+{
+ for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
+ struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
+ struct amdgpu_winsys_bo *bo = buffer->bo;
+
+ simple_mtx_lock(&bo->lock);
+
+ list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
+ /* We can directly add the buffer here, because we know that each
+ * backing buffer occurs only once.
+ */
+ int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
+ if (idx < 0) {
+ fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
+ simple_mtx_unlock(&bo->lock);
+ return false;
+ }
+
+ cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
+ cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
+ p_atomic_inc(&backing->bo->num_active_ioctls);
+ }
+
+ simple_mtx_unlock(&bo->lock);
+ }
+
+ return true;
}
void amdgpu_cs_submit_ib(void *job, int thread_index)
struct amdgpu_winsys *ws = acs->ctx->ws;
struct amdgpu_cs_context *cs = acs->cst;
int i, r;
+ uint32_t bo_list = 0;
+ uint64_t seq_no = 0;
+ bool has_user_fence = amdgpu_cs_has_user_fence(cs);
+ bool use_bo_list_create = ws->info.drm_minor < 27;
+ struct drm_amdgpu_bo_list_in bo_list_in;
+
+ /* Prepare the buffer list. */
+ if (ws->debug_all_bos) {
+ /* The buffer list contains all buffers. This is a slow path that
+ * ensures that no buffer is missing in the BO list.
+ */
+ unsigned num_handles = 0;
+ struct drm_amdgpu_bo_list_entry *list =
+ alloca(ws->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
+ struct amdgpu_winsys_bo *bo;
- cs->request.fence_info.handle = NULL;
- if (amdgpu_cs_has_user_fence(cs)) {
- cs->request.fence_info.handle = acs->ctx->user_fence_bo;
- cs->request.fence_info.offset = acs->ring_type;
- }
+ simple_mtx_lock(&ws->global_bo_list_lock);
+ LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
+ list[num_handles].bo_handle = bo->u.real.kms_handle;
+ list[num_handles].bo_priority = 0;
+ ++num_handles;
+ }
- /* Create the buffer list.
- * Use a buffer list containing all allocated buffers if requested.
- */
- if (debug_get_option_all_bos()) {
- struct amdgpu_winsys_bo *bo;
- amdgpu_bo_handle *handles;
- unsigned num = 0;
+ r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers, list, &bo_list);
+ simple_mtx_unlock(&ws->global_bo_list_lock);
+ if (r) {
+ fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
+ goto cleanup;
+ }
+ } else {
+ if (!amdgpu_add_sparse_backing_buffers(cs)) {
+ fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
+ r = -ENOMEM;
+ goto cleanup;
+ }
+
+ struct drm_amdgpu_bo_list_entry *list =
+ alloca((cs->num_real_buffers + 2) * sizeof(struct drm_amdgpu_bo_list_entry));
- pipe_mutex_lock(ws->global_bo_list_lock);
+ unsigned num_handles = 0;
+ for (i = 0; i < cs->num_real_buffers; ++i) {
+ struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
+ assert(buffer->u.real.priority_usage != 0);
- handles = malloc(sizeof(handles[0]) * ws->num_buffers);
- if (!handles) {
- pipe_mutex_unlock(ws->global_bo_list_lock);
- amdgpu_cs_context_cleanup(cs);
- cs->error_code = -ENOMEM;
- return;
+ list[num_handles].bo_handle = buffer->bo->u.real.kms_handle;
+ list[num_handles].bo_priority = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
+ ++num_handles;
}
- LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
- assert(num < ws->num_buffers);
- handles[num++] = bo->bo;
+ if (use_bo_list_create) {
+ /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
+ r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
+ if (r) {
+ fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
+ goto cleanup;
+ }
+ } else {
+ /* Standard path passing the buffer list via the CS ioctl. */
+ bo_list_in.operation = ~0;
+ bo_list_in.list_handle = ~0;
+ bo_list_in.bo_number = num_handles;
+ bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
}
+ }
+
+ if (acs->ring_type == RING_GFX)
+ ws->gfx_bo_list_counter += cs->num_real_buffers;
- r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
- handles, NULL,
- &cs->request.resources);
- free(handles);
- pipe_mutex_unlock(ws->global_bo_list_lock);
+ if (acs->stop_exec_on_failure && acs->ctx->num_rejected_cs) {
+ r = -ECANCELED;
} else {
- r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
- cs->handles, cs->flags,
- &cs->request.resources);
- }
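+      /* At most 7 chunks are used at once: BO list, fence dependencies,
+       * syncobj dependencies, syncobj signals, user fence, preamble IB
+       * and main IB. */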
+ struct drm_amdgpu_cs_chunk chunks[7];
+ unsigned num_chunks = 0;
+
+ /* BO list */
+ if (!use_bo_list_create) {
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
+ num_chunks++;
+ }
- if (r) {
- fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
- cs->request.resources = NULL;
- amdgpu_fence_signalled(cs->fence);
- cs->error_code = r;
- goto cleanup;
+ /* Fence dependencies. */
+ unsigned num_dependencies = cs->fence_dependencies.num;
+ if (num_dependencies) {
+ struct drm_amdgpu_cs_chunk_dep *dep_chunk =
+ alloca(num_dependencies * sizeof(*dep_chunk));
+
+ for (unsigned i = 0; i < num_dependencies; i++) {
+ struct amdgpu_fence *fence =
+ (struct amdgpu_fence*)cs->fence_dependencies.list[i];
+
+ assert(util_queue_fence_is_signalled(&fence->submitted));
+ amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
+ }
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
+ chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
+ num_chunks++;
+ }
+
+ /* Syncobj dependencies. */
+ unsigned num_syncobj_dependencies = cs->syncobj_dependencies.num;
+ if (num_syncobj_dependencies) {
+ struct drm_amdgpu_cs_chunk_sem *sem_chunk =
+ alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));
+
+ for (unsigned i = 0; i < num_syncobj_dependencies; i++) {
+ struct amdgpu_fence *fence =
+ (struct amdgpu_fence*)cs->syncobj_dependencies.list[i];
+
+ if (!amdgpu_fence_is_syncobj(fence))
+ continue;
+
+ assert(util_queue_fence_is_signalled(&fence->submitted));
+ sem_chunk[i].handle = fence->syncobj;
+ }
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
+ chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num_syncobj_dependencies;
+ chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
+ num_chunks++;
+ }
+
+ /* Submit the parallel compute IB first. */
+ if (cs->ib[IB_PARALLEL_COMPUTE].ib_bytes > 0) {
+ unsigned old_num_chunks = num_chunks;
+
+ /* Add compute fence dependencies. */
+ unsigned num_dependencies = cs->compute_fence_dependencies.num;
+ if (num_dependencies) {
+ struct drm_amdgpu_cs_chunk_dep *dep_chunk =
+ alloca(num_dependencies * sizeof(*dep_chunk));
+
+ for (unsigned i = 0; i < num_dependencies; i++) {
+ struct amdgpu_fence *fence =
+ (struct amdgpu_fence*)cs->compute_fence_dependencies.list[i];
+
+ assert(util_queue_fence_is_signalled(&fence->submitted));
+ amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
+ }
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+ chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
+ chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
+ num_chunks++;
+ }
+
+ /* Add compute start fence dependencies. */
+ unsigned num_start_dependencies = cs->compute_start_fence_dependencies.num;
+ if (num_start_dependencies) {
+ struct drm_amdgpu_cs_chunk_dep *dep_chunk =
+ alloca(num_start_dependencies * sizeof(*dep_chunk));
+
+ for (unsigned i = 0; i < num_start_dependencies; i++) {
+ struct amdgpu_fence *fence =
+ (struct amdgpu_fence*)cs->compute_start_fence_dependencies.list[i];
+
+ assert(util_queue_fence_is_signalled(&fence->submitted));
+ amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
+ }
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
+ chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_start_dependencies;
+ chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
+ num_chunks++;
+ }
+
+ /* Convert from dwords to bytes. */
+ cs->ib[IB_PARALLEL_COMPUTE].ib_bytes *= 4;
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PARALLEL_COMPUTE];
+ num_chunks++;
+
+ r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
+ num_chunks, chunks, NULL);
+ if (r)
+ goto finalize;
+
+ /* Back off the compute chunks. */
+ num_chunks = old_num_chunks;
+ }
+
+ /* Syncobj signals. */
+ unsigned num_syncobj_to_signal = cs->syncobj_to_signal.num;
+ if (num_syncobj_to_signal) {
+ struct drm_amdgpu_cs_chunk_sem *sem_chunk =
+ alloca(num_syncobj_to_signal * sizeof(sem_chunk[0]));
+
+ for (unsigned i = 0; i < num_syncobj_to_signal; i++) {
+ struct amdgpu_fence *fence =
+ (struct amdgpu_fence*)cs->syncobj_to_signal.list[i];
+
+ assert(amdgpu_fence_is_syncobj(fence));
+ sem_chunk[i].handle = fence->syncobj;
+ }
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
+ chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4
+ * num_syncobj_to_signal;
+ chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
+ num_chunks++;
+ }
+
+ /* Fence */
+ if (has_user_fence) {
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
+ num_chunks++;
+ }
+
+ /* IB */
+ if (cs->ib[IB_PREAMBLE].ib_bytes) {
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PREAMBLE];
+ num_chunks++;
+ }
+
+ /* IB */
+ cs->ib[IB_MAIN].ib_bytes *= 4; /* Convert from dwords to bytes. */
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
+ num_chunks++;
+
+ if (ws->secure && cs->secure) {
+ cs->ib[IB_PREAMBLE].flags |= AMDGPU_IB_FLAGS_SECURE;
+ cs->ib[IB_MAIN].flags |= AMDGPU_IB_FLAGS_SECURE;
+ } else {
+ cs->ib[IB_PREAMBLE].flags &= ~AMDGPU_IB_FLAGS_SECURE;
+ cs->ib[IB_MAIN].flags &= ~AMDGPU_IB_FLAGS_SECURE;
+ }
+
+ assert(num_chunks <= ARRAY_SIZE(chunks));
+
+ r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
+ num_chunks, chunks, &seq_no);
}
+finalize:
- r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
- cs->error_code = r;
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
+ else if (r == -ECANCELED)
+ fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
else
fprintf(stderr, "amdgpu: The CS has been rejected, "
"see dmesg for more information (%i).\n", r);
- amdgpu_fence_signalled(cs->fence);
+ acs->ctx->num_rejected_cs++;
+ ws->num_total_rejected_cs++;
} else {
/* Success. */
uint64_t *user_fence = NULL;
- if (amdgpu_cs_has_user_fence(cs))
- user_fence = acs->ctx->user_fence_cpu_address_base +
- cs->request.fence_info.offset;
- amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
+
+ if (has_user_fence)
+ user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
+ amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
}
/* Cleanup. */
- if (cs->request.resources)
- amdgpu_bo_list_destroy(cs->request.resources);
+ if (bo_list)
+ amdgpu_bo_list_destroy_raw(ws->dev, bo_list);
cleanup:
+ /* If there was an error, signal the fence, because it won't be signalled
+ * by the hardware. */
+ if (r)
+ amdgpu_fence_signalled(cs->fence);
+
+ cs->error_code = r;
+
for (i = 0; i < cs->num_real_buffers; i++)
p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
for (i = 0; i < cs->num_slab_buffers; i++)
p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
+ for (i = 0; i < cs->num_sparse_buffers; i++)
+ p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
amdgpu_cs_context_cleanup(cs);
}
/* Make sure the previous submission is completed. */
-void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
+void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
- struct amdgpu_winsys *ws = cs->ctx->ws;
/* Wait for any pending ioctl of this CS to complete. */
- if (util_queue_is_initialized(&ws->cs_queue))
- util_queue_job_wait(&cs->flush_completed);
+ util_queue_fence_wait(&cs->flush_completed);
}
-static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
+static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
unsigned flags,
struct pipe_fence_handle **fence)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
int error_code = 0;
+ uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ring_type];
- rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);
+ rcs->current.max_dw += amdgpu_cs_epilog_dws(cs);
+ /* Pad the IB according to the mask. */
switch (cs->ring_type) {
case RING_DMA:
- /* pad DMA ring to 8 DWs */
- if (ws->info.chip_class <= SI) {
- while (rcs->current.cdw & 7)
+ if (ws->info.chip_class <= GFX6) {
+ while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0xf0000000); /* NOP packet */
} else {
- while (rcs->current.cdw & 7)
+ while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0x00000000); /* NOP packet */
}
break;
case RING_GFX:
- /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
+ case RING_COMPUTE:
if (ws->info.gfx_ib_pad_with_type2) {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0x80000000); /* type2 nop packet */
+ while (rcs->current.cdw & ib_pad_dw_mask)
+ radeon_emit(rcs, PKT2_NOP_PAD);
} else {
- while (rcs->current.cdw & 7)
- radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ while (rcs->current.cdw & ib_pad_dw_mask)
+ radeon_emit(rcs, PKT3_NOP_PAD);
}
+ if (cs->ring_type == RING_GFX)
+ ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
- /* Also pad the const IB. */
- if (cs->const_ib.ib_mapped)
- while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
- radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
-
- if (cs->const_preamble_ib.ib_mapped)
- while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
- radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
+ /* Also pad secondary IBs. */
+ if (cs->compute_ib.ib_mapped) {
+ while (cs->compute_ib.base.current.cdw & ib_pad_dw_mask)
+ radeon_emit(&cs->compute_ib.base, PKT3_NOP_PAD);
+ }
break;
case RING_UVD:
- while (rcs->current.cdw & 15)
+ case RING_UVD_ENC:
+ while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0x80000000); /* type2 nop packet */
break;
+ case RING_VCN_JPEG:
+ if (rcs->current.cdw % 2)
+ assert(0);
+ while (rcs->current.cdw & ib_pad_dw_mask) {
+ radeon_emit(rcs, 0x60000000); /* nop packet */
+ radeon_emit(rcs, 0x00000000);
+ }
+ break;
+ case RING_VCN_DEC:
+ while (rcs->current.cdw & ib_pad_dw_mask)
+ radeon_emit(rcs, 0x81ff); /* nop packet */
+ break;
default:
break;
}
}
/* If the CS is not empty or overflowed.... */
- if (radeon_emitted(&cs->main.base, 0) &&
+ if (likely(radeon_emitted(&cs->main.base, 0) &&
cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
- !debug_get_option_noop()) {
+ !debug_get_option_noop())) {
struct amdgpu_cs_context *cur = cs->csc;
- unsigned i, num_buffers;
/* Set IB sizes. */
- amdgpu_ib_finalize(&cs->main);
+ amdgpu_ib_finalize(ws, &cs->main);
- if (cs->const_ib.ib_mapped)
- amdgpu_ib_finalize(&cs->const_ib);
-
- if (cs->const_preamble_ib.ib_mapped)
- amdgpu_ib_finalize(&cs->const_preamble_ib);
+ if (cs->compute_ib.ib_mapped)
+ amdgpu_ib_finalize(ws, &cs->compute_ib);
/* Create a fence. */
amdgpu_fence_reference(&cur->fence, NULL);
cs->next_fence = NULL;
} else {
cur->fence = amdgpu_fence_create(cs->ctx,
- cur->request.ip_type,
- cur->request.ip_instance,
- cur->request.ring);
+ cur->ib[IB_MAIN].ip_type,
+ cur->ib[IB_MAIN].ip_instance,
+ cur->ib[IB_MAIN].ring);
}
if (fence)
amdgpu_fence_reference(fence, cur->fence);
- /* Prepare buffers. */
- pipe_mutex_lock(ws->bo_fence_lock);
- amdgpu_add_fence_dependencies(cs);
-
- num_buffers = cur->num_real_buffers;
- for (i = 0; i < num_buffers; i++) {
- struct amdgpu_winsys_bo *bo = cur->real_buffers[i].bo;
- p_atomic_inc(&bo->num_active_ioctls);
- amdgpu_add_fence(bo, cur->fence);
- }
-
- num_buffers = cur->num_slab_buffers;
- for (i = 0; i < num_buffers; i++) {
- struct amdgpu_winsys_bo *bo = cur->slab_buffers[i].bo;
- p_atomic_inc(&bo->num_active_ioctls);
- amdgpu_add_fence(bo, cur->fence);
- }
- pipe_mutex_unlock(ws->bo_fence_lock);
-
amdgpu_cs_sync_flush(rcs);
+ /* Prepare buffers.
+ *
+    * This lock must be held until the submission is queued to ensure
+ * that the order of fence dependency updates matches the order of
+ * submissions.
+ */
+ simple_mtx_lock(&ws->bo_fence_lock);
+ amdgpu_add_fence_dependencies_bo_lists(cs);
+
/* Swap command streams. "cst" is going to be submitted. */
cs->csc = cs->cst;
cs->cst = cur;
/* Submit. */
- if ((flags & RADEON_FLUSH_ASYNC) &&
- util_queue_is_initialized(&ws->cs_queue)) {
- util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
- amdgpu_cs_submit_ib, NULL);
- } else {
- amdgpu_cs_submit_ib(cs, 0);
- error_code = cs->cst->error_code;
+ util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
+ amdgpu_cs_submit_ib, NULL, 0);
+ /* The submission has been queued, unlock the fence now. */
+ simple_mtx_unlock(&ws->bo_fence_lock);
+
+ if (!(flags & PIPE_FLUSH_ASYNC)) {
+ amdgpu_cs_sync_flush(rcs);
+ error_code = cur->error_code;
}
} else {
amdgpu_cs_context_cleanup(cs->csc);
}
- amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
- if (cs->const_ib.ib_mapped)
- amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
- if (cs->const_preamble_ib.ib_mapped)
- amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);
+ amdgpu_get_new_ib(ws, cs, IB_MAIN);
+ if (cs->compute_ib.ib_mapped)
+ amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE);
+
+ if (cs->preamble_ib_bo) {
+ amdgpu_cs_add_buffer(rcs, cs->preamble_ib_bo, RADEON_USAGE_READ, 0,
+ RADEON_PRIO_IB1);
+ }
cs->main.base.used_gart = 0;
cs->main.base.used_vram = 0;
- ws->num_gfx_IBs++;
+ if (cs->ring_type == RING_GFX)
+ ws->num_gfx_IBs++;
+ else if (cs->ring_type == RING_DMA)
+ ws->num_sdma_IBs++;
+
return error_code;
}
-static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
+static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
amdgpu_cs_sync_flush(rcs);
util_queue_fence_destroy(&cs->flush_completed);
p_atomic_dec(&cs->ctx->ws->num_cs);
+ pb_reference(&cs->preamble_ib_bo, NULL);
pb_reference(&cs->main.big_ib_buffer, NULL);
FREE(cs->main.base.prev);
- pb_reference(&cs->const_ib.big_ib_buffer, NULL);
- FREE(cs->const_ib.base.prev);
- pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
- FREE(cs->const_preamble_ib.base.prev);
+ pb_reference(&cs->compute_ib.big_ib_buffer, NULL);
+ FREE(cs->compute_ib.base.prev);
amdgpu_destroy_cs_context(&cs->csc1);
amdgpu_destroy_cs_context(&cs->csc2);
amdgpu_fence_reference(&cs->next_fence, NULL);
FREE(cs);
}
-static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
+static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
{
ws->base.ctx_create = amdgpu_ctx_create;
ws->base.ctx_destroy = amdgpu_ctx_destroy;
ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
ws->base.cs_create = amdgpu_cs_create;
- ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
- ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
+ ws->base.cs_add_parallel_compute_ib = amdgpu_cs_add_parallel_compute_ib;
+ ws->base.cs_setup_preemption = amdgpu_cs_setup_preemption;
ws->base.cs_destroy = amdgpu_cs_destroy;
ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
ws->base.cs_validate = amdgpu_cs_validate;
ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
+ ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
+ ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
ws->base.fence_reference = amdgpu_fence_reference;
+ ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
+ ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
+ ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
+ ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
}