#include <stdio.h>
#include <amdgpu_drm.h>
+#include "amd/common/sid.h"
+
+DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
/* FENCES */
&expired);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
- return FALSE;
+ return false;
}
if (expired) {
return amdgpu_fence_wait(fence, timeout, false);
}
+static struct pipe_fence_handle *
+amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
+{
+ struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ struct pipe_fence_handle *fence = NULL;
+
+ if (debug_get_option_noop())
+ return NULL;
+
+ if (cs->next_fence) {
+ amdgpu_fence_reference(&fence, cs->next_fence);
+ return fence;
+ }
+
+ fence = amdgpu_fence_create(cs->ctx,
+ cs->csc->request.ip_type,
+ cs->csc->request.ip_instance,
+ cs->csc->request.ring);
+ if (!fence)
+ return NULL;
+
+ amdgpu_fence_reference(&cs->next_fence, fence);
+ return fence;
+}
+
/* CONTEXTS */
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
ctx->ws = amdgpu_winsys(ws);
ctx->refcount = 1;
+ ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
if (r) {
uint32_t result, hangs;
int r;
+ /* Return a failure due to a rejected command submission. */
+ if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
+ return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
+ PIPE_INNOCENT_CONTEXT_RESET;
+ }
+
+ /* Return a failure due to a GPU hang. */
r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
cs->request.ip_type != AMDGPU_HW_IP_VCE;
}
+static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
+{
+ return cs->ctx->ws->info.chip_class >= CIK &&
+ cs->ring_type == RING_GFX;
+}
+
+static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
+{
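+   /* Space reserved at the end of every GFX IB for the chaining
+    * INDIRECT_BUFFER packet: header + VA low + VA high + size/flags dword
+    * (see the chaining path in amdgpu_cs_check_space).
+    */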
+ if (ring_type == RING_GFX)
+ return 4; /* for chaining */
+
+ return 0;
+}
+
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
int i = cs->buffer_indices_hashlist[hash];
+ struct amdgpu_cs_buffer *buffers;
+ int num_buffers;
+
+ if (bo->bo) {
+ buffers = cs->real_buffers;
+ num_buffers = cs->num_real_buffers;
+ } else {
+ buffers = cs->slab_buffers;
+ num_buffers = cs->num_slab_buffers;
+ }
/* Fast path: either the buffer isn't in the CS, or it's at the hashed index. */
- if (i == -1 || cs->buffers[i].bo == bo)
+ if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
return i;
/* Hash collision, look for the BO in the list of buffers linearly. */
- for (i = cs->num_buffers - 1; i >= 0; i--) {
- if (cs->buffers[i].bo == bo) {
+ for (i = num_buffers - 1; i >= 0; i--) {
+ if (buffers[i].bo == bo) {
/* Put this buffer in the hash list.
* This will prevent additional hash collisions if there are
* several consecutive lookup_buffer calls for the same buffer.
return -1;
}
-static unsigned amdgpu_add_buffer(struct amdgpu_cs *acs,
- struct amdgpu_winsys_bo *bo,
- enum radeon_bo_usage usage,
- enum radeon_bo_domain domains,
- unsigned priority,
- enum radeon_bo_domain *added_domains)
+static int
+amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
struct amdgpu_cs_context *cs = acs->csc;
struct amdgpu_cs_buffer *buffer;
- unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
- int i = -1;
+ unsigned hash;
+ int idx = amdgpu_lookup_buffer(cs, bo);
- assert(priority < 64);
- *added_domains = 0;
+ if (idx >= 0)
+ return idx;
- i = amdgpu_lookup_buffer(cs, bo);
+ /* New buffer, check if the backing array is large enough. */
+ if (cs->num_real_buffers >= cs->max_real_buffers) {
+ unsigned new_max =
+ MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
+ struct amdgpu_cs_buffer *new_buffers;
+ amdgpu_bo_handle *new_handles;
+ uint8_t *new_flags;
+
+ new_buffers = MALLOC(new_max * sizeof(*new_buffers));
+ new_handles = MALLOC(new_max * sizeof(*new_handles));
+ new_flags = MALLOC(new_max * sizeof(*new_flags));
+
+ if (!new_buffers || !new_handles || !new_flags) {
+      fprintf(stderr, "amdgpu_lookup_or_add_real_buffer: allocation failed\n");
+ FREE(new_buffers);
+ FREE(new_handles);
+ FREE(new_flags);
+ return -1;
+ }
- if (i >= 0) {
- buffer = &cs->buffers[i];
- buffer->priority_usage |= 1llu << priority;
- buffer->usage |= usage;
- *added_domains = domains & ~buffer->domains;
- buffer->domains |= domains;
- cs->flags[i] = MAX2(cs->flags[i], priority / 4);
- return i;
+ memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
+ memcpy(new_handles, cs->handles, cs->num_real_buffers * sizeof(*new_handles));
+ memcpy(new_flags, cs->flags, cs->num_real_buffers * sizeof(*new_flags));
+
+ FREE(cs->real_buffers);
+ FREE(cs->handles);
+ FREE(cs->flags);
+
+ cs->max_real_buffers = new_max;
+ cs->real_buffers = new_buffers;
+ cs->handles = new_handles;
+ cs->flags = new_flags;
}
- /* New buffer, check if the backing array is large enough. */
- if (cs->num_buffers >= cs->max_num_buffers) {
- uint32_t size;
- cs->max_num_buffers += 10;
+ idx = cs->num_real_buffers;
+ buffer = &cs->real_buffers[idx];
+
+ memset(buffer, 0, sizeof(*buffer));
+ amdgpu_winsys_bo_reference(&buffer->bo, bo);
+ cs->handles[idx] = bo->bo;
+ cs->flags[idx] = 0;
+ p_atomic_inc(&bo->num_cs_references);
+ cs->num_real_buffers++;
- size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
- cs->buffers = realloc(cs->buffers, size);
+ hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+ cs->buffer_indices_hashlist[hash] = idx;
- size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
- cs->handles = realloc(cs->handles, size);
+ if (bo->initial_domain & RADEON_DOMAIN_VRAM)
+ acs->main.base.used_vram += bo->base.size;
+ else if (bo->initial_domain & RADEON_DOMAIN_GTT)
+ acs->main.base.used_gart += bo->base.size;
- cs->flags = realloc(cs->flags, cs->max_num_buffers);
+ return idx;
+}
+
+static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
+ struct amdgpu_winsys_bo *bo)
+{
+ struct amdgpu_cs_context *cs = acs->csc;
+ struct amdgpu_cs_buffer *buffer;
+ unsigned hash;
+ int idx = amdgpu_lookup_buffer(cs, bo);
+ int real_idx;
+
+ if (idx >= 0)
+ return idx;
+
+ real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
+ if (real_idx < 0)
+ return -1;
+
+ /* New buffer, check if the backing array is large enough. */
+ if (cs->num_slab_buffers >= cs->max_slab_buffers) {
+ unsigned new_max =
+ MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
+ struct amdgpu_cs_buffer *new_buffers;
+
+ new_buffers = REALLOC(cs->slab_buffers,
+ cs->max_slab_buffers * sizeof(*new_buffers),
+ new_max * sizeof(*new_buffers));
+ if (!new_buffers) {
+ fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
+ return -1;
+ }
+
+ cs->max_slab_buffers = new_max;
+ cs->slab_buffers = new_buffers;
}
- /* Initialize the new buffer. */
- cs->buffers[cs->num_buffers].bo = NULL;
- amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
- cs->handles[cs->num_buffers] = bo->bo;
- cs->flags[cs->num_buffers] = priority / 4;
+ idx = cs->num_slab_buffers;
+ buffer = &cs->slab_buffers[idx];
+
+ memset(buffer, 0, sizeof(*buffer));
+ amdgpu_winsys_bo_reference(&buffer->bo, bo);
+ buffer->u.slab.real_idx = real_idx;
p_atomic_inc(&bo->num_cs_references);
- buffer = &cs->buffers[cs->num_buffers];
- buffer->bo = bo;
- buffer->priority_usage = 1llu << priority;
- buffer->usage = usage;
- buffer->domains = domains;
+ cs->num_slab_buffers++;
- cs->buffer_indices_hashlist[hash] = cs->num_buffers;
+ hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
+ cs->buffer_indices_hashlist[hash] = idx;
- *added_domains = domains;
- return cs->num_buffers++;
+ return idx;
}
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
/* Don't use the "domains" parameter. Amdgpu doesn't support changing
* the buffer placement during command submission.
*/
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ struct amdgpu_cs *acs = amdgpu_cs(rcs);
+ struct amdgpu_cs_context *cs = acs->csc;
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
- enum radeon_bo_domain added_domains;
- unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
- priority, &added_domains);
+ struct amdgpu_cs_buffer *buffer;
+ int index;
- if (added_domains & RADEON_DOMAIN_VRAM)
- cs->csc->used_vram += bo->base.size;
- else if (added_domains & RADEON_DOMAIN_GTT)
- cs->csc->used_gart += bo->base.size;
+ /* Fast exit for no-op calls.
+ * This is very effective with suballocators and linear uploaders that
+ * are outside of the winsys.
+ */
+ if (bo == cs->last_added_bo &&
+ (usage & cs->last_added_bo_usage) == usage &&
+ (1ull << priority) & cs->last_added_bo_priority_usage)
+ return cs->last_added_bo_index;
+
+ if (!bo->bo) {
+ index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
+ if (index < 0)
+ return 0;
+
+ buffer = &cs->slab_buffers[index];
+ buffer->usage |= usage;
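+      /* The slab entry keeps the full usage (including SYNCHRONIZED) for
+       * fence tracking; the backing real buffer only needs to be on the
+       * submission list, so don't propagate SYNCHRONIZED to it.
+       */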
+ usage &= ~RADEON_USAGE_SYNCHRONIZED;
+ index = buffer->u.slab.real_idx;
+ } else {
+ index = amdgpu_lookup_or_add_real_buffer(acs, bo);
+ if (index < 0)
+ return 0;
+ }
+
+ buffer = &cs->real_buffers[index];
+ buffer->u.real.priority_usage |= 1llu << priority;
+ buffer->usage |= usage;
+ cs->flags[index] = MAX2(cs->flags[index], priority / 4);
+
+ cs->last_added_bo = bo;
+ cs->last_added_bo_index = index;
+ cs->last_added_bo_usage = buffer->usage;
+ cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
return index;
}
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
- unsigned buffer_size)
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
struct pb_buffer *pb;
uint8_t *mapped;
+ unsigned buffer_size;
+
+ /* Always create a buffer that is at least as large as the maximum seen IB
+ * size, aligned to a power of two (and multiplied by 4 to reduce internal
+ * fragmentation if chaining is not available). Limit to 512k dwords, which
+ * is the largest power of two that fits into the size field of the
+ * INDIRECT_BUFFER packet.
+ */
+ if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
+      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
+   else
+      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
+
+ buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
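+   /* For example, with chaining and max_ib_size = 5000 dwords this gives
+    * 4 * util_next_power_of_two(5000) = 32 KB; without chaining it gives
+    * 4 * util_next_power_of_two(20000) = 128 KB, capped at 2 MB either way
+    * (before the per-IB-type minimums below are applied).
+    */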
+
+ switch (ib->ib_type) {
+ case IB_CONST_PREAMBLE:
+ buffer_size = MAX2(buffer_size, 4 * 1024);
+ break;
+ case IB_CONST:
+ buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
+ break;
+ case IB_MAIN:
+ buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
+ break;
+ default:
+ unreachable("unhandled IB type");
+ }
pb = ws->base.buffer_create(&ws->base, buffer_size,
ws->info.gart_page_size,
return true;
}
+static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
+{
+ switch (ib_type) {
+ case IB_MAIN:
+      /* Smaller submits mean the GPU gets busy sooner and there is less
+ * waiting for buffers and fences. Proof:
+ * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
+ */
+ return 20 * 1024;
+ case IB_CONST_PREAMBLE:
+ case IB_CONST:
+ /* There isn't really any reason to limit CE IB size beyond the natural
+ * limit implied by the main IB, except perhaps GTT size. Just return
+ * an extremely large value that we never get anywhere close to.
+ */
+ return 16 * 1024 * 1024;
+ default:
+ unreachable("bad ib_type");
+ }
+}
+
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
enum ib_type ib_type)
{
*/
struct amdgpu_ib *ib = NULL;
struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
- unsigned buffer_size, ib_size;
+ unsigned ib_size = 0;
switch (ib_type) {
case IB_CONST_PREAMBLE:
ib = &cs->const_preamble_ib;
- buffer_size = 4 * 1024 * 4;
- ib_size = 1024 * 4;
+ ib_size = 256 * 4;
break;
case IB_CONST:
ib = &cs->const_ib;
- buffer_size = 512 * 1024 * 4;
- ib_size = 128 * 1024 * 4;
+ ib_size = 8 * 1024 * 4;
break;
case IB_MAIN:
ib = &cs->main;
- buffer_size = 128 * 1024 * 4;
- ib_size = 20 * 1024 * 4;
+ ib_size = 4 * 1024 * 4;
break;
default:
unreachable("unhandled IB type");
}
- ib->base.cdw = 0;
- ib->base.buf = NULL;
+ if (!amdgpu_cs_has_chaining(cs)) {
+ ib_size = MAX2(ib_size,
+ 4 * MIN2(util_next_power_of_two(ib->max_ib_size),
+ amdgpu_ib_max_submit_dwords(ib_type)));
+ }
+
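+   /* Decay the tracked maximum IB size by ~3% per new IB so the buffer
+    * size can shrink again after a burst of unusually large IBs.
+    */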
+ ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;
+
+ ib->base.prev_dw = 0;
+ ib->base.num_prev = 0;
+ ib->base.current.cdw = 0;
+ ib->base.current.buf = NULL;
/* Allocate a new buffer for IBs if the current buffer is all used. */
if (!ib->big_ib_buffer ||
ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- if (!amdgpu_ib_new_buffer(aws, ib, buffer_size))
+ if (!amdgpu_ib_new_buffer(aws, ib))
return false;
}
info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
ib->used_ib_space;
+ info->size = 0;
+ ib->ptr_ib_size = &info->size;
+
amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
- ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
- ib->base.max_dw = ib_size / 4;
+ ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
+
+ ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
+ ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
return true;
}
-static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
- enum ring_type ring_type)
+static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
+{
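+   /* ptr_ib_size points either at the kernel request's IB size field or at
+    * the size/flags dword of a chaining INDIRECT_BUFFER packet, so OR in the
+    * dword count rather than overwrite the CHAIN/VALID bits.
+    */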
+ *ib->ptr_ib_size |= ib->base.current.cdw;
+ ib->used_ib_space += ib->base.current.cdw * 4;
+ ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
+}
+
+static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
+ enum ring_type ring_type)
{
int i;
break;
}
- cs->max_num_buffers = 512;
- cs->buffers = (struct amdgpu_cs_buffer*)
- CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
- if (!cs->buffers) {
- return FALSE;
- }
-
- cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
- if (!cs->handles) {
- FREE(cs->buffers);
- return FALSE;
- }
-
- cs->flags = CALLOC(1, cs->max_num_buffers);
- if (!cs->flags) {
- FREE(cs->handles);
- FREE(cs->buffers);
- return FALSE;
- }
-
for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
cs->buffer_indices_hashlist[i] = -1;
}
+ cs->last_added_bo = NULL;
cs->request.number_of_ibs = 1;
cs->request.ibs = &cs->ib[IB_MAIN];
cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
AMDGPU_IB_FLAG_PREAMBLE;
- return TRUE;
+ return true;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
unsigned i;
- for (i = 0; i < cs->num_buffers; i++) {
- p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
- amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
- cs->handles[i] = NULL;
- cs->flags[i] = 0;
+ for (i = 0; i < cs->num_real_buffers; i++) {
+ p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
+ amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
+ }
+ for (i = 0; i < cs->num_slab_buffers; i++) {
+ p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
+ amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
}
- cs->num_buffers = 0;
- cs->used_gart = 0;
- cs->used_vram = 0;
+ cs->num_real_buffers = 0;
+ cs->num_slab_buffers = 0;
amdgpu_fence_reference(&cs->fence, NULL);
for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
cs->buffer_indices_hashlist[i] = -1;
}
+ cs->last_added_bo = NULL;
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
amdgpu_cs_context_cleanup(cs);
FREE(cs->flags);
- FREE(cs->buffers);
+ FREE(cs->real_buffers);
FREE(cs->handles);
+ FREE(cs->slab_buffers);
FREE(cs->request.dependencies);
}
return NULL;
}
- pipe_semaphore_init(&cs->flush_completed, 1);
+ util_queue_fence_init(&cs->flush_completed);
cs->ctx = ctx;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
cs->ring_type = ring_type;
+ cs->main.ib_type = IB_MAIN;
+ cs->const_ib.ib_type = IB_CONST;
+ cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;
+
if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
FREE(cs);
return NULL;
return &cs->const_preamble_ib.base;
}
-#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
-
-static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
- struct pb_buffer *buf)
+static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
-
- return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
-}
-
-static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
-{
- return TRUE;
+ return true;
}
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
- assert(rcs->cdw <= rcs->max_dw);
- return rcs->max_dw - rcs->cdw >= dw;
-}
+ struct amdgpu_ib *ib = amdgpu_ib(rcs);
+ struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
+ unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
+ uint64_t va;
+ uint32_t *new_ptr_ib_size;
-static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
-{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
- struct amdgpu_winsys *ws = cs->ctx->ws;
+ assert(rcs->current.cdw <= rcs->current.max_dw);
- vram += cs->csc->used_vram;
- gtt += cs->csc->used_gart;
+ if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
+ return false;
- /* Anything that goes above the VRAM size should go to GTT. */
- if (vram > ws->info.vram_size)
- gtt += vram - ws->info.vram_size;
+ ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
- /* Now we just need to check if we have enough GTT. */
- return gtt < ws->info.gart_size * 0.7;
-}
+ if (rcs->current.max_dw - rcs->current.cdw >= dw)
+ return true;
-static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
-{
- struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
+ if (!amdgpu_cs_has_chaining(cs))
+ return false;
- return cs->used_vram + cs->used_gart;
+ /* Allocate a new chunk */
+ if (rcs->num_prev >= rcs->max_prev) {
+ unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
+ struct radeon_winsys_cs_chunk *new_prev;
+
+ new_prev = REALLOC(rcs->prev,
+ sizeof(*new_prev) * rcs->max_prev,
+ sizeof(*new_prev) * new_max_prev);
+ if (!new_prev)
+ return false;
+
+ rcs->prev = new_prev;
+ rcs->max_prev = new_max_prev;
+ }
+
+ if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
+ return false;
+
+ assert(ib->used_ib_space == 0);
+ va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
+
+   /* Reclaim the 4 dwords that amdgpu_cs_epilog_dws reserved for this chaining packet. */
+ rcs->current.max_dw += 4;
+ assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);
+
+   /* Pad with NOPs up to (cdw % 8) == 4 so that the 4-dword INDIRECT_BUFFER
+    * packet emitted below leaves the finished chunk 8-dword aligned.
+    */
+ while ((rcs->current.cdw & 7) != 4)
+ radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+
+ radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
+ : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
+ radeon_emit(rcs, va);
+ radeon_emit(rcs, va >> 32);
+ new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
+ radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));
+
+ assert((rcs->current.cdw & 7) == 0);
+ assert(rcs->current.cdw <= rcs->current.max_dw);
+
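+   /* Patch the size of the chunk just finished into the previous size slot,
+    * then start tracking the size/flags dword of the INDIRECT_BUFFER packet
+    * emitted above.
+    */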
+ *ib->ptr_ib_size |= rcs->current.cdw;
+ ib->ptr_ib_size = new_ptr_ib_size;
+
+ /* Hook up the new chunk */
+ rcs->prev[rcs->num_prev].buf = rcs->current.buf;
+ rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
+ rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
+ rcs->num_prev++;
+
+ ib->base.prev_dw += ib->base.current.cdw;
+ ib->base.current.cdw = 0;
+
+ ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
+ ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
+
+ amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
+ RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
+
+ return true;
}
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
int i;
if (list) {
- for (i = 0; i < cs->num_buffers; i++) {
- pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
- list[i].vm_address = cs->buffers[i].bo->va;
- list[i].priority_usage = cs->buffers[i].priority_usage;
+ for (i = 0; i < cs->num_real_buffers; i++) {
+ list[i].bo_size = cs->real_buffers[i].bo->base.size;
+ list[i].vm_address = cs->real_buffers[i].bo->va;
+ list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
}
}
- return cs->num_buffers;
+ return cs->num_real_buffers;
}
-DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
+DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
-/* Since the kernel driver doesn't synchronize execution between different
- * rings automatically, we have to add fence dependencies manually.
- */
-static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
+static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
+ struct amdgpu_cs_buffer *buffer)
{
struct amdgpu_cs_context *cs = acs->csc;
- int i, j;
+ struct amdgpu_winsys_bo *bo = buffer->bo;
+ struct amdgpu_cs_fence *dep;
+ unsigned new_num_fences = 0;
- cs->request.number_of_dependencies = 0;
+ for (unsigned j = 0; j < bo->num_fences; ++j) {
+ struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
+ unsigned idx;
+
+ if (bo_fence->ctx == acs->ctx &&
+ bo_fence->fence.ip_type == cs->request.ip_type &&
+ bo_fence->fence.ip_instance == cs->request.ip_instance &&
+ bo_fence->fence.ring == cs->request.ring)
+ continue;
+
+ if (amdgpu_fence_wait((void *)bo_fence, 0, false))
+ continue;
- for (i = 0; i < cs->num_buffers; i++) {
- for (j = 0; j < RING_LAST; j++) {
- struct amdgpu_cs_fence *dep;
- unsigned idx;
+ amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
+ new_num_fences++;
- struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
- if (!bo_fence)
- continue;
+ if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
+ continue;
- if (bo_fence->ctx == acs->ctx &&
- bo_fence->fence.ip_type == cs->request.ip_type &&
- bo_fence->fence.ip_instance == cs->request.ip_instance &&
- bo_fence->fence.ring == cs->request.ring)
- continue;
+ if (bo_fence->submission_in_progress)
+ os_wait_until_zero(&bo_fence->submission_in_progress,
+ PIPE_TIMEOUT_INFINITE);
+
+ idx = cs->request.number_of_dependencies++;
+ if (idx >= cs->max_dependencies) {
+ unsigned size;
+
+ cs->max_dependencies = idx + 8;
+ size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
+ cs->request.dependencies = realloc(cs->request.dependencies, size);
+ }
- if (amdgpu_fence_wait((void *)bo_fence, 0, false))
- continue;
+ dep = &cs->request.dependencies[idx];
+ memcpy(dep, &bo_fence->fence, sizeof(*dep));
+ }
- if (bo_fence->submission_in_progress)
- os_wait_until_zero(&bo_fence->submission_in_progress,
- PIPE_TIMEOUT_INFINITE);
+ for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
+ amdgpu_fence_reference(&bo->fences[j], NULL);
- idx = cs->request.number_of_dependencies++;
- if (idx >= cs->max_dependencies) {
- unsigned size;
+ bo->num_fences = new_num_fences;
+}
- cs->max_dependencies = idx + 8;
- size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
- cs->request.dependencies = realloc(cs->request.dependencies, size);
- }
+static void amdgpu_add_fence(struct amdgpu_winsys_bo *bo,
+ struct pipe_fence_handle *fence)
+{
+ if (bo->num_fences >= bo->max_fences) {
+ unsigned new_max_fences = MAX2(1, bo->max_fences * 2);
+ struct pipe_fence_handle **new_fences =
+ REALLOC(bo->fences,
+ bo->num_fences * sizeof(*new_fences),
+ new_max_fences * sizeof(*new_fences));
+ if (new_fences) {
+ bo->fences = new_fences;
+ bo->max_fences = new_max_fences;
+ } else {
+ fprintf(stderr, "amdgpu_add_fence: allocation failure, dropping fence\n");
+ if (!bo->num_fences)
+ return;
- dep = &cs->request.dependencies[idx];
- memcpy(dep, &bo_fence->fence, sizeof(*dep));
+ bo->num_fences--; /* prefer to keep a more recent fence if possible */
+ amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
}
}
+
+ bo->fences[bo->num_fences] = NULL;
+ amdgpu_fence_reference(&bo->fences[bo->num_fences], fence);
+ bo->num_fences++;
}
-void amdgpu_cs_submit_ib(struct amdgpu_cs *acs)
+/* Since the kernel driver doesn't synchronize execution between different
+ * rings automatically, we have to add fence dependencies manually.
+ */
+static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
+ struct amdgpu_cs_context *cs = acs->csc;
+ unsigned num_buffers;
+ int i;
+
+ cs->request.number_of_dependencies = 0;
+
+ num_buffers = cs->num_real_buffers;
+ for (i = 0; i < num_buffers; i++) {
+ struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
+ struct amdgpu_winsys_bo *bo = buffer->bo;
+
+ amdgpu_add_fence_dependency(acs, buffer);
+ p_atomic_inc(&bo->num_active_ioctls);
+ amdgpu_add_fence(bo, cs->fence);
+ }
+
+ num_buffers = cs->num_slab_buffers;
+ for (i = 0; i < num_buffers; i++) {
+ struct amdgpu_cs_buffer *buffer = &cs->slab_buffers[i];
+ struct amdgpu_winsys_bo *bo = buffer->bo;
+
+ amdgpu_add_fence_dependency(acs, buffer);
+ p_atomic_inc(&bo->num_active_ioctls);
+ amdgpu_add_fence(bo, cs->fence);
+ }
+}
+
+void amdgpu_cs_submit_ib(void *job, int thread_index)
+{
+ struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
struct amdgpu_winsys *ws = acs->ctx->ws;
struct amdgpu_cs_context *cs = acs->cst;
int i, r;
if (!handles) {
pipe_mutex_unlock(ws->global_bo_list_lock);
amdgpu_cs_context_cleanup(cs);
+ cs->error_code = -ENOMEM;
return;
}
- LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
+ LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
assert(num < ws->num_buffers);
handles[num++] = bo->bo;
}
free(handles);
pipe_mutex_unlock(ws->global_bo_list_lock);
} else {
- r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
+ r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
cs->handles, cs->flags,
&cs->request.resources);
}
fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
cs->request.resources = NULL;
amdgpu_fence_signalled(cs->fence);
+ cs->error_code = r;
goto cleanup;
}
- r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
+ if (acs->ctx->num_rejected_cs)
+ r = -ECANCELED;
+ else
+ r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
+
+ cs->error_code = r;
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
+ else if (r == -ECANCELED)
+ fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
else
fprintf(stderr, "amdgpu: The CS has been rejected, "
- "see dmesg for more information.\n");
+ "see dmesg for more information (%i).\n", r);
amdgpu_fence_signalled(cs->fence);
+
+ acs->ctx->num_rejected_cs++;
+ ws->num_total_rejected_cs++;
} else {
/* Success. */
uint64_t *user_fence = NULL;
amdgpu_bo_list_destroy(cs->request.resources);
cleanup:
- for (i = 0; i < cs->num_buffers; i++)
- p_atomic_dec(&cs->buffers[i].bo->num_active_ioctls);
+ for (i = 0; i < cs->num_real_buffers; i++)
+ p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
+ for (i = 0; i < cs->num_slab_buffers; i++)
+ p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
amdgpu_cs_context_cleanup(cs);
}
struct amdgpu_cs *cs = amdgpu_cs(rcs);
/* Wait for any pending ioctl of this CS to complete. */
- if (cs->ctx->ws->thread) {
- /* wait and set the semaphore to "busy" */
- pipe_semaphore_wait(&cs->flush_completed);
- /* set the semaphore to "idle" */
- pipe_semaphore_signal(&cs->flush_completed);
- }
+ util_queue_job_wait(&cs->flush_completed);
}
-DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
-
-static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
- unsigned flags,
- struct pipe_fence_handle **fence)
+static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
+ unsigned flags,
+ struct pipe_fence_handle **fence)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys *ws = cs->ctx->ws;
+ int error_code = 0;
+
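+   /* No further chaining happens once we flush, so the dwords reserved by
+    * amdgpu_cs_epilog_dws can be used for the final padding below.
+    */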
+ rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);
switch (cs->ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
- while (rcs->cdw & 7)
- OUT_CS(rcs, 0x00000000); /* NOP packet */
+ if (ws->info.chip_class <= SI) {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0xf0000000); /* NOP packet */
+ } else {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0x00000000); /* NOP packet */
+ }
break;
case RING_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
- while (rcs->cdw & 7)
- OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
+ if (ws->info.gfx_ib_pad_with_type2) {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0x80000000); /* type2 nop packet */
+ } else {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ }
/* Also pad the const IB. */
if (cs->const_ib.ib_mapped)
- while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
- OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
+ while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
+ radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
if (cs->const_preamble_ib.ib_mapped)
- while (!cs->const_preamble_ib.base.cdw || (cs->const_preamble_ib.base.cdw & 7))
- OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
+ while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
+ radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
break;
case RING_UVD:
- while (rcs->cdw & 15)
- OUT_CS(rcs, 0x80000000); /* type2 nop packet */
+ while (rcs->current.cdw & 15)
+ radeon_emit(rcs, 0x80000000); /* type2 nop packet */
break;
default:
break;
}
- if (rcs->cdw > rcs->max_dw) {
+ if (rcs->current.cdw > rcs->current.max_dw) {
fprintf(stderr, "amdgpu: command stream overflowed\n");
}
/* If the CS is not empty or overflowed.... */
- if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw &&
- !debug_get_option_noop()) {
+ if (likely(radeon_emitted(&cs->main.base, 0) &&
+ cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
+ !debug_get_option_noop())) {
struct amdgpu_cs_context *cur = cs->csc;
- unsigned i, num_buffers = cur->num_buffers;
/* Finalize IBs: set their sizes and advance the used IB space. */
- cur->ib[IB_MAIN].size = cs->main.base.cdw;
- cs->main.used_ib_space += cs->main.base.cdw * 4;
+ amdgpu_ib_finalize(&cs->main);
- if (cs->const_ib.ib_mapped) {
- cur->ib[IB_CONST].size = cs->const_ib.base.cdw;
- cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
- }
+ if (cs->const_ib.ib_mapped)
+ amdgpu_ib_finalize(&cs->const_ib);
- if (cs->const_preamble_ib.ib_mapped) {
- cur->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
- cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.cdw * 4;
- }
+ if (cs->const_preamble_ib.ib_mapped)
+ amdgpu_ib_finalize(&cs->const_preamble_ib);
/* Create a fence. */
amdgpu_fence_reference(&cur->fence, NULL);
- cur->fence = amdgpu_fence_create(cs->ctx,
- cur->request.ip_type,
- cur->request.ip_instance,
- cur->request.ring);
+ if (cs->next_fence) {
+ /* just move the reference */
+ cur->fence = cs->next_fence;
+ cs->next_fence = NULL;
+ } else {
+ cur->fence = amdgpu_fence_create(cs->ctx,
+ cur->request.ip_type,
+ cur->request.ip_instance,
+ cur->request.ring);
+ }
if (fence)
amdgpu_fence_reference(fence, cur->fence);
- /* Prepare buffers. */
+ amdgpu_cs_sync_flush(rcs);
+
+ /* Prepare buffers.
+ *
+    * The bo_fence_lock must be held until the submission is queued to ensure
+ * that the order of fence dependency updates matches the order of
+ * submissions.
+ */
pipe_mutex_lock(ws->bo_fence_lock);
amdgpu_add_fence_dependencies(cs);
- for (i = 0; i < num_buffers; i++) {
- p_atomic_inc(&cur->buffers[i].bo->num_active_ioctls);
- amdgpu_fence_reference(&cur->buffers[i].bo->fence[cs->ring_type],
- cur->fence);
- }
- pipe_mutex_unlock(ws->bo_fence_lock);
-
- amdgpu_cs_sync_flush(rcs);
/* Swap command streams. "cst" is going to be submitted. */
cs->csc = cs->cst;
cs->cst = cur;
/* Submit. */
- if (ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
- /* Set the semaphore to "busy". */
- pipe_semaphore_wait(&cs->flush_completed);
- amdgpu_ws_queue_cs(ws, cs);
- } else {
- amdgpu_cs_submit_ib(cs);
+ util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
+ amdgpu_cs_submit_ib, NULL);
+   /* The submission has been queued, so bo_fence_lock can be released now. */
+ pipe_mutex_unlock(ws->bo_fence_lock);
+
+ if (!(flags & RADEON_FLUSH_ASYNC)) {
+ amdgpu_cs_sync_flush(rcs);
+ error_code = cur->error_code;
}
} else {
amdgpu_cs_context_cleanup(cs->csc);
if (cs->const_preamble_ib.ib_mapped)
amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);
- ws->num_cs_flushes++;
+ cs->main.base.used_gart = 0;
+ cs->main.base.used_vram = 0;
+
+ if (cs->ring_type == RING_GFX)
+ ws->num_gfx_IBs++;
+ else if (cs->ring_type == RING_DMA)
+ ws->num_sdma_IBs++;
+
+ return error_code;
}
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
struct amdgpu_cs *cs = amdgpu_cs(rcs);
amdgpu_cs_sync_flush(rcs);
- pipe_semaphore_destroy(&cs->flush_completed);
+ util_queue_fence_destroy(&cs->flush_completed);
p_atomic_dec(&cs->ctx->ws->num_cs);
pb_reference(&cs->main.big_ib_buffer, NULL);
+ FREE(cs->main.base.prev);
pb_reference(&cs->const_ib.big_ib_buffer, NULL);
+ FREE(cs->const_ib.base.prev);
pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
+ FREE(cs->const_preamble_ib.base.prev);
amdgpu_destroy_cs_context(&cs->csc1);
amdgpu_destroy_cs_context(&cs->csc2);
+ amdgpu_fence_reference(&cs->next_fence, NULL);
FREE(cs);
}
-static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct pb_buffer *_buf,
- enum radeon_bo_usage usage)
+static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
+ struct pb_buffer *_buf,
+ enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
ws->base.cs_destroy = amdgpu_cs_destroy;
ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
- ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
ws->base.cs_validate = amdgpu_cs_validate;
ws->base.cs_check_space = amdgpu_cs_check_space;
- ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
- ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
ws->base.cs_flush = amdgpu_cs_flush;
+ ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;