#include <stdio.h>
#include <amdgpu_drm.h>
-#include "../../../drivers/radeonsi/sid.h"
+#include "amd/common/sid.h"
/* FENCES */
return amdgpu_fence_wait(fence, timeout, false);
}
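+/* Return a reference to the fence of the next flush, creating it on demand. */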
+static struct pipe_fence_handle *
+amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
+{
+ struct amdgpu_cs *cs = amdgpu_cs(rcs);
+ struct pipe_fence_handle *fence = NULL;
+
+ if (cs->next_fence) {
+ amdgpu_fence_reference(&fence, cs->next_fence);
+ return fence;
+ }
+
+ fence = amdgpu_fence_create(cs->ctx,
+ cs->csc->request.ip_type,
+ cs->csc->request.ip_instance,
+ cs->csc->request.ring);
+ if (!fence)
+ return NULL;
+
+ amdgpu_fence_reference(&cs->next_fence, fence);
+ return fence;
+}
+
/* CONTEXTS */
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
cs->request.ip_type != AMDGPU_HW_IP_VCE;
}
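+/* IB chaining (linking IB chunks with INDIRECT_BUFFER packets) is only used
+ * on the GFX ring on CIK and newer. */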
-static bool amdgpu_cs_has_chaining(enum ring_type ring_type)
+static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
- return ring_type == RING_GFX;
+ return cs->ctx->ws->info.chip_class >= CIK &&
+ cs->ring_type == RING_GFX;
}
static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
priority, &added_domains);
if (added_domains & RADEON_DOMAIN_VRAM)
- cs->csc->used_vram += bo->base.size;
+ cs->main.base.used_vram += bo->base.size;
else if (added_domains & RADEON_DOMAIN_GTT)
- cs->csc->used_gart += bo->base.size;
+ cs->main.base.used_gart += bo->base.size;
return index;
}
* is the largest power of two that fits into the size field of the
* INDIRECT_BUFFER packet.
*/
- if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)->ring_type))
+ if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
else
buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
unreachable("unhandled IB type");
}
- if (!amdgpu_cs_has_chaining(cs->ring_type)) {
+ if (!amdgpu_cs_has_chaining(cs)) {
ib_size = MAX2(ib_size,
4 * MIN2(util_next_power_of_two(ib->max_ib_size),
amdgpu_ib_max_submit_dwords(ib_type)));
}
cs->num_buffers = 0;
- cs->used_gart = 0;
- cs->used_vram = 0;
amdgpu_fence_reference(&cs->fence, NULL);
for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
return &cs->const_preamble_ib.base;
}
-#define OUT_CS(cs, value) (cs)->current.buf[(cs)->current.cdw++] = (value)
-
static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
struct pb_buffer *buf)
{
if (rcs->current.max_dw - rcs->current.cdw >= dw)
return true;
- if (!amdgpu_cs_has_chaining(cs->ring_type))
+ if (!amdgpu_cs_has_chaining(cs))
return false;
/* Allocate a new chunk */
/* Pad with NOPs and add INDIRECT_BUFFER packet */
while ((rcs->current.cdw & 7) != 4)
- OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
+ radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
- OUT_CS(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
-                  : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
+ radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
+                       : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
- OUT_CS(rcs, va);
- OUT_CS(rcs, va >> 32);
+ radeon_emit(rcs, va);
+ radeon_emit(rcs, va >> 32);
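+ /* Save where the size/flags dword goes (new_ptr_ib_size below) so it can be
+  * patched with the chained chunk's final size once that is known. */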
new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
- OUT_CS(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));
+ radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));
assert((rcs->current.cdw & 7) == 0);
assert(rcs->current.cdw <= rcs->current.max_dw);
return true;
}
-static bool amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
- uint64_t vram, uint64_t gtt)
-{
- struct amdgpu_cs *cs = amdgpu_cs(rcs);
- struct amdgpu_winsys *ws = cs->ctx->ws;
-
- vram += cs->csc->used_vram;
- gtt += cs->csc->used_gart;
-
- /* Anything that goes above the VRAM size should go to GTT. */
- if (vram > ws->info.vram_size)
- gtt += vram - ws->info.vram_size;
-
- /* Now we just need to check if we have enough GTT. */
- return gtt < ws->info.gart_size * 0.7;
-}
-
-static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
-{
- struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
-
- return cs->used_vram + cs->used_gart;
-}
-
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
struct radeon_bo_list_item *list)
{
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
struct amdgpu_cs_context *cs = acs->csc;
- int i, j;
+ int i;
cs->request.number_of_dependencies = 0;
for (i = 0; i < cs->num_buffers; i++) {
- for (j = 0; j < RING_LAST; j++) {
- struct amdgpu_cs_fence *dep;
- unsigned idx;
-
- struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
- if (!bo_fence)
- continue;
-
- if (bo_fence->ctx == acs->ctx &&
- bo_fence->fence.ip_type == cs->request.ip_type &&
- bo_fence->fence.ip_instance == cs->request.ip_instance &&
- bo_fence->fence.ring == cs->request.ring)
- continue;
-
- if (amdgpu_fence_wait((void *)bo_fence, 0, false))
- continue;
-
- if (bo_fence->submission_in_progress)
- os_wait_until_zero(&bo_fence->submission_in_progress,
- PIPE_TIMEOUT_INFINITE);
-
- idx = cs->request.number_of_dependencies++;
- if (idx >= cs->max_dependencies) {
- unsigned size;
-
- cs->max_dependencies = idx + 8;
- size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
- cs->request.dependencies = realloc(cs->request.dependencies, size);
- }
-
- dep = &cs->request.dependencies[idx];
- memcpy(dep, &bo_fence->fence, sizeof(*dep));
+ struct amdgpu_cs_fence *dep;
+ unsigned idx;
+
+ struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence;
+ if (!bo_fence)
+ continue;
+
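+ /* Fences from our own context and ring are ordered by submission order;
+  * no explicit dependency is needed. */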
+ if (bo_fence->ctx == acs->ctx &&
+ bo_fence->fence.ip_type == cs->request.ip_type &&
+ bo_fence->fence.ip_instance == cs->request.ip_instance &&
+ bo_fence->fence.ring == cs->request.ring)
+ continue;
+
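+ /* An already-signaled fence doesn't need a dependency. */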
+ if (amdgpu_fence_wait((void *)bo_fence, 0, false))
+ continue;
+
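+ /* The submission thread may still be filling in bo_fence->fence; wait for
+  * it before the fence is copied below. */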
+ if (bo_fence->submission_in_progress)
+ os_wait_until_zero(&bo_fence->submission_in_progress,
+ PIPE_TIMEOUT_INFINITE);
+
+ idx = cs->request.number_of_dependencies++;
+ if (idx >= cs->max_dependencies) {
+ unsigned size;
+
+ cs->max_dependencies = idx + 8;
+ size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
+ cs->request.dependencies = realloc(cs->request.dependencies, size);
+      }
+
+ dep = &cs->request.dependencies[idx];
+ memcpy(dep, &bo_fence->fence, sizeof(*dep));
}
}
switch (cs->ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
- while (rcs->current.cdw & 7)
- OUT_CS(rcs, 0x00000000); /* NOP packet */
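+ /* SI DMA and CIK+ SDMA use different NOP packet encodings. */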
+ if (ws->info.chip_class <= SI) {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0xf0000000); /* NOP packet */
+ } else {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0x00000000); /* NOP packet */
+ }
break;
case RING_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
- while (rcs->current.cdw & 7)
- OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
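+ /* Some CP firmware requires type2 NOPs for padding; others accept the
+  * preferred type3 NOP. */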
+ if (ws->info.gfx_ib_pad_with_type2) {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0x80000000); /* type2 nop packet */
+ } else {
+ while (rcs->current.cdw & 7)
+ radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
+ }
/* Also pad the const IB. */
if (cs->const_ib.ib_mapped)
while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
- OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
+ radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
if (cs->const_preamble_ib.ib_mapped)
while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
- OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
+ radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
break;
case RING_UVD:
while (rcs->current.cdw & 15)
- OUT_CS(rcs, 0x80000000); /* type2 nop packet */
+ radeon_emit(rcs, 0x80000000); /* type2 nop packet */
break;
default:
break;
/* Create a fence. */
amdgpu_fence_reference(&cur->fence, NULL);
- cur->fence = amdgpu_fence_create(cs->ctx,
- cur->request.ip_type,
- cur->request.ip_instance,
- cur->request.ring);
+ if (cs->next_fence) {
+ /* just move the reference */
+ cur->fence = cs->next_fence;
+ cs->next_fence = NULL;
+ } else {
+ cur->fence = amdgpu_fence_create(cs->ctx,
+ cur->request.ip_type,
+ cur->request.ip_instance,
+ cur->request.ring);
+ }
if (fence)
amdgpu_fence_reference(fence, cur->fence);
amdgpu_add_fence_dependencies(cs);
for (i = 0; i < num_buffers; i++) {
p_atomic_inc(&cur->buffers[i].bo->num_active_ioctls);
- amdgpu_fence_reference(&cur->buffers[i].bo->fence[cs->ring_type],
-                        cur->fence);
+ amdgpu_fence_reference(&cur->buffers[i].bo->fence,
+                        cur->fence);
}
pipe_mutex_unlock(ws->bo_fence_lock);
if (cs->const_preamble_ib.ib_mapped)
amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);
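+ /* Reset the memory-usage counters tracked on the winsys CS for the next IB. */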
+ cs->main.base.used_gart = 0;
+ cs->main.base.used_vram = 0;
+
ws->num_cs_flushes++;
return error_code;
}
FREE(cs->const_preamble_ib.base.prev);
amdgpu_destroy_cs_context(&cs->csc1);
amdgpu_destroy_cs_context(&cs->csc2);
+ amdgpu_fence_reference(&cs->next_fence, NULL);
FREE(cs);
}
ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
ws->base.cs_validate = amdgpu_cs_validate;
ws->base.cs_check_space = amdgpu_cs_check_space;
- ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
- ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
ws->base.cs_flush = amdgpu_cs_flush;
+ ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;