#include <stdlib.h>
#include <amdgpu.h>
-#include <amdgpu_drm.h>
+#include "drm-uapi/amdgpu_drm.h"
#include <assert.h>
+#include <pthread.h>
+#include <errno.h>
+#include "util/u_memory.h"
#include "ac_debug.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
};
struct radv_amdgpu_cs {
- struct radeon_winsys_cs base;
+ struct radeon_cmdbuf base;
struct radv_amdgpu_winsys *ws;
struct amdgpu_cs_ib_info ib;
uint8_t *ib_mapped;
unsigned max_num_buffers;
unsigned num_buffers;
- amdgpu_bo_handle *handles;
- uint8_t *priorities;
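+	/* Kernel-visible BO list: a DRM handle and priority per buffer. */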
+ struct drm_amdgpu_bo_list_entry *handles;
struct radeon_winsys_bo **old_ib_buffers;
unsigned num_old_ib_buffers;
unsigned max_num_old_ib_buffers;
unsigned *ib_size_ptr;
- bool failed;
+ VkResult status;
bool is_chained;
int buffer_hash_table[1024];
unsigned num_virtual_buffers;
unsigned max_num_virtual_buffers;
struct radeon_winsys_bo **virtual_buffers;
- uint8_t *virtual_buffer_priorities;
int *virtual_buffer_hash_table;
+
+ /* For chips that don't support chaining. */
+ struct radeon_cmdbuf *old_cs_buffers;
+ unsigned num_old_cs_buffers;
};
static inline struct radv_amdgpu_cs *
-radv_amdgpu_cs(struct radeon_winsys_cs *base)
+radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
return (struct radv_amdgpu_cs*)base;
}
}
}
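+/* Local replacement for libdrm's amdgpu_cs_request: instead of an
+ * amdgpu_bo_list_handle it carries raw drm_amdgpu_bo_list_entry handles so
+ * the BO list can be passed straight through the CS ioctl.
+ */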
+struct radv_amdgpu_cs_request {
+ /** Specify flags with additional information */
+ uint64_t flags;
+
+ /** Specify HW IP block type to which to send the IB. */
+ unsigned ip_type;
+
+ /** IP instance index if there are several IPs of the same type. */
+ unsigned ip_instance;
+
+ /**
+ * Specify ring index of the IP. We could have several rings
+ * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
+ */
+ uint32_t ring;
+
+ /**
+ * BO list handles used by this request.
+ */
+ struct drm_amdgpu_bo_list_entry *handles;
+ uint32_t num_handles;
+
+ /**
+	 * Number of dependencies this command submission needs to
+	 * wait for before starting execution.
+ */
+ uint32_t number_of_dependencies;
+
+ /**
+ * Array of dependencies which need to be met before
+ * execution can start.
+ */
+ struct amdgpu_cs_fence *dependencies;
+
+ /** Number of IBs to submit in the field ibs. */
+ uint32_t number_of_ibs;
+
+ /**
+	 * IBs to submit. They will be submitted together as a single entity.
+ */
+ struct amdgpu_cs_ib_info *ibs;
+
+ /**
+	 * The returned sequence number for the command submission.
+ */
+ uint64_t seq_no;
+
+ /**
+	 * The fence information.
+ */
+ struct amdgpu_cs_fence_info fence_info;
+};
+
+
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
uint32_t ip_type,
uint32_t ring,
struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
- struct amdgpu_cs_request *request,
+ struct radv_amdgpu_cs_request *request,
struct radv_winsys_sem_info *sem_info);
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
struct radv_amdgpu_fence *fence,
- struct amdgpu_cs_request *req)
+ struct radv_amdgpu_cs_request *req)
{
fence->fence.context = ctx->ctx;
fence->fence.ip_type = req->ip_type;
fence->fence.ip_instance = req->ip_instance;
fence->fence.ring = req->ring;
fence->fence.fence = req->seq_no;
- fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
+ fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
}
static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
+ if (!fence)
+ return NULL;
+
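+	/* UINT64_MAX marks the fence as unsubmitted; see radv_amdgpu_is_fence_waitable(). */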
+ fence->fence.fence = UINT64_MAX;
return (struct radeon_winsys_fence*)fence;
}
free(fence);
}
+static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ fence->fence.fence = UINT64_MAX;
+}
+
+static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ fence->fence.fence = 0;
+}
+
+static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ return fence->fence.fence < UINT64_MAX;
+}
+
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
struct radeon_winsys_fence *_fence,
bool absolute,
int r;
uint32_t expired = 0;
+ /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
+ if (fence->fence.fence == UINT64_MAX)
+ return false;
+
+ if (fence->fence.fence == 0)
+ return true;
+
if (fence->user_ptr) {
if (*fence->user_ptr >= fence->fence.fence)
return true;
return false;
}
-static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
+
+static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
+ struct radeon_winsys_fence *const *_fences,
+ uint32_t fence_count,
+ bool wait_all,
+ uint64_t timeout)
+{
+ struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
+ int r;
+ uint32_t expired = 0, first = 0;
+
+ if (!fences)
+ return false;
+
+ for (uint32_t i = 0; i < fence_count; ++i)
+ fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
+
+ /* Now use the libdrm query. */
+ r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
+ timeout, &expired, &first);
+
+ free(fences);
+ if (r) {
+ fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
+ return false;
+ }
+
+ if (expired)
+ return true;
+
+ return false;
+}
+
+static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
+ for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
+ struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
+ free(rcs->buf);
+ }
+
+ free(cs->old_cs_buffers);
free(cs->old_ib_buffers);
free(cs->virtual_buffers);
- free(cs->virtual_buffer_priorities);
free(cs->virtual_buffer_hash_table);
free(cs->handles);
- free(cs->priorities);
free(cs);
}
-static boolean radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
- enum ring_type ring_type)
+static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
+ enum ring_type ring_type)
{
for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
cs->buffer_hash_table[i] = -1;
cs->hw_ip = ring_to_hw_ip(ring_type);
- return true;
}
-static struct radeon_winsys_cs *
+static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
enum ring_type ring_type)
{
if (cs->ws->use_ib_bos) {
cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
- RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS|
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY |
+ RADEON_FLAG_GTT_WC,
+ RADV_BO_PRIORITY_CS);
if (!cs->ib_buffer) {
free(cs);
return NULL;
cs->ib_size_ptr = &cs->ib.size;
cs->ib.size = 0;
- ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
+ ws->cs_add_buffer(&cs->base, cs->ib_buffer);
} else {
- cs->base.buf = malloc(16384);
- cs->base.max_dw = 4096;
- if (!cs->base.buf) {
+ uint32_t *buf = malloc(16384);
+ if (!buf) {
free(cs);
return NULL;
}
+ cs->base.buf = buf;
+ cs->base.max_dw = 4096;
}
return &cs->base;
}
-static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
+static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
- if (cs->failed) {
+ if (cs->status != VK_SUCCESS) {
cs->base.cdw = 0;
return;
}
/* The total ib size cannot exceed limit_dws dwords. */
if (ib_dws > limit_dws)
{
- cs->failed = true;
+ /* The maximum size in dwords has been reached,
+ * try to allocate a new one.
+ */
+ struct radeon_cmdbuf *old_cs_buffers =
+ realloc(cs->old_cs_buffers,
+ (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
+ if (!old_cs_buffers) {
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ cs->base.cdw = 0;
+ return;
+ }
+ cs->old_cs_buffers = old_cs_buffers;
+
+ /* Store the current one for submitting it later. */
+ cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
+ cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
+ cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
+ cs->num_old_cs_buffers++;
+
+ /* Reset the cs, it will be re-allocated below. */
cs->base.cdw = 0;
- return;
+ cs->base.buf = NULL;
+
+ /* Re-compute the number of dwords to allocate. */
+ ib_dws = MAX2(cs->base.cdw + min_size,
+ MIN2(cs->base.max_dw * 2, limit_dws));
+ if (ib_dws > limit_dws) {
+			fprintf(stderr, "amdgpu: The requested number of "
+				"dwords exceeds the maximum IB size\n");
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return;
+ }
}
uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
cs->base.buf = new_buf;
cs->base.max_dw = ib_dws;
} else {
- cs->failed = true;
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
cs->base.cdw = 0;
}
return;
ib_size = MIN2(ib_size, 0xfffff);
while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
- cs->base.buf[cs->base.cdw++] = 0xffff1000;
+ radeon_emit(&cs->base, PKT3_NOP_PAD);
*cs->ib_size_ptr |= cs->base.cdw + 4;
if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
- cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
- cs->old_ib_buffers = realloc(cs->old_ib_buffers,
- cs->max_num_old_ib_buffers * sizeof(void*));
+ unsigned max_num_old_ib_buffers =
+ MAX2(1, cs->max_num_old_ib_buffers * 2);
+ struct radeon_winsys_bo **old_ib_buffers =
+ realloc(cs->old_ib_buffers,
+ max_num_old_ib_buffers * sizeof(void*));
+ if (!old_ib_buffers) {
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return;
+ }
+ cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
+ cs->old_ib_buffers = old_ib_buffers;
}
cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS|
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY |
+ RADEON_FLAG_GTT_WC,
+ RADV_BO_PRIORITY_CS);
if (!cs->ib_buffer) {
cs->base.cdw = 0;
- cs->failed = true;
+ cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
}
if (!cs->ib_mapped) {
cs->ws->base.buffer_destroy(cs->ib_buffer);
cs->base.cdw = 0;
- cs->failed = true;
+
+ /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
+ cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
}
- cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
+ cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
+
-	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
-	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
-	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
-	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
-	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
+	radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
+	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
+	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
+	radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
+ cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
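+	/* ib_size_ptr now points at the CHAIN|VALID dword just emitted; the
+	 * chained IB's actual size is OR'ed into it on the next grow/finalize.
+	 */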
cs->base.buf = (uint32_t *)cs->ib_mapped;
cs->base.cdw = 0;
}
-static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
+static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
if (cs->ws->use_ib_bos) {
while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
- cs->base.buf[cs->base.cdw++] = 0xffff1000;
+ radeon_emit(&cs->base, PKT3_NOP_PAD);
*cs->ib_size_ptr |= cs->base.cdw;
cs->is_chained = false;
}
- return !cs->failed;
+ return cs->status;
}
-static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
+static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
cs->base.cdw = 0;
- cs->failed = false;
+ cs->status = VK_SUCCESS;
for (unsigned i = 0; i < cs->num_buffers; ++i) {
- unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
- (ARRAY_SIZE(cs->buffer_hash_table) - 1);
+ unsigned hash = cs->handles[i].bo_handle &
+ (ARRAY_SIZE(cs->buffer_hash_table) - 1);
cs->buffer_hash_table[hash] = -1;
}
cs->num_virtual_buffers = 0;
if (cs->ws->use_ib_bos) {
- cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
+ cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
cs->ib_size_ptr = &cs->ib.size;
cs->ib.size = 0;
+ } else {
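+		/* Without IB BOs, drop the scratch buffers stashed by radv_amdgpu_cs_grow(). */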
+ for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
+ struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
+ free(rcs->buf);
+ }
+
+ free(cs->old_cs_buffers);
+ cs->old_cs_buffers = NULL;
+ cs->num_old_cs_buffers = 0;
}
}
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
- amdgpu_bo_handle bo)
+ uint32_t bo)
{
- unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
+ unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
int index = cs->buffer_hash_table[hash];
if (index == -1)
return -1;
- if (cs->handles[index] == bo)
+ if (cs->handles[index].bo_handle == bo)
return index;
for (unsigned i = 0; i < cs->num_buffers; ++i) {
- if (cs->handles[i] == bo) {
+ if (cs->handles[i].bo_handle == bo) {
cs->buffer_hash_table[hash] = i;
return i;
}
}
static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
- amdgpu_bo_handle bo,
- uint8_t priority)
+ uint32_t bo, uint8_t priority)
{
unsigned hash;
int index = radv_amdgpu_cs_find_buffer(cs, bo);
- if (index != -1) {
- cs->priorities[index] = MAX2(cs->priorities[index], priority);
+ if (index != -1)
return;
- }
if (cs->num_buffers == cs->max_num_buffers) {
unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
- cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
- cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
- cs->max_num_buffers = new_count;
+ struct drm_amdgpu_bo_list_entry *new_entries =
+ realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
+ if (new_entries) {
+ cs->max_num_buffers = new_count;
+ cs->handles = new_entries;
+ } else {
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return;
+ }
}
- cs->handles[cs->num_buffers] = bo;
- cs->priorities[cs->num_buffers] = priority;
+ cs->handles[cs->num_buffers].bo_handle = bo;
+ cs->handles[cs->num_buffers].bo_priority = priority;
- hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
+ hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
cs->buffer_hash_table[hash] = cs->num_buffers;
++cs->num_buffers;
}
-static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
- struct radeon_winsys_bo *bo,
- uint8_t priority)
+static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
+ struct radeon_winsys_bo *bo)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
if (!cs->virtual_buffer_hash_table) {
- cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
+ int *virtual_buffer_hash_table =
+ malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
+ if (!virtual_buffer_hash_table) {
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return;
+ }
+ cs->virtual_buffer_hash_table = virtual_buffer_hash_table;
+
for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
cs->virtual_buffer_hash_table[i] = -1;
}
if (cs->virtual_buffer_hash_table[hash] >= 0) {
int idx = cs->virtual_buffer_hash_table[hash];
if (cs->virtual_buffers[idx] == bo) {
- cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
return;
}
for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
if (cs->virtual_buffers[i] == bo) {
- cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
cs->virtual_buffer_hash_table[hash] = i;
return;
}
}
if(cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
- cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
- cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radv_amdgpu_virtual_virtual_buffer*) * cs->max_num_virtual_buffers);
- cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
+ unsigned max_num_virtual_buffers =
+ MAX2(2, cs->max_num_virtual_buffers * 2);
+ struct radeon_winsys_bo **virtual_buffers =
+ realloc(cs->virtual_buffers,
+				sizeof(struct radeon_winsys_bo *) * max_num_virtual_buffers);
+ if (!virtual_buffers) {
+ cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
+ return;
+ }
+ cs->max_num_virtual_buffers = max_num_virtual_buffers;
+ cs->virtual_buffers = virtual_buffers;
}
cs->virtual_buffers[cs->num_virtual_buffers] = bo;
- cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;
cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
++cs->num_virtual_buffers;
}
-static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
- struct radeon_winsys_bo *_bo,
- uint8_t priority)
+static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
+ struct radeon_winsys_bo *_bo)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
+ if (cs->status != VK_SUCCESS)
+ return;
+
if (bo->is_virtual) {
- radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
+ radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
return;
}
if (bo->base.is_local)
return;
- radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
+ radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
}
-static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
- struct radeon_winsys_cs *_child)
+static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
+ struct radeon_cmdbuf *_child)
{
struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
+ if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
+ return;
+
for (unsigned i = 0; i < child->num_buffers; ++i) {
- radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
- child->priorities[i]);
+ radv_amdgpu_cs_add_buffer_internal(parent,
+ child->handles[i].bo_handle,
+ child->handles[i].bo_priority);
}
for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
- radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
- child->virtual_buffer_priorities[i]);
+ radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
}
if (parent->ws->use_ib_bos) {
if (parent->base.cdw + 4 > parent->base.max_dw)
radv_amdgpu_cs_grow(&parent->base, 4);
- parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
- parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
- parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
- parent->base.buf[parent->base.cdw++] = child->ib.size;
+ radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
+ radeon_emit(&parent->base, child->ib.ib_mc_address);
+ radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
+ radeon_emit(&parent->base, child->ib.size);
} else {
if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
}
}
-static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
- struct radeon_winsys_cs **cs_array,
- unsigned count,
- struct radv_amdgpu_winsys_bo *extra_bo,
- struct radeon_winsys_cs *extra_cs,
- amdgpu_bo_list_handle *bo_list)
+static VkResult
+radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
+ struct radeon_cmdbuf **cs_array,
+ unsigned count,
+ struct radv_amdgpu_winsys_bo **extra_bo_array,
+ unsigned num_extra_bo,
+ struct radeon_cmdbuf *extra_cs,
+ const struct radv_winsys_bo_list *radv_bo_list,
+ unsigned *rnum_handles,
+ struct drm_amdgpu_bo_list_entry **rhandles)
{
- int r = 0;
+ struct drm_amdgpu_bo_list_entry *handles = NULL;
+ unsigned num_handles = 0;
if (ws->debug_all_bos) {
struct radv_amdgpu_winsys_bo *bo;
- amdgpu_bo_handle *handles;
- unsigned num = 0;
pthread_mutex_lock(&ws->global_bo_list_lock);
handles = malloc(sizeof(handles[0]) * ws->num_buffers);
if (!handles) {
pthread_mutex_unlock(&ws->global_bo_list_lock);
- return -ENOMEM;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
- assert(num < ws->num_buffers);
- handles[num++] = bo->bo;
+ assert(num_handles < ws->num_buffers);
+ handles[num_handles].bo_handle = bo->bo_handle;
+ handles[num_handles].bo_priority = bo->priority;
+ num_handles++;
}
- r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
- handles, NULL,
- bo_list);
- free(handles);
pthread_mutex_unlock(&ws->global_bo_list_lock);
- } else if (count == 1 && !extra_bo && !extra_cs &&
+ } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
!radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
- if (cs->num_buffers == 0) {
- *bo_list = 0;
- return 0;
- }
- r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
- cs->priorities, bo_list);
+ if (cs->num_buffers == 0)
+ return VK_SUCCESS;
+
+ handles = malloc(sizeof(handles[0]) * cs->num_buffers);
+ if (!handles)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ memcpy(handles, cs->handles,
+ sizeof(handles[0]) * cs->num_buffers);
+ num_handles = cs->num_buffers;
} else {
- unsigned total_buffer_count = !!extra_bo;
- unsigned unique_bo_count = !!extra_bo;
+ unsigned total_buffer_count = num_extra_bo;
+ num_handles = num_extra_bo;
for (unsigned i = 0; i < count; ++i) {
struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
total_buffer_count += cs->num_buffers;
if (extra_cs) {
total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
}
- if (total_buffer_count == 0) {
- *bo_list = 0;
- return 0;
- }
- amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
- uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
- if (!handles || !priorities) {
- free(handles);
- free(priorities);
- return -ENOMEM;
+
+ if (radv_bo_list) {
+ total_buffer_count += radv_bo_list->count;
}
- if (extra_bo) {
- handles[0] = extra_bo->bo;
- priorities[0] = 8;
+ if (total_buffer_count == 0)
+ return VK_SUCCESS;
+
+ handles = malloc(sizeof(handles[0]) * total_buffer_count);
+ if (!handles)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ for (unsigned i = 0; i < num_extra_bo; i++) {
+ handles[i].bo_handle = extra_bo_array[i]->bo_handle;
+ handles[i].bo_priority = extra_bo_array[i]->priority;
}
for (unsigned i = 0; i < count + !!extra_cs; ++i) {
if (!cs->num_buffers)
continue;
- if (unique_bo_count == 0) {
- memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
- memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
- unique_bo_count = cs->num_buffers;
+ if (num_handles == 0 && !cs->num_virtual_buffers) {
+ memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
+ num_handles = cs->num_buffers;
continue;
}
- int unique_bo_so_far = unique_bo_count;
+ int unique_bo_so_far = num_handles;
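+			/* Merge this CS's BOs, skipping any already collected. */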
for (unsigned j = 0; j < cs->num_buffers; ++j) {
bool found = false;
for (unsigned k = 0; k < unique_bo_so_far; ++k) {
- if (handles[k] == cs->handles[j]) {
+ if (handles[k].bo_handle == cs->handles[j].bo_handle) {
found = true;
- priorities[k] = MAX2(priorities[k],
- cs->priorities[j]);
break;
}
}
if (!found) {
- handles[unique_bo_count] = cs->handles[j];
- priorities[unique_bo_count] = cs->priorities[j];
- ++unique_bo_count;
+ handles[num_handles] = cs->handles[j];
+ ++num_handles;
}
}
for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
bool found = false;
- for (unsigned m = 0; m < unique_bo_count; ++m) {
- if (handles[m] == bo->bo) {
+ for (unsigned m = 0; m < num_handles; ++m) {
+ if (handles[m].bo_handle == bo->bo_handle) {
found = true;
- priorities[m] = MAX2(priorities[m],
- cs->virtual_buffer_priorities[j]);
break;
}
}
if (!found) {
- handles[unique_bo_count] = bo->bo;
- priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
- ++unique_bo_count;
+ handles[num_handles].bo_handle = bo->bo_handle;
+ handles[num_handles].bo_priority = bo->priority;
+ ++num_handles;
}
}
}
}
- if (unique_bo_count > 0) {
- r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
- priorities, bo_list);
- } else {
- *bo_list = 0;
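+	/* Finally fold in the externally supplied BO list, de-duplicating
+	 * against everything gathered so far.
+	 */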
+ if (radv_bo_list) {
+ unsigned unique_bo_so_far = num_handles;
+ for (unsigned i = 0; i < radv_bo_list->count; ++i) {
+ struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
+ bool found = false;
+ for (unsigned j = 0; j < unique_bo_so_far; ++j) {
+ if (bo->bo_handle == handles[j].bo_handle) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ handles[num_handles].bo_handle = bo->bo_handle;
+ handles[num_handles].bo_priority = bo->priority;
+ ++num_handles;
+ }
+ }
}
-
- free(handles);
- free(priorities);
}
- return r;
+ *rhandles = handles;
+ *rnum_handles = num_handles;
+
+ return VK_SUCCESS;
}
static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
}
static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
- struct amdgpu_cs_request *request)
+ struct radv_amdgpu_cs_request *request)
{
radv_amdgpu_request_to_fence(ctx,
&ctx->last_submission[request->ip_type][request->ring],
request);
}
-static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
- int queue_idx,
- struct radv_winsys_sem_info *sem_info,
- struct radeon_winsys_cs **cs_array,
- unsigned cs_count,
- struct radeon_winsys_cs *initial_preamble_cs,
- struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_fence *_fence)
+static VkResult
+radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
+ int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
+ const struct radv_winsys_bo_list *radv_bo_list,
+ struct radeon_cmdbuf **cs_array,
+ unsigned cs_count,
+ struct radeon_cmdbuf *initial_preamble_cs,
+ struct radeon_cmdbuf *continue_preamble_cs,
+ struct radeon_winsys_fence *_fence)
{
- int r;
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_request request = {0};
+ struct drm_amdgpu_bo_list_entry *handles = NULL;
+ struct radv_amdgpu_cs_request request = {0};
struct amdgpu_cs_ib_info ibs[2];
+ unsigned number_of_ibs = 1;
+ unsigned num_handles = 0;
+ VkResult result;
for (unsigned i = cs_count; i--;) {
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
}
}
- r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
- if (r) {
- fprintf(stderr, "amdgpu: buffer list creation failed for the "
- "chained submission(%d)\n", r);
- return r;
+ /* Get the BO list. */
+ result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
+ initial_preamble_cs, radv_bo_list,
+ &num_handles, &handles);
+ if (result != VK_SUCCESS)
+ return result;
+
+ /* Configure the CS request. */
+ if (initial_preamble_cs) {
+ ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
+ ibs[1] = cs0->ib;
+ number_of_ibs++;
+ } else {
+ ibs[0] = cs0->ib;
}
request.ip_type = cs0->hw_ip;
request.ring = queue_idx;
- request.number_of_ibs = 1;
- request.ibs = &cs0->ib;
- request.resources = bo_list;
+ request.number_of_ibs = number_of_ibs;
+ request.ibs = ibs;
+ request.handles = handles;
+ request.num_handles = num_handles;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
- if (initial_preamble_cs) {
- request.ibs = ibs;
- request.number_of_ibs = 2;
- ibs[1] = cs0->ib;
- ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
- }
+ /* Submit the CS. */
+ result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
- r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
- if (r) {
- if (r == -ENOMEM)
- fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
- else
- fprintf(stderr, "amdgpu: The CS has been rejected, "
- "see dmesg for more information.\n");
- }
+ free(request.handles);
- if (bo_list)
- amdgpu_bo_list_destroy(bo_list);
+ if (result != VK_SUCCESS)
+ return result;
if (fence)
radv_amdgpu_request_to_fence(ctx, fence, &request);
radv_assign_last_submit(ctx, &request);
- return r;
+ return VK_SUCCESS;
}
-static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
- int queue_idx,
- struct radv_winsys_sem_info *sem_info,
- struct radeon_winsys_cs **cs_array,
- unsigned cs_count,
- struct radeon_winsys_cs *initial_preamble_cs,
- struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_fence *_fence)
+static VkResult
+radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
+ int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
+ const struct radv_winsys_bo_list *radv_bo_list,
+ struct radeon_cmdbuf **cs_array,
+ unsigned cs_count,
+ struct radeon_cmdbuf *initial_preamble_cs,
+ struct radeon_cmdbuf *continue_preamble_cs,
+ struct radeon_winsys_fence *_fence)
{
- int r;
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_request request;
- bool emit_signal_sem = sem_info->cs_emit_signal;
- assert(cs_count);
+ struct drm_amdgpu_bo_list_entry *handles = NULL;
+	struct radv_amdgpu_cs_request request = {0};
+ struct amdgpu_cs_ib_info *ibs;
+ struct radv_amdgpu_cs *cs0;
+ unsigned num_handles = 0;
+ unsigned number_of_ibs;
+ VkResult result;
- for (unsigned i = 0; i < cs_count;) {
- struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
- struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
- struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
- unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
- cs_count - i);
+ assert(cs_count);
+ cs0 = radv_amdgpu_cs(cs_array[0]);
+
+ /* Compute the number of IBs for this submit. */
+ number_of_ibs = cs_count + !!initial_preamble_cs;
+
+ /* Get the BO list. */
+ result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
+ initial_preamble_cs, radv_bo_list,
+ &num_handles, &handles);
+ if (result != VK_SUCCESS)
+ return result;
+
+ ibs = malloc(number_of_ibs * sizeof(*ibs));
+ if (!ibs) {
+		free(handles);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
- memset(&request, 0, sizeof(request));
+ /* Configure the CS request. */
+ if (initial_preamble_cs)
+ ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
- r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
- preamble_cs, &bo_list);
- if (r) {
- fprintf(stderr, "amdgpu: buffer list creation failed "
- "for the fallback submission (%d)\n", r);
- return r;
- }
+ for (unsigned i = 0; i < cs_count; i++) {
+ struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
- request.ip_type = cs0->hw_ip;
- request.ring = queue_idx;
- request.resources = bo_list;
- request.number_of_ibs = cnt + !!preamble_cs;
- request.ibs = ibs;
- request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
+ ibs[i + !!initial_preamble_cs] = cs->ib;
- if (preamble_cs) {
- ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
+ if (cs->is_chained) {
+ *cs->ib_size_ptr -= 4;
+ cs->is_chained = false;
}
+ }
- for (unsigned j = 0; j < cnt; ++j) {
- struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
- ibs[j + !!preamble_cs] = cs->ib;
-
- if (cs->is_chained) {
- *cs->ib_size_ptr -= 4;
- cs->is_chained = false;
- }
- }
+ request.ip_type = cs0->hw_ip;
+ request.ring = queue_idx;
+ request.handles = handles;
+ request.num_handles = num_handles;
+ request.number_of_ibs = number_of_ibs;
+ request.ibs = ibs;
+ request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
- sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
- r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
- if (r) {
- if (r == -ENOMEM)
- fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
- else
- fprintf(stderr, "amdgpu: The CS has been rejected, "
- "see dmesg for more information.\n");
- }
+ /* Submit the CS. */
+ result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
- if (bo_list)
- amdgpu_bo_list_destroy(bo_list);
+ free(request.handles);
+ free(ibs);
- if (r)
- return r;
+ if (result != VK_SUCCESS)
+ return result;
- i += cnt;
- }
if (fence)
radv_amdgpu_request_to_fence(ctx, fence, &request);
radv_assign_last_submit(ctx, &request);
- return 0;
+ return VK_SUCCESS;
}
-static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
- int queue_idx,
- struct radv_winsys_sem_info *sem_info,
- struct radeon_winsys_cs **cs_array,
- unsigned cs_count,
- struct radeon_winsys_cs *initial_preamble_cs,
- struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_fence *_fence)
+static VkResult
+radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
+ int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
+ const struct radv_winsys_bo_list *radv_bo_list,
+ struct radeon_cmdbuf **cs_array,
+ unsigned cs_count,
+ struct radeon_cmdbuf *initial_preamble_cs,
+ struct radeon_cmdbuf *continue_preamble_cs,
+ struct radeon_winsys_fence *_fence)
{
- int r;
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_request request;
- uint32_t pad_word = 0xffff1000U;
+ struct radv_amdgpu_cs_request request;
+ uint32_t pad_word = PKT3_NOP_PAD;
bool emit_signal_sem = sem_info->cs_emit_signal;
+ VkResult result;
- if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
+ if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
pad_word = 0x80000000;
assert(cs_count);
for (unsigned i = 0; i < cs_count;) {
- struct amdgpu_cs_ib_info ib = {0};
- struct radeon_winsys_bo *bo = NULL;
- struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
+ struct amdgpu_cs_ib_info *ibs;
+ struct radeon_winsys_bo **bos;
+ struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
+ struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
+ struct drm_amdgpu_bo_list_entry *handles = NULL;
+ unsigned num_handles = 0;
+ unsigned number_of_ibs;
uint32_t *ptr;
unsigned cnt = 0;
unsigned size = 0;
unsigned pad_words = 0;
- if (preamble_cs)
- size += preamble_cs->cdw;
- while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
- size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
- ++cnt;
- }
+ /* Compute the number of IBs for this submit. */
+ number_of_ibs = cs->num_old_cs_buffers + 1;
+
+ ibs = malloc(number_of_ibs * sizeof(*ibs));
+ if (!ibs)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
- while(!size || (size & 7)) {
- size++;
- pad_words++;
+ bos = malloc(number_of_ibs * sizeof(*bos));
+ if (!bos) {
+ free(ibs);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
- assert(cnt);
- bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
- ptr = ws->buffer_map(bo);
+ if (number_of_ibs > 1) {
+ /* Special path when the maximum size in dwords has
+ * been reached because we need to handle more than one
+ * IB per submit.
+ */
+ struct radeon_cmdbuf **new_cs_array;
+ unsigned idx = 0;
- if (preamble_cs) {
- memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
- ptr += preamble_cs->cdw;
- }
+ new_cs_array = malloc(cs->num_old_cs_buffers *
+ sizeof(*new_cs_array));
+ assert(new_cs_array);
- for (unsigned j = 0; j < cnt; ++j) {
- struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
- memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
- ptr += cs->base.cdw;
+ for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
+ new_cs_array[idx++] = &cs->old_cs_buffers[j];
+ new_cs_array[idx++] = cs_array[i];
- }
+ for (unsigned j = 0; j < number_of_ibs; j++) {
+ struct radeon_cmdbuf *rcs = new_cs_array[j];
+ bool needs_preamble = preamble_cs && j == 0;
+				unsigned size = 0;
+				unsigned pad_words = 0;
- for (unsigned j = 0; j < pad_words; ++j)
- *ptr++ = pad_word;
+ if (needs_preamble)
+ size += preamble_cs->cdw;
+ size += rcs->cdw;
- memset(&request, 0, sizeof(request));
+ assert(size < 0xffff8);
+ while (!size || (size & 7)) {
+ size++;
+ pad_words++;
+ }
- r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
- (struct radv_amdgpu_winsys_bo*)bo,
- preamble_cs, &bo_list);
- if (r) {
- fprintf(stderr, "amdgpu: buffer list creation failed "
- "for the sysmem submission (%d)\n", r);
- return r;
+ bos[j] = ws->buffer_create(ws, 4 * size, 4096,
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY,
+ RADV_BO_PRIORITY_CS);
+ ptr = ws->buffer_map(bos[j]);
+
+ if (needs_preamble) {
+ memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
+ ptr += preamble_cs->cdw;
+ }
+
+ memcpy(ptr, rcs->buf, 4 * rcs->cdw);
+ ptr += rcs->cdw;
+
+ for (unsigned k = 0; k < pad_words; ++k)
+ *ptr++ = pad_word;
+
+ ibs[j].size = size;
+ ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
+ ibs[j].flags = 0;
+ }
+
+ cnt++;
+ free(new_cs_array);
+ } else {
+ if (preamble_cs)
+ size += preamble_cs->cdw;
+
+ while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
+ size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
+ ++cnt;
+ }
+
+ while (!size || (size & 7)) {
+ size++;
+ pad_words++;
+ }
+ assert(cnt);
+
+ bos[0] = ws->buffer_create(ws, 4 * size, 4096,
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY,
+ RADV_BO_PRIORITY_CS);
+ ptr = ws->buffer_map(bos[0]);
+
+ if (preamble_cs) {
+ memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
+ ptr += preamble_cs->cdw;
+ }
+
+ for (unsigned j = 0; j < cnt; ++j) {
+ struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
+ memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
+ ptr += cs->base.cdw;
+
+ }
+
+ for (unsigned j = 0; j < pad_words; ++j)
+ *ptr++ = pad_word;
+
+ ibs[0].size = size;
+ ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
+ ibs[0].flags = 0;
+ }
+
+ result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
+ (struct radv_amdgpu_winsys_bo **)bos,
+ number_of_ibs, preamble_cs,
+ radv_bo_list,
+ &num_handles, &handles);
+ if (result != VK_SUCCESS) {
+ free(ibs);
+ free(bos);
+ return result;
}
- ib.size = size;
- ib.ib_mc_address = radv_buffer_get_va(bo);
+ memset(&request, 0, sizeof(request));
request.ip_type = cs0->hw_ip;
request.ring = queue_idx;
- request.resources = bo_list;
- request.number_of_ibs = 1;
- request.ibs = &ib;
+ request.handles = handles;
+ request.num_handles = num_handles;
+ request.number_of_ibs = number_of_ibs;
+ request.ibs = ibs;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
- r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
- if (r) {
- if (r == -ENOMEM)
- fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
- else
- fprintf(stderr, "amdgpu: The CS has been rejected, "
- "see dmesg for more information.\n");
+ result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
+
+ free(request.handles);
+
+ for (unsigned j = 0; j < number_of_ibs; j++) {
+ ws->buffer_destroy(bos[j]);
}
- if (bo_list)
- amdgpu_bo_list_destroy(bo_list);
+ free(ibs);
+ free(bos);
- ws->buffer_destroy(bo);
- if (r)
- return r;
+ if (result != VK_SUCCESS)
+ return result;
i += cnt;
}
radv_assign_last_submit(ctx, &request);
- return 0;
+ return VK_SUCCESS;
}
-static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
- int queue_idx,
- struct radeon_winsys_cs **cs_array,
- unsigned cs_count,
- struct radeon_winsys_cs *initial_preamble_cs,
- struct radeon_winsys_cs *continue_preamble_cs,
- struct radv_winsys_sem_info *sem_info,
- bool can_patch,
- struct radeon_winsys_fence *_fence)
+static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
+ int queue_idx,
+ struct radeon_cmdbuf **cs_array,
+ unsigned cs_count,
+ struct radeon_cmdbuf *initial_preamble_cs,
+ struct radeon_cmdbuf *continue_preamble_cs,
+ struct radv_winsys_sem_info *sem_info,
+ const struct radv_winsys_bo_list *bo_list,
+ bool can_patch,
+ struct radeon_winsys_fence *_fence)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
- int ret;
+ VkResult result;
assert(sem_info);
if (!cs->ws->use_ib_bos) {
- ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
- cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
- } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
- ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
- cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
- } else {
- ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
+ result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
+ } else if (can_patch) {
+ result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
+ cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
+ } else {
+ result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
+ cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
}
radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
- return ret;
+ return result;
}
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
return ret;
}
-static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
+static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
FILE* file,
const int *trace_ids, int trace_id_count)
{
}
}
-static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
- enum radeon_ctx_priority priority)
+static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
+ enum radeon_ctx_priority priority,
+ struct radeon_winsys_ctx **rctx)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
+ VkResult result;
int r;
if (!ctx)
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
- if (r) {
+	if (r == -EACCES) {
+ result = VK_ERROR_NOT_PERMITTED_EXT;
+ goto fail_create;
+ } else if (r) {
fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
- goto error_create;
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto fail_create;
}
ctx->ws = ws;
assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS|
- RADEON_FLAG_NO_INTERPROCESS_SHARING);
- if (ctx->fence_bo)
- ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
- if (ctx->fence_map)
- memset(ctx->fence_map, 0, 4096);
- return (struct radeon_winsys_ctx *)ctx;
-error_create:
+ RADEON_FLAG_CPU_ACCESS |
+ RADEON_FLAG_NO_INTERPROCESS_SHARING,
+ RADV_BO_PRIORITY_CS);
+ if (!ctx->fence_bo) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ goto fail_alloc;
+ }
+
+ ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
+ if (!ctx->fence_map) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ goto fail_map;
+ }
+
+ memset(ctx->fence_map, 0, 4096);
+
+ *rctx = (struct radeon_winsys_ctx *)ctx;
+ return VK_SUCCESS;
+
+fail_map:
+ ws->base.buffer_destroy(ctx->fence_bo);
+fail_alloc:
+ amdgpu_cs_ctx_free(ctx->ctx);
+fail_create:
FREE(ctx);
- return NULL;
+ return result;
}
static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
}
static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
+ const uint32_t *syncobj_override,
struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
+ const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
if (!syncobj)
return NULL;
for (unsigned i = 0; i < counts->syncobj_count; i++) {
struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
- sem->handle = counts->syncobj[i];
+ sem->handle = src[i];
}
chunk->chunk_id = chunk_id;
return syncobj;
}
-static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
- struct amdgpu_cs_request *request,
- struct radv_winsys_sem_info *sem_info)
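+/* Small per-winsys cache of DRM syncobjs, so the transient syncobjs used
+ * for waits don't have to be created and destroyed on every submission.
+ */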
+static int radv_amdgpu_cache_alloc_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *dst)
+{
+ pthread_mutex_lock(&ws->syncobj_lock);
+ if (count > ws->syncobj_capacity) {
+ if (ws->syncobj_capacity > UINT32_MAX / 2)
+ goto fail;
+
+ unsigned new_capacity = MAX2(count, ws->syncobj_capacity * 2);
+ uint32_t *n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
+ if (!n)
+ goto fail;
+ ws->syncobj_capacity = new_capacity;
+ ws->syncobj = n;
+ }
+
+	while (ws->syncobj_count < count) {
+ int r = amdgpu_cs_create_syncobj(ws->dev, ws->syncobj + ws->syncobj_count);
+ if (r)
+ goto fail;
+ ++ws->syncobj_count;
+ }
+
+ for (unsigned i = 0; i < count; ++i)
+ dst[i] = ws->syncobj[--ws->syncobj_count];
+
+ pthread_mutex_unlock(&ws->syncobj_lock);
+ return 0;
+
+fail:
+ pthread_mutex_unlock(&ws->syncobj_lock);
+ return -ENOMEM;
+}
+
+static void radv_amdgpu_cache_free_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *src)
+{
+ pthread_mutex_lock(&ws->syncobj_lock);
+
+ uint32_t cache_count = MIN2(count, UINT32_MAX - ws->syncobj_count);
+ if (cache_count + ws->syncobj_count > ws->syncobj_capacity) {
+ unsigned new_capacity = MAX2(ws->syncobj_count + cache_count, ws->syncobj_capacity * 2);
+ uint32_t* n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
+ if (n) {
+ ws->syncobj_capacity = new_capacity;
+ ws->syncobj = n;
+ }
+ }
+
+ for (unsigned i = 0; i < count; ++i) {
+ if (ws->syncobj_count < ws->syncobj_capacity)
+ ws->syncobj[ws->syncobj_count++] = src[i];
+ else
+ amdgpu_cs_destroy_syncobj(ws->dev, src[i]);
+ }
+
+ pthread_mutex_unlock(&ws->syncobj_lock);
+}
+
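+/* With timeline syncobj support, each wait syncobj's payload is copied into
+ * a cached temporary (with WAIT_FOR_SUBMIT semantics) and the original is
+ * reset, preserving binary-semaphore behaviour for the submission.
+ */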
+static int radv_amdgpu_cs_prepare_syncobjs(struct radv_amdgpu_winsys *ws,
+ struct radv_winsys_sem_counts *counts,
+ uint32_t **out_syncobjs)
+{
+ int r = 0;
+
+ if (!ws->info.has_timeline_syncobj || !counts->syncobj_count) {
+ *out_syncobjs = NULL;
+ return 0;
+ }
+
+ *out_syncobjs = malloc(counts->syncobj_count * sizeof(**out_syncobjs));
+ if (!*out_syncobjs)
+ return -ENOMEM;
+
+ r = radv_amdgpu_cache_alloc_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
+ if (r)
+ return r;
+
+ for (unsigned i = 0; i < counts->syncobj_count; ++i) {
+ r = amdgpu_cs_syncobj_transfer(ws->dev, (*out_syncobjs)[i], 0, counts->syncobj[i], 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT);
+ if (r)
+ goto fail;
+ }
+
+ r = amdgpu_cs_syncobj_reset(ws->dev, counts->syncobj, counts->syncobj_reset_count);
+ if (r)
+ goto fail;
+
+ return 0;
+fail:
+ radv_amdgpu_cache_free_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
+ free(*out_syncobjs);
+ *out_syncobjs = NULL;
+ return r;
+}
+
+static VkResult
+radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+ struct radv_amdgpu_cs_request *request,
+ struct radv_winsys_sem_info *sem_info)
{
int r;
int num_chunks;
struct drm_amdgpu_cs_chunk_data *chunk_data;
struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
+ bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
+ struct drm_amdgpu_bo_list_in bo_list_in;
+ uint32_t *in_syncobjs = NULL;
int i;
struct amdgpu_cs_fence *sem;
+ uint32_t bo_list = 0;
+ VkResult result = VK_SUCCESS;
user_fence = (request->fence_info.handle != NULL);
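+	/* Worst-case chunk count: the IBs, fence data, an optional BO-handles
+	 * chunk, and up to three semaphore/dependency chunks.
+	 */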
- size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
+ size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;
- chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
+ chunks = malloc(sizeof(chunks[0]) * size);
+ if (!chunks)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
size = request->number_of_ibs + (user_fence ? 1 : 0);
- chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
+ chunk_data = malloc(sizeof(chunk_data[0]) * size);
+ if (!chunk_data) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto error_out;
+ }
num_chunks = request->number_of_ibs;
for (i = 0; i < request->number_of_ibs; i++) {
}
if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
+ r = radv_amdgpu_cs_prepare_syncobjs(ctx->ws, &sem_info->wait, &in_syncobjs);
+		if (r) {
+			result = VK_ERROR_OUT_OF_HOST_MEMORY;
+			goto error_out;
+		}
+
wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
+ in_syncobjs,
&chunks[num_chunks],
AMDGPU_CHUNK_ID_SYNCOBJ_IN);
if (!wait_syncobj) {
- r = -ENOMEM;
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto error_out;
}
num_chunks++;
}
if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
- sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
+ sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
if (!sem_dependencies) {
- r = -ENOMEM;
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto error_out;
}
+
int sem_count = 0;
+
for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
if (!sem->context)
if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
+ NULL,
&chunks[num_chunks],
AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
if (!signal_syncobj) {
- r = -ENOMEM;
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto error_out;
}
num_chunks++;
}
- r = amdgpu_cs_submit_raw(ctx->ws->dev,
+ if (use_bo_list_create) {
+ /* Legacy path creating the buffer list handle and passing it
+ * to the CS ioctl.
+ */
+ r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
+ request->handles, &bo_list);
+ if (r) {
+ if (r == -ENOMEM) {
+ fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ } else {
+ fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
+ result = VK_ERROR_UNKNOWN;
+ }
+ goto error_out;
+ }
+ } else {
+ /* Standard path passing the buffer list via the CS ioctl. */
+ bo_list_in.operation = ~0;
+ bo_list_in.list_handle = ~0;
+ bo_list_in.bo_number = request->num_handles;
+ bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;
+
+ chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+ chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
+ chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
+ num_chunks++;
+ }
+
+ r = amdgpu_cs_submit_raw2(ctx->ws->dev,
ctx->ctx,
- request->resources,
+ bo_list,
num_chunks,
chunks,
&request->seq_no);
+
+ if (bo_list)
+ amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
+
+ if (r) {
+ if (r == -ENOMEM) {
+ fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
+ } else if (r == -ECANCELED) {
+ fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
+ result = VK_ERROR_DEVICE_LOST;
+ } else {
+ fprintf(stderr, "amdgpu: The CS has been rejected, "
+ "see dmesg for more information (%i).\n", r);
+ result = VK_ERROR_UNKNOWN;
+ }
+ }
+
error_out:
+ if (in_syncobjs) {
+ radv_amdgpu_cache_free_syncobjs(ctx->ws, sem_info->wait.syncobj_count, in_syncobjs);
+ free(in_syncobjs);
+ }
+ free(chunks);
+ free(chunk_data);
free(sem_dependencies);
free(wait_syncobj);
free(signal_syncobj);
- return r;
+ return result;
}
static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
+ bool create_signaled,
uint32_t *handle)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
- return amdgpu_cs_create_syncobj(ws->dev, handle);
+ uint32_t flags = 0;
+
+ if (create_signaled)
+ flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
+
+ return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
}
static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
amdgpu_cs_destroy_syncobj(ws->dev, handle);
}
+static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
+ uint32_t handle)
+{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+ amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
+}
+
+static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
+ uint32_t handle)
+{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+ amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
+}
+
+static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
+ uint32_t handle_count, bool wait_all, uint64_t timeout)
+{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+ uint32_t tmp;
+
+	/* Kernel timeouts are signed, while Vulkan timeouts are unsigned; clamp to INT64_MAX. */
+ timeout = MIN2(timeout, INT64_MAX);
+
+ int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
+ &tmp);
+ if (ret == 0) {
+ return true;
+ } else if (ret == -ETIME) {
+ return false;
+ } else {
+		fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed! (%i)\n", ret);
+ return false;
+ }
+}
+
static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
uint32_t syncobj,
int *fd)
return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}
+
+static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
+ uint32_t syncobj,
+ int *fd)
+{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+
+ return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
+}
+
+static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
+ uint32_t syncobj,
+ int fd)
+{
+ struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+
+ return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
+}
+
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.ctx_create = radv_amdgpu_ctx_create;
ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
ws->base.create_fence = radv_amdgpu_create_fence;
ws->base.destroy_fence = radv_amdgpu_destroy_fence;
+ ws->base.reset_fence = radv_amdgpu_reset_fence;
+ ws->base.signal_fence = radv_amdgpu_signal_fence;
+ ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
ws->base.create_sem = radv_amdgpu_create_sem;
ws->base.destroy_sem = radv_amdgpu_destroy_sem;
ws->base.create_syncobj = radv_amdgpu_create_syncobj;
ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
+ ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
+ ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
+ ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
ws->base.export_syncobj = radv_amdgpu_export_syncobj;
ws->base.import_syncobj = radv_amdgpu_import_syncobj;
+ ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
+ ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
ws->base.fence_wait = radv_amdgpu_fence_wait;
+ ws->base.fences_wait = radv_amdgpu_fences_wait;
}