}
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array,
pSubmits[i].commandBufferCount,
+ (struct radeon_winsys_sem **)pSubmits[i].pWaitSemaphores,
+ pSubmits[i].waitSemaphoreCount,
+ (struct radeon_winsys_sem **)pSubmits[i].pSignalSemaphores,
+ pSubmits[i].signalSemaphoreCount,
can_patch, base_fence);
if (ret)
radv_loge("failed to submit CS %d\n", i);
if (fence) {
if (!submitCount)
ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, &queue->device->empty_cs,
- 1, false, base_fence);
+ 1, NULL, 0, NULL, 0, false, base_fence);
fence->submitted = true;
}
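For context, a sketch of the application side: the wait and signal semaphores that radv_QueueSubmit forwards into cs_submit come straight from the VkSubmitInfo the app fills in. This is standard core Vulkan; the handle names (queue, cmd_buffer, wait_sem, signal_sem) are illustrative and assumed created/recorded elsewhere:

    /* Sketch only: queue, cmd_buffer and both semaphores are assumed
     * to have been created and recorded elsewhere. */
    VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    VkSubmitInfo submit = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = &wait_sem,
        .pWaitDstStageMask = &wait_stage,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmd_buffer,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &signal_sem,
    };
    vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);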
// Queue semaphore functions
VkResult radv_CreateSemaphore(
- VkDevice device,
+ VkDevice _device,
const VkSemaphoreCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore)
{
- /* The DRM execbuffer ioctl always execute in-oder, even between different
- * rings. As such, there's nothing to do for the user space semaphore.
- */
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ struct radeon_winsys_sem *sem;
- *pSemaphore = (VkSemaphore)1;
+ sem = device->ws->create_sem(device->ws);
+ if (!sem)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ *pSemaphore = (VkSemaphore)sem;
return VK_SUCCESS;
}
void radv_DestroySemaphore(
- VkDevice device,
- VkSemaphore semaphore,
+ VkDevice _device,
+ VkSemaphore _semaphore,
const VkAllocationCallbacks* pAllocator)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+ struct radeon_winsys_sem *sem;
+ if (!_semaphore)
+ return;
+
+ sem = (struct radeon_winsys_sem *)_semaphore;
+ device->ws->destroy_sem(sem);
}
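Under this scheme a VkSemaphore is nothing more than the winsys semaphore pointer cast to a handle, so create and destroy map one-to-one onto the new winsys hooks. A minimal application-side round trip (handle names illustrative):

    VkSemaphoreCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
    };
    VkSemaphore sem;
    if (vkCreateSemaphore(dev, &info, NULL, &sem) == VK_SUCCESS) {
        /* ... use sem in VkSubmitInfo wait/signal lists ... */
        vkDestroySemaphore(dev, sem, NULL);
    }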
VkResult radv_CreateEvent(
struct radeon_winsys_bo;
struct radeon_winsys_fence;
+struct radeon_winsys_sem;
struct radeon_winsys {
void (*destroy)(struct radeon_winsys *ws);
int queue_index,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
+ struct radeon_winsys_sem **wait_sem,
+ unsigned wait_sem_count,
+ struct radeon_winsys_sem **signal_sem,
+ unsigned signal_sem_count,
bool can_patch,
struct radeon_winsys_fence *fence);
struct radeon_winsys_fence *fence,
bool absolute,
uint64_t timeout);
+
+ struct radeon_winsys_sem *(*create_sem)(struct radeon_winsys *ws);
+ void (*destroy_sem)(struct radeon_winsys_sem *sem);
+
};
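The two new hooks treat struct radeon_winsys_sem as a fully opaque token; nothing in the interface constrains what it points at. A hypothetical no-op backend (names invented for illustration, not part of this patch) could satisfy the contract like this:

    #include <stdlib.h>

    /* Sketch only: any unique non-NULL pointer works as the token. */
    static struct radeon_winsys_sem *null_winsys_create_sem(struct radeon_winsys *ws)
    {
        return calloc(1, sizeof(int));
    }

    static void null_winsys_destroy_sem(struct radeon_winsys_sem *sem)
    {
        free(sem);
    }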
static inline void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
int queue_idx,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
+ struct radeon_winsys_sem **wait_sem,
+ unsigned wait_sem_count,
+ struct radeon_winsys_sem **signal_sem,
+ unsigned signal_sem_count,
bool can_patch,
struct radeon_winsys_fence *_fence)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
+ struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
+ int ret;
+ unsigned i;
+
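+ /* Semaphore waits attach to the next submission on this ring
+  * (libdrm behaviour), so queue them before the CSs go in. */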
+ for (i = 0; i < wait_sem_count; i++) {
+ amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)wait_sem[i];
+ amdgpu_cs_wait_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
+ sem);
+ }
if (!cs->ws->use_ib_bos) {
- return radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
cs_count, _fence);
} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && false) {
- return radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
cs_count, _fence);
} else {
- return radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
cs_count, _fence);
}
+
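+ /* Signalling binds each semaphore to the ring's last fence
+  * (libdrm behaviour), so it must happen after the submission. */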
+ for (i = 0; i < signal_sem_count; i++) {
+ amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)signal_sem[i];
+ amdgpu_cs_signal_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
+ sem);
+ }
+ return ret;
}
static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
return true;
}
+static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
+{
+ int ret;
+ amdgpu_semaphore_handle sem;
+
+ ret = amdgpu_cs_create_semaphore(&sem);
+ if (ret)
+ return NULL;
+ return (struct radeon_winsys_sem *)sem;
+}
+
+static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
+{
+ amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)_sem;
+ amdgpu_cs_destroy_semaphore(sem);
+}
+
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.ctx_create = radv_amdgpu_ctx_create;
ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
ws->base.create_fence = radv_amdgpu_create_fence;
ws->base.destroy_fence = radv_amdgpu_destroy_fence;
+ ws->base.create_sem = radv_amdgpu_create_sem;
+ ws->base.destroy_sem = radv_amdgpu_destroy_sem;
ws->base.fence_wait = radv_amdgpu_fence_wait;
}