if (result != VK_SUCCESS)
return result;
}
- fence->submitted = true;
}
return VK_SUCCESS;
}
fence_emitted = true;
- if (fence)
- fence->submitted = true;
}
radv_free_sem_info(&sem_info);
if (result != VK_SUCCESS)
return result;
}
- fence->submitted = true;
}
return VK_SUCCESS;
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
fence->fence_wsi = NULL;
- fence->submitted = false;
- fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
fence->temp_syncobj = 0;
if (device->always_use_syncobj || handleTypes) {
int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
fence->syncobj = 0;
+ if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
+ device->ws->signal_fence(fence->fence);
}
*pFence = radv_fence_to_handle(fence);
}
-static bool radv_all_fences_plain_and_submitted(uint32_t fenceCount, const VkFence *pFences)
+static bool radv_all_fences_plain_and_submitted(struct radv_device *device,
+ uint32_t fenceCount, const VkFence *pFences)
{
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
if (fence->fence == NULL || fence->syncobj ||
- fence->temp_syncobj ||
- (!fence->signalled && !fence->submitted))
+ fence->temp_syncobj || fence->fence_wsi ||
+ (!device->ws->is_fence_waitable(fence->fence)))
return false;
}
return true;
if (!waitAll && fenceCount > 1) {
/* Not doing this by default for waitAll, due to needing to allocate twice. */
- if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(fenceCount, pFences)) {
+ if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(device, fenceCount, pFences)) {
uint32_t wait_count = 0;
struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
if (!fences)
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->signalled) {
+ if (device->ws->fence_wait(device->ws, fence->fence, false, 0)) {
free(fences);
return VK_SUCCESS;
}
continue;
}
- if (fence->signalled)
- continue;
-
if (fence->fence) {
- if (!fence->submitted) {
- while(radv_get_current_time() <= timeout &&
- !fence->submitted)
+ if (!device->ws->is_fence_waitable(fence->fence)) {
+ while(!device->ws->is_fence_waitable(fence->fence) &&
+ radv_get_current_time() <= timeout)
/* Do nothing */;
-
- if (!fence->submitted)
- return VK_TIMEOUT;
-
- /* Recheck as it may have been set by
- * submitting operations. */
-
- if (fence->signalled)
- continue;
}
expired = device->ws->fence_wait(device->ws,
if (result != VK_SUCCESS)
return result;
}
-
- fence->signalled = true;
}
return VK_SUCCESS;
for (unsigned i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- fence->submitted = fence->signalled = false;
+ if (fence->fence)
+ device->ws->reset_fence(fence->fence);
/* Per spec, we first restore the permanent payload, and then reset, so
* having a temp syncobj should not skip resetting the permanent syncobj. */
return success ? VK_SUCCESS : VK_NOT_READY;
}
- if (fence->signalled)
- return VK_SUCCESS;
- if (!fence->submitted)
- return VK_NOT_READY;
if (fence->fence) {
if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
return VK_NOT_READY;
/* Allocates a new winsys fence in the "unsubmitted" state.
 *
 * The sentinel UINT64_MAX in fence.fence marks a fence that has never been
 * submitted (not yet waitable); 0 marks an explicitly signalled fence.
 * Returns NULL on allocation failure.
 */
static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
	/* Bug fix: the original dereferenced the calloc result without a
	 * NULL check, crashing on out-of-memory instead of reporting it. */
	if (!fence)
		return NULL;
	fence->fence.fence = UINT64_MAX;
	return (struct radeon_winsys_fence*)fence;
}
free(fence);
}
+static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ fence->fence.fence = UINT64_MAX;
+}
+
+static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ fence->fence.fence = 0;
+}
+
+static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
+{
+ struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
+ return fence->fence.fence < UINT64_MAX;
+}
+
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
struct radeon_winsys_fence *_fence,
bool absolute,
int r;
uint32_t expired = 0;
+ /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
+ if (fence->fence.fence == UINT64_MAX)
+ return false;
+
+ if (fence->fence.fence == 0)
+ return true;
+
if (fence->user_ptr) {
if (*fence->user_ptr >= fence->fence.fence)
return true;
ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
ws->base.create_fence = radv_amdgpu_create_fence;
ws->base.destroy_fence = radv_amdgpu_destroy_fence;
+ ws->base.reset_fence = radv_amdgpu_reset_fence;
+ ws->base.signal_fence = radv_amdgpu_signal_fence;
+ ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
ws->base.create_sem = radv_amdgpu_create_sem;
ws->base.destroy_sem = radv_amdgpu_destroy_sem;
ws->base.create_syncobj = radv_amdgpu_create_syncobj;