void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
+enum anv_fence_type {
+ ANV_FENCE_TYPE_NONE = 0,
+ ANV_FENCE_TYPE_BO,
+ ANV_FENCE_TYPE_SYNCOBJ,
+};
+
enum anv_fence_state {
 /** Indicates that this is a new (or newly reset) fence */
ANV_FENCE_STATE_RESET,
ANV_FENCE_STATE_SIGNALED,
};
+struct anv_fence_impl {
+ enum anv_fence_type type;
+
+ union {
+ /** Fence implementation for BO fences
+ *
+ * These fences use a BO and a set of CPU-tracked state flags. The BO
+ * is added to the object list of the last execbuf call in a QueueSubmit
+ * and is marked EXEC_WRITE. The state flags track when the BO has been
+ * submitted to the kernel. We need to do this because Vulkan lets you
+ * wait on a fence that has not yet been submitted and I915_GEM_BUSY
+ * will say it's idle in this case.
+ */
+ struct {
+ struct anv_bo bo;
+ enum anv_fence_state state;
+ } bo;
+ };
+};
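+
+/* A minimal sketch, assuming a hypothetical helper name: this is the
+ * CPU-side transition the comment above describes. Once the execbuf that
+ * references the fence BO has reached the kernel, QueueSubmit would advance
+ * the BO fence from RESET to SUBMITTED so that later status queries know
+ * the kernel's busy answer can be trusted.
+ */
+static inline void
+anv_fence_impl_mark_submitted(struct anv_fence_impl *impl)
+{
+   assert(impl->type == ANV_FENCE_TYPE_BO);
+   assert(impl->bo.state == ANV_FENCE_STATE_RESET);
+   impl->bo.state = ANV_FENCE_STATE_SUBMITTED;
+}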
+
struct anv_fence {
- struct anv_bo bo;
- enum anv_fence_state state;
+ /* Permanent fence state. Every fence has some form of permanent state
+ * (type != ANV_FENCE_TYPE_NONE). This may be a BO to fence on (for
+ * cross-process fences) or it could just be a dummy for use internally.
+ */
+ struct anv_fence_impl permanent;
+
+ /* Temporary fence state. A fence *may* have temporary state. That state
+ * is added to the fence by an import operation and is reset back to
+ * ANV_FENCE_TYPE_NONE when the fence is reset. A fence with temporary
+ * state cannot be signaled because the fence must already be signaled
+ * before the temporary state can be exported from the fence in the other
+ * process and imported here.
+ */
+ struct anv_fence_impl temporary;
};
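+
+/* A minimal sketch, assuming a hypothetical helper name: operations that
+ * consume a fence would act on the temporary state when an import has
+ * populated it, and fall back to the permanent state otherwise.
+ */
+static inline struct anv_fence_impl *
+anv_fence_get_active_impl(struct anv_fence *fence)
+{
+   if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
+      return &fence->temporary;
+   else
+      return &fence->permanent;
+}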
struct anv_event {
VkFence* pFence)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_bo fence_bo;
struct anv_fence *fence;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
- VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
+ fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (fence == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
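+   /* vk_zalloc2 zero-initializes the allocation, so fence->temporary.type is
+    * already ANV_FENCE_TYPE_NONE (defined as 0 above).
+    */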
+ fence->permanent.type = ANV_FENCE_TYPE_BO;
+
+ VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
+ &fence->permanent.bo.bo, 4096);
- if (result != VK_SUCCESS)
- return result;
+ if (result != VK_SUCCESS) {
+ vk_free2(&device->alloc, pAllocator, fence);
+ return result;
+ }
- /* Fences are small. Just store the CPU data structure in the BO. */
- fence = fence_bo.map;
- fence->bo = fence_bo;
-
if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
- fence->state = ANV_FENCE_STATE_SIGNALED;
+ fence->permanent.bo.state = ANV_FENCE_STATE_SIGNALED;
} else {
- fence->state = ANV_FENCE_STATE_RESET;
+ fence->permanent.bo.state = ANV_FENCE_STATE_RESET;
}
*pFence = anv_fence_to_handle(fence);
return VK_SUCCESS;
}
+static void
+anv_fence_impl_cleanup(struct anv_device *device,
+ struct anv_fence_impl *impl)
+{
+ switch (impl->type) {
+ case ANV_FENCE_TYPE_NONE:
+ /* Dummy. Nothing to do */
+ return;
+
+ case ANV_FENCE_TYPE_BO:
+ anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
+ return;
+ }
+
+ unreachable("Invalid fence type");
+}
+
void anv_DestroyFence(
VkDevice _device,
VkFence _fence,
if (!fence)
return;
- assert(fence->bo.map == fence);
- anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
+ anv_fence_impl_cleanup(device, &fence->temporary);
+ anv_fence_impl_cleanup(device, &fence->permanent);
+
+ vk_free2(&device->alloc, pAllocator, fence);
}
VkResult anv_ResetFences(
{
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- fence->state = ANV_FENCE_STATE_RESET;
+
+ assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+ struct anv_fence_impl *impl = &fence->permanent;
+
+ switch (impl->type) {
+ case ANV_FENCE_TYPE_BO:
+ impl->bo.state = ANV_FENCE_STATE_RESET;
+ break;
+
+ default:
+ unreachable("Invalid fence type");
+ }
}
return VK_SUCCESS;
if (unlikely(device->lost))
return VK_ERROR_DEVICE_LOST;
- switch (fence->state) {
- case ANV_FENCE_STATE_RESET:
- /* If it hasn't even been sent off to the GPU yet, it's not ready */
- return VK_NOT_READY;
-
- case ANV_FENCE_STATE_SIGNALED:
- /* It's been signaled, return success */
- return VK_SUCCESS;
+ assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+ struct anv_fence_impl *impl = &fence->permanent;
- case ANV_FENCE_STATE_SUBMITTED: {
- VkResult result = anv_device_bo_busy(device, &fence->bo);
- if (result == VK_SUCCESS) {
- fence->state = ANV_FENCE_STATE_SIGNALED;
+ switch (impl->type) {
+ case ANV_FENCE_TYPE_BO:
+ switch (impl->bo.state) {
+ case ANV_FENCE_STATE_RESET:
+ /* If it hasn't even been sent off to the GPU yet, it's not ready */
+ return VK_NOT_READY;
+
+ case ANV_FENCE_STATE_SIGNALED:
+ /* It's been signaled, return success */
return VK_SUCCESS;
- } else {
- return result;
+
+ case ANV_FENCE_STATE_SUBMITTED: {
+ VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
+ if (result == VK_SUCCESS) {
+ impl->bo.state = ANV_FENCE_STATE_SIGNALED;
+ return VK_SUCCESS;
+ } else {
+ return result;
+ }
}
- }
+ default:
+ unreachable("Invalid fence status");
+ }
+
default:
- unreachable("Invalid fence status");
+ unreachable("Invalid fence type");
}
}
#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
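
/* For reference: INT_TYPE_MAX(int64_t) evaluates to INT64_MAX
 * (0x7fffffffffffffff). That is the value the BO wait path clamps user
 * timeouts to, per the comment below about the signed 64-bit GEM_WAIT
 * timeout being broken on some kernel releases.
 */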
-VkResult anv_WaitForFences(
- VkDevice _device,
- uint32_t fenceCount,
- const VkFence* pFences,
- VkBool32 waitAll,
- uint64_t _timeout)
+static VkResult
+anv_wait_for_bo_fences(struct anv_device *device,
+ uint32_t fenceCount,
+ const VkFence *pFences,
+ bool waitAll,
+ uint64_t _timeout)
{
- ANV_FROM_HANDLE(anv_device, device, _device);
int ret;
- if (unlikely(device->lost))
- return VK_ERROR_DEVICE_LOST;
-
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
 * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
* for a couple of kernel releases. Since there's no way to know
bool signaled_fences = false;
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- switch (fence->state) {
+
+ /* This function assumes that all fences are BO fences and that they
+ * have no temporary state. Since BO fences will never be exported,
+ * this should be a safe assumption.
+ */
+ assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
+ assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+ struct anv_fence_impl *impl = &fence->permanent;
+
+ switch (impl->bo.state) {
case ANV_FENCE_STATE_RESET:
/* This fence hasn't been submitted yet, we'll catch it the next
* time around. Yes, this may mean we dead-loop but, short of
/* These are the fences we really care about. Go ahead and wait
* on it until we hit a timeout.
*/
- result = anv_device_wait(device, &fence->bo, timeout);
+ result = anv_device_wait(device, &impl->bo.bo, timeout);
switch (result) {
case VK_SUCCESS:
- fence->state = ANV_FENCE_STATE_SIGNALED;
+ impl->bo.state = ANV_FENCE_STATE_SIGNALED;
signaled_fences = true;
if (!waitAll)
goto done;
uint32_t now_pending_fences = 0;
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- if (fence->state == ANV_FENCE_STATE_RESET)
+ if (fence->permanent.bo.state == ANV_FENCE_STATE_RESET)
now_pending_fences++;
}
assert(now_pending_fences <= pending_fences);
return result;
}
+VkResult anv_WaitForFences(
+ VkDevice _device,
+ uint32_t fenceCount,
+ const VkFence* pFences,
+ VkBool32 waitAll,
+ uint64_t timeout)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
+ return anv_wait_for_bo_fences(device, fenceCount, pFences, waitAll, timeout);
+}
+
// Queue semaphore functions
VkResult anv_CreateSemaphore(