#include <fcntl.h>
#include <unistd.h>
-#include <sys/eventfd.h>
#include "anv_private.h"
#include "vk_util.h"
#include "genxml/gen7_pack.h"
+/* Return the current CLOCK_MONOTONIC time in nanoseconds. */
+uint64_t anv_gettime_ns(void)
+{
+ struct timespec current;
+ clock_gettime(CLOCK_MONOTONIC, &current);
+ return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
+}
+
+/* Convert a relative timeout (ns) into an absolute CLOCK_MONOTONIC
+ * deadline (ns).  The result is clamped so that it never exceeds
+ * INT64_MAX, since kernel wait ioctls take signed 64-bit timeouts.
+ * A zero timeout stays zero, meaning "do not wait".
+ */
+uint64_t anv_get_absolute_timeout(uint64_t timeout)
+{
+ if (timeout == 0)
+ return 0;
+ uint64_t current_time = anv_gettime_ns();
+ uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;
+
+ timeout = MIN2(max_timeout, timeout);
+
+ return (current_time + timeout);
+}
+
+/* Convert an absolute CLOCK_MONOTONIC deadline back into a relative
+ * timeout (ns), returning 0 if the deadline has already passed and
+ * clamping to INT64_MAX for the broken-kernel reasons described below.
+ */
+static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
+{
+ uint64_t now = anv_gettime_ns();
+
+ /* We don't want negative timeouts.
+ *
+ * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
+ * supposed to block indefinitely timeouts < 0. Unfortunately,
+ * this was broken for a couple of kernel releases. Since there's
+ * no way to know whether or not the kernel we're using is one of
+ * the broken ones, the best we can do is to clamp the timeout to
+ * INT64_MAX. This limits the maximum timeout from 584 years to
+ * 292 years - likely not a big deal.
+ */
+ if (abs_timeout < now)
+ return 0;
+
+ uint64_t rel_timeout = abs_timeout - now;
+ if (rel_timeout > (uint64_t) INT64_MAX)
+ rel_timeout = INT64_MAX;
+
+ return rel_timeout;
+}
+
+/* Submit an execbuffer to the kernel and write the kernel-assigned GPU
+ * offsets back into the corresponding anv_bo's.  On ioctl failure the
+ * device is marked lost; the real error is unknowable at this point.
+ */
VkResult
anv_device_execbuf(struct anv_device *device,
struct drm_i915_gem_execbuffer2 *execbuf,
struct anv_bo **execbuf_bos)
{
- int ret = anv_gem_execbuffer(device, execbuf);
+ /* With no_hw set, skip the real ioctl and pretend it succeeded. */
+ int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
if (ret != 0) {
/* We don't know the real error. */
- device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
+ return anv_device_set_lost(device, "execbuf2 failed: %m");
}
struct drm_i915_gem_exec_object2 *objects =
(void *)(uintptr_t)execbuf->buffers_ptr;
- for (uint32_t k = 0; k < execbuf->buffer_count; k++)
+ for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
+ /* A pinned BO must not have been relocated by the kernel. */
+ if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
+ assert(execbuf_bos[k]->offset == objects[k].offset);
execbuf_bos[k]->offset = objects[k].offset;
+ }
return VK_SUCCESS;
}
+/* Minimal queue initialization: set the ICD loader magic, the device
+ * back-pointer, and clear the flags.  No kernel resources are created.
+ */
+VkResult
+anv_queue_init(struct anv_device *device, struct anv_queue *queue)
+{
+ queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ queue->device = device;
+ queue->flags = 0;
+
+ return VK_SUCCESS;
+}
+
+/* Nothing to tear down: anv_queue_init() allocates no resources. */
+void
+anv_queue_finish(struct anv_queue *queue)
+{
+}
+
+/* Submit a small one-off batch and wait for it to complete.  A NULL
+ * batch submits the device's pre-built trivial (no-op) batch BO instead
+ * of allocating one from the pool.
+ */
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
struct anv_batch *batch)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec2_objects[1];
- struct anv_bo bo, *exec_bos[1];
+ struct anv_bo *bo;
VkResult result = VK_SUCCESS;
uint32_t size;
- /* Kernel driver requires 8 byte aligned batch length */
- size = align_u32(batch->next - batch->start, 8);
- result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
- if (result != VK_SUCCESS)
- return result;
+ if (batch) {
+ /* Kernel driver requires 8 byte aligned batch length */
+ size = align_u32(batch->next - batch->start, 8);
+ result = anv_bo_pool_alloc(&device->batch_bo_pool, size, &bo);
+ if (result != VK_SUCCESS)
+ return result;
- memcpy(bo.map, batch->start, size);
- if (!device->info.has_llc)
- gen_flush_range(bo.map, size);
+ memcpy(bo->map, batch->start, size);
+ /* Without LLC the CPU writes must be flushed before the GPU reads. */
+ if (!device->info.has_llc)
+ gen_flush_range(bo->map, size);
+ } else {
+ size = device->trivial_batch_bo->size;
+ bo = device->trivial_batch_bo;
+ }
- exec_bos[0] = &bo;
- exec2_objects[0].handle = bo.gem_handle;
+ exec2_objects[0].handle = bo->gem_handle;
exec2_objects[0].relocation_count = 0;
exec2_objects[0].relocs_ptr = 0;
exec2_objects[0].alignment = 0;
- exec2_objects[0].offset = bo.offset;
- exec2_objects[0].flags = 0;
+ exec2_objects[0].offset = bo->offset;
+ exec2_objects[0].flags = bo->flags;
exec2_objects[0].rsvd1 = 0;
exec2_objects[0].rsvd2 = 0;
execbuf.rsvd1 = device->context_id;
execbuf.rsvd2 = 0;
- result = anv_device_execbuf(device, &execbuf, exec_bos);
+ /* Decode and print the batch when INTEL_DEBUG=bat is set. */
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ gen_print_batch(&device->decoder_ctx, bo->map,
+ bo->size, bo->offset, false);
+ }
+
+ result = anv_device_execbuf(device, &execbuf, &bo);
if (result != VK_SUCCESS)
goto fail;
- result = anv_device_wait(device, &bo, INT64_MAX);
+ result = anv_device_wait(device, bo, INT64_MAX);
fail:
- anv_bo_pool_free(&device->batch_bo_pool, &bo);
+ /* The trivial batch BO is device-owned; only free pool allocations. */
+ if (batch)
+ anv_bo_pool_free(&device->batch_bo_pool, bo);
return result;
}
* VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
* submit the same job again to this device.
*/
- result = vk_errorf(VK_ERROR_DEVICE_LOST, "vkQueueSubmit() failed");
- device->lost = true;
+ result = anv_device_set_lost(device, "vkQueueSubmit() failed");
}
pthread_mutex_unlock(&device->mutex);
} else {
fence->permanent.type = ANV_FENCE_TYPE_BO;
- VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
- &fence->permanent.bo.bo, 4096);
+ VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, 4096,
+ &fence->permanent.bo.bo);
if (result != VK_SUCCESS)
return result;
switch (impl->type) {
case ANV_FENCE_TYPE_NONE:
/* Dummy. Nothing to do */
- return;
+ break;
case ANV_FENCE_TYPE_BO:
- anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
- return;
+ anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
+ break;
case ANV_FENCE_TYPE_SYNCOBJ:
anv_gem_syncobj_destroy(device, impl->syncobj);
- return;
+ break;
+
+ case ANV_FENCE_TYPE_WSI:
+ impl->fence_wsi->destroy(impl->fence_wsi);
+ break;
+
+ default:
+ unreachable("Invalid fence type");
}
- unreachable("Invalid fence type");
+ impl->type = ANV_FENCE_TYPE_NONE;
}
void anv_DestroyFence(
* first restored. The remaining operations described therefore
* operate on the restored payload.
*/
- if (fence->temporary.type != ANV_FENCE_TYPE_NONE) {
+ if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
anv_fence_impl_cleanup(device, &fence->temporary);
- fence->temporary.type = ANV_FENCE_TYPE_NONE;
- }
struct anv_fence_impl *impl = &fence->permanent;
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
- if (unlikely(device->lost))
+ if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
struct anv_fence_impl *impl =
return VK_SUCCESS;
case ANV_BO_FENCE_STATE_SUBMITTED: {
- VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
+ VkResult result = anv_device_bo_busy(device, impl->bo.bo);
if (result == VK_SUCCESS) {
impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
return VK_SUCCESS;
return VK_NOT_READY;
} else {
/* We don't know the real error. */
- device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST,
- "drm_syncobj_wait failed: %m");
+ return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
}
} else {
return VK_SUCCESS;
}
}
-#define NSEC_PER_SEC 1000000000
-#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
-
-static uint64_t
-gettime_ns(void)
-{
- struct timespec current;
- clock_gettime(CLOCK_MONOTONIC, &current);
- return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
-}
-
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
uint32_t fenceCount,
const VkFence *pFences,
bool waitAll,
- uint64_t _timeout)
+ uint64_t abs_timeout_ns)
{
uint32_t *syncobjs = vk_zalloc(&device->alloc,
sizeof(*syncobjs) * fenceCount, 8,
syncobjs[i] = impl->syncobj;
}
- int64_t abs_timeout_ns = 0;
- if (_timeout > 0) {
- uint64_t current_ns = gettime_ns();
-
- /* Add but saturate to INT32_MAX */
- if (current_ns + _timeout < current_ns)
- abs_timeout_ns = INT64_MAX;
- else if (current_ns + _timeout > INT64_MAX)
- abs_timeout_ns = INT64_MAX;
- else
- abs_timeout_ns = current_ns + _timeout;
- }
-
/* The gem_syncobj_wait ioctl may return early due to an inherent
* limitation in the way it computes timeouts. Loop until we've actually
* passed the timeout.
do {
ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
abs_timeout_ns, waitAll);
- } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);
+ } while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);
vk_free(&device->alloc, syncobjs);
return VK_TIMEOUT;
} else {
/* We don't know the real error. */
- device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST,
- "drm_syncobj_wait failed: %m");
+ return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
}
} else {
return VK_SUCCESS;
uint32_t fenceCount,
const VkFence *pFences,
bool waitAll,
- uint64_t _timeout)
+ uint64_t abs_timeout_ns)
{
- int ret;
-
- /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
- * to block indefinitely timeouts <= 0. Unfortunately, this was broken
- * for a couple of kernel releases. Since there's no way to know
- * whether or not the kernel we're using is one of the broken ones, the
- * best we can do is to clamp the timeout to INT64_MAX. This limits the
- * maximum timeout from 584 years to 292 years - likely not a big deal.
- */
- int64_t timeout = MIN2(_timeout, INT64_MAX);
-
VkResult result = VK_SUCCESS;
uint32_t pending_fences = fenceCount;
while (pending_fences) {
/* These are the fences we really care about. Go ahead and wait
* on it until we hit a timeout.
*/
- result = anv_device_wait(device, &impl->bo.bo, timeout);
+ result = anv_device_wait(device, impl->bo.bo,
+ anv_get_relative_timeout(abs_timeout_ns));
switch (result) {
case VK_SUCCESS:
impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
assert(now_pending_fences <= pending_fences);
if (now_pending_fences == pending_fences) {
- struct timespec before;
- clock_gettime(CLOCK_MONOTONIC, &before);
-
- uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
- uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
- (timeout / NSEC_PER_SEC);
- abs_nsec %= NSEC_PER_SEC;
-
- /* Avoid roll-over in tv_sec on 32-bit systems if the user
- * provided timeout is UINT64_MAX
- */
- struct timespec abstime;
- abstime.tv_nsec = abs_nsec;
- abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));
+ struct timespec abstime = {
+ .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
+ .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
+ };
+ ASSERTED int ret;
ret = pthread_cond_timedwait(&device->queue_submit,
&device->mutex, &abstime);
assert(ret != EINVAL);
-
- struct timespec after;
- clock_gettime(CLOCK_MONOTONIC, &after);
- uint64_t time_elapsed =
- ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
- ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);
-
- if (time_elapsed >= timeout) {
+ if (anv_gettime_ns() >= abs_timeout_ns) {
pthread_mutex_unlock(&device->mutex);
result = VK_TIMEOUT;
goto done;
}
-
- timeout -= time_elapsed;
}
pthread_mutex_unlock(&device->mutex);
}
done:
- if (unlikely(device->lost))
+ if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
return result;
}
+/* Wait on a WSI fence by delegating to the WSI fence's own wait
+ * callback.  Only the permanent payload is consulted here.
+ */
+static VkResult
+anv_wait_for_wsi_fence(struct anv_device *device,
+ const VkFence _fence,
+ uint64_t abs_timeout)
+{
+ ANV_FROM_HANDLE(anv_fence, fence, _fence);
+ struct anv_fence_impl *impl = &fence->permanent;
+
+ return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
+}
+
+/* Generic fence wait used when the fence list mixes implementation
+ * types.  For wait-all (or a single fence), each fence is waited on
+ * sequentially with the type-specific helper.  For wait-any over mixed
+ * types there is no single kernel primitive, so we poll each fence with
+ * a zero timeout until one signals or the deadline passes.
+ */
+static VkResult
+anv_wait_for_fences(struct anv_device *device,
+ uint32_t fenceCount,
+ const VkFence *pFences,
+ bool waitAll,
+ uint64_t abs_timeout)
+{
+ VkResult result = VK_SUCCESS;
+
+ if (fenceCount <= 1 || waitAll) {
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+ switch (fence->permanent.type) {
+ case ANV_FENCE_TYPE_BO:
+ result = anv_wait_for_bo_fences(device, 1, &pFences[i],
+ true, abs_timeout);
+ break;
+ case ANV_FENCE_TYPE_SYNCOBJ:
+ result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
+ true, abs_timeout);
+ break;
+ case ANV_FENCE_TYPE_WSI:
+ result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
+ break;
+ case ANV_FENCE_TYPE_NONE:
+ result = VK_SUCCESS;
+ break;
+ }
+ if (result != VK_SUCCESS)
+ return result;
+ }
+ } else {
+ /* Wait-any over mixed fence types: busy-poll with zero timeout. */
+ do {
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
+ return VK_SUCCESS;
+ }
+ } while (anv_gettime_ns() < abs_timeout);
+ result = VK_TIMEOUT;
+ }
+ return result;
+}
+
+/* Return true iff every fence's permanent payload is a DRM syncobj. */
+static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
+{
+ for (uint32_t i = 0; i < fenceCount; ++i) {
+ ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+ if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
+ return false;
+ }
+ return true;
+}
+
+/* Return true iff every fence's permanent payload is a GEM BO fence. */
+static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
+{
+ for (uint32_t i = 0; i < fenceCount; ++i) {
+ ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+ if (fence->permanent.type != ANV_FENCE_TYPE_BO)
+ return false;
+ }
+ return true;
+}
+
VkResult anv_WaitForFences(
VkDevice _device,
uint32_t fenceCount,
{
ANV_FROM_HANDLE(anv_device, device, _device);
- if (unlikely(device->lost))
+ if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
- if (device->instance->physicalDevice.has_syncobj_wait) {
+ uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
+ if (anv_all_fences_syncobj(fenceCount, pFences)) {
return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
- waitAll, timeout);
- } else {
+ waitAll, abs_timeout);
+ } else if (anv_all_fences_bo(fenceCount, pFences)) {
return anv_wait_for_bo_fences(device, fenceCount, pFences,
- waitAll, timeout);
+ waitAll, abs_timeout);
+ } else {
+ return anv_wait_for_fences(device, fenceCount, pFences,
+ waitAll, abs_timeout);
}
}
-void anv_GetPhysicalDeviceExternalFencePropertiesKHR(
+void anv_GetPhysicalDeviceExternalFenceProperties(
VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
- VkExternalFencePropertiesKHR* pExternalFenceProperties)
+ const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+ VkExternalFenceProperties* pExternalFenceProperties)
{
ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
switch (pExternalFenceInfo->handleType) {
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
if (device->has_syncobj_wait) {
pExternalFenceProperties->exportFromImportedHandleTypes =
- VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
- VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
pExternalFenceProperties->compatibleHandleTypes =
- VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR |
- VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
+ VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
pExternalFenceProperties->externalFenceFeatures =
- VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
+ VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
+ VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
return;
}
break;
};
switch (pImportFenceFdInfo->handleType) {
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
break;
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
/* Sync files are a bit tricky. Because we want to continue using the
* syncobj implementation of WaitForFences, we don't use the sync file
* directly but instead import it into a syncobj.
if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return vk_errorf(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
break;
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec:
*/
close(fd);
- if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
+ if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
anv_fence_impl_cleanup(device, &fence->temporary);
fence->temporary = new_impl;
} else {
assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
switch (pGetFdInfo->handleType) {
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
if (fd < 0)
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
break;
}
- case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR: {
+ case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
if (fd < 0)
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
if (semaphore == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- const VkExportSemaphoreCreateInfoKHR *export =
- vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
- VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
+ const VkExportSemaphoreCreateInfo *export =
+ vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
+ VkExternalSemaphoreHandleTypeFlags handleTypes =
export ? export->handleTypes : 0;
if (handleTypes == 0) {
* queue, a dummy no-op semaphore is a perfectly valid implementation.
*/
semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
- } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
- assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
+ } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
+ assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
if (device->instance->physicalDevice.has_syncobj) {
semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
}
} else {
semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
- VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
- 4096, &semaphore->permanent.bo);
+ VkResult result = anv_device_alloc_bo(device, 4096,
+ ANV_BO_ALLOC_EXTERNAL |
+ ANV_BO_ALLOC_IMPLICIT_SYNC,
+ &semaphore->permanent.bo);
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, semaphore);
return result;
*/
assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
}
- } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
- assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR);
-
- semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
- semaphore->permanent.fd = -1;
+ } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
+ assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
+ if (device->instance->physicalDevice.has_syncobj) {
+ semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
+ semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
+ } else {
+ semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
+ semaphore->permanent.fd = -1;
+ }
} else {
assert(!"Unknown handle type");
vk_free2(&device->alloc, pAllocator, semaphore);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
case ANV_SEMAPHORE_TYPE_NONE:
case ANV_SEMAPHORE_TYPE_DUMMY:
/* Dummy. Nothing to do */
- return;
+ break;
case ANV_SEMAPHORE_TYPE_BO:
- anv_bo_cache_release(device, &device->bo_cache, impl->bo);
- return;
+ anv_device_release_bo(device, impl->bo);
+ break;
case ANV_SEMAPHORE_TYPE_SYNC_FILE:
close(impl->fd);
- return;
+ break;
case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
anv_gem_syncobj_destroy(device, impl->syncobj);
- return;
+ break;
+
+ default:
+ unreachable("Invalid semaphore type");
}
- unreachable("Invalid semaphore type");
+ impl->type = ANV_SEMAPHORE_TYPE_NONE;
}
void
return;
anv_semaphore_impl_cleanup(device, &semaphore->temporary);
- semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
}
void anv_DestroySemaphore(
vk_free2(&device->alloc, pAllocator, semaphore);
}
-void anv_GetPhysicalDeviceExternalSemaphorePropertiesKHR(
+void anv_GetPhysicalDeviceExternalSemaphoreProperties(
VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
- VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
+ const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+ VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
{
ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
switch (pExternalSemaphoreInfo->handleType) {
- case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
pExternalSemaphoreProperties->exportFromImportedHandleTypes =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
pExternalSemaphoreProperties->compatibleHandleTypes =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
pExternalSemaphoreProperties->externalSemaphoreFeatures =
- VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
return;
- case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
if (device->has_exec_fence) {
- pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes =
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
pExternalSemaphoreProperties->compatibleHandleTypes =
- VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
+ VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
pExternalSemaphoreProperties->externalSemaphoreFeatures =
- VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
- VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+ VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
return;
}
break;
};
switch (pImportSemaphoreFdInfo->handleType) {
- case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
if (device->instance->physicalDevice.has_syncobj) {
new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
-
- /* From the Vulkan spec:
- *
- * "Importing semaphore state from a file descriptor transfers
- * ownership of the file descriptor from the application to the
- * Vulkan implementation. The application must not perform any
- * operations on the file descriptor after a successful import."
- *
- * If the import fails, we leave the file descriptor open.
- */
- close(pImportSemaphoreFdInfo->fd);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
} else {
new_impl.type = ANV_SEMAPHORE_TYPE_BO;
- VkResult result = anv_bo_cache_import(device, &device->bo_cache,
- fd, 4096, &new_impl.bo);
+ VkResult result = anv_device_import_bo(device, fd,
+ ANV_BO_ALLOC_EXTERNAL |
+ ANV_BO_ALLOC_IMPLICIT_SYNC,
+ &new_impl.bo);
if (result != VK_SUCCESS)
return result;
+ if (new_impl.bo->size < 4096) {
+ anv_device_release_bo(device, new_impl.bo);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ }
+
/* If we're going to use this as a fence, we need to *not* have the
* EXEC_OBJECT_ASYNC bit set.
*/
assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
}
+
+ /* From the Vulkan spec:
+ *
+ * "Importing semaphore state from a file descriptor transfers
+ * ownership of the file descriptor from the application to the
+ * Vulkan implementation. The application must not perform any
+ * operations on the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd);
break;
- case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
- new_impl = (struct anv_semaphore_impl) {
- .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
- .fd = fd,
- };
+ case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ if (device->instance->physicalDevice.has_syncobj) {
+ new_impl = (struct anv_semaphore_impl) {
+ .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
+ .syncobj = anv_gem_syncobj_create(device, 0),
+ };
+ if (!new_impl.syncobj)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
+ anv_gem_syncobj_destroy(device, new_impl.syncobj);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ "syncobj sync file import failed: %m");
+ }
+ /* Ownership of the FD is transfered to Anv. Since we don't need it
+ * anymore because the associated fence has been put into a syncobj,
+ * we must close the FD.
+ */
+ close(fd);
+ } else {
+ new_impl = (struct anv_semaphore_impl) {
+ .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
+ .fd = fd,
+ };
+ }
break;
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
- if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
+ if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
anv_semaphore_impl_cleanup(device, &semaphore->temporary);
semaphore->temporary = new_impl;
} else {
switch (impl->type) {
case ANV_SEMAPHORE_TYPE_BO:
- result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
+ result = anv_device_export_bo(device, impl->bo, pFd);
if (result != VK_SUCCESS)
return result;
break;
return VK_SUCCESS;
case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
- fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
+ if (pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)
+ fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
+ else {
+ assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
+ fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
+ }
if (fd < 0)
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec: