&timeline->free_points, link) {
list_del(&point->link);
anv_device_release_bo(device, point->bo);
- vk_free(&device->alloc, point);
+ vk_free(&device->vk.alloc, point);
}
list_for_each_entry_safe(struct anv_timeline_point, point,
&timeline->points, link) {
list_del(&point->link);
anv_device_release_bo(device, point->bo);
- vk_free(&device->alloc, point);
+ vk_free(&device->vk.alloc, point);
}
}
if (list_is_empty(&timeline->free_points)) {
*point =
- vk_zalloc(&device->alloc, sizeof(**point),
+ vk_zalloc(&device->vk.alloc, sizeof(**point),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!(*point))
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
0 /* explicit_address */,
&(*point)->bo);
if (result != VK_SUCCESS)
- vk_free(&device->alloc, *point);
+ vk_free(&device->vk.alloc, *point);
}
} else {
*point = list_first_entry(&timeline->free_points,
}
/* Queue SUBMIT for execution: while holding the device mutex, append it to
 * the queue's deferred-submit list and run the deferred submission logic.
 * With flush_queue set, additionally block on the device's queue_submit
 * condition variable until the queued-submit list drains or an error is
 * seen; a failing pthread_cond_wait marks the device lost.
 * NOTE(review): ownership of *_submit appears to pass to the queued list on
 * success — confirm against anv_device_submit_deferred_locked. */
static VkResult
-_anv_queue_submit(struct anv_queue *queue, struct anv_queue_submit **_submit)
+_anv_queue_submit(struct anv_queue *queue, struct anv_queue_submit **_submit,
+ bool flush_queue)
{
struct anv_queue_submit *submit = *_submit;
pthread_mutex_lock(&queue->device->mutex);
list_addtail(&submit->link, &queue->queued_submits);
VkResult result = anv_device_submit_deferred_locked(queue->device);
/* Synchronous path: re-drive deferred submission after each wakeup until
 * nothing remains queued or a submission fails. */
+ if (flush_queue) {
+ while (result == VK_SUCCESS && !list_is_empty(&queue->queued_submits)) {
+ int ret = pthread_cond_wait(&queue->device->queue_submit,
+ &queue->device->mutex);
+ if (ret != 0) {
+ result = anv_device_set_lost(queue->device, "wait timeout");
+ break;
+ }
+
+ result = anv_device_submit_deferred_locked(queue->device);
+ }
+ }
pthread_mutex_unlock(&queue->device->mutex);
return result;
}
VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
- queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ vk_object_base_init(&device->vk, &queue->base, VK_OBJECT_TYPE_QUEUE);
queue->device = device;
queue->flags = 0;
/* Tear down per-queue state.  After this change the queue participates in
 * the common VK object model, so its object base must be finished; the
 * queue owns no other resources at destruction time in this view. */
void
anv_queue_finish(struct anv_queue *queue)
{
+ vk_object_base_finish(&queue->base);
}
static VkResult
}
static struct anv_queue_submit *
-anv_queue_submit_alloc(struct anv_device *device)
+anv_queue_submit_alloc(struct anv_device *device, int perf_query_pass)
{
- const VkAllocationCallbacks *alloc = &device->alloc;
+ const VkAllocationCallbacks *alloc = &device->vk.alloc;
VkSystemAllocationScope alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE;
struct anv_queue_submit *submit = vk_zalloc(alloc, sizeof(*submit), 8, alloc_scope);
submit->alloc_scope = alloc_scope;
submit->in_fence = -1;
submit->out_fence = -1;
+ submit->perf_query_pass = perf_query_pass;
return submit;
}
anv_queue_submit_simple_batch(struct anv_queue *queue,
struct anv_batch *batch)
{
+ if (queue->device->no_hw)
+ return VK_SUCCESS;
+
struct anv_device *device = queue->device;
- struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
+ struct anv_queue_submit *submit = anv_queue_submit_alloc(device, -1);
if (!submit)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- bool has_syncobj_wait = device->instance->physicalDevice.has_syncobj_wait;
+ bool has_syncobj_wait = device->physical->has_syncobj_wait;
VkResult result;
uint32_t syncobj;
struct anv_bo *batch_bo, *sync_bo;
submit->simple_bo_size = size;
}
- result = _anv_queue_submit(queue, &submit);
+ result = _anv_queue_submit(queue, &submit, true);
if (result == VK_SUCCESS) {
if (has_syncobj_wait) {
const uint64_t *out_values,
uint32_t num_out_semaphores,
struct anv_bo *wsi_signal_bo,
- VkFence _fence)
+ VkFence _fence,
+ int perf_query_pass)
{
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
- UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
- struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
+ UNUSED struct anv_physical_device *pdevice = device->physical;
+ struct anv_queue_submit *submit = anv_queue_submit_alloc(device, perf_query_pass);
if (!submit)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto error;
break;
+ case ANV_SEMAPHORE_TYPE_WSI_BO:
+ /* When using a window-system buffer as a semaphore, always enable
+ * EXEC_OBJECT_WRITE. This gives us a WaR hazard with the display or
+ * compositor's read of the buffer and enforces that we don't start
+ * rendering until they are finished. This is exactly the
+ * synchronization we want with vkAcquireNextImage.
+ */
+ result = anv_queue_submit_add_fence_bo(submit, impl->bo, true /* signal */);
+ if (result != VK_SUCCESS)
+ goto error;
+ break;
+
case ANV_SEMAPHORE_TYPE_SYNC_FILE:
assert(!pdevice->has_syncobj);
if (submit->in_fence == -1) {
}
}
- result = _anv_queue_submit(queue, &submit);
+ result = _anv_queue_submit(queue, &submit, false);
if (result != VK_SUCCESS)
goto error;
if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
- /* BO fences can't be shared, so they can't be temporary. */
+ /* If we have permanent BO fence, the only type of temporary possible
+ * would be BO_WSI (because BO fences are not shareable). The Vulkan spec
+ * also requires that the fence passed to vkQueueSubmit() be :
+ *
+ * * unsignaled
+ * * not be associated with any other queue command that has not yet
+ * completed execution on that queue
+ *
+ * So the only acceptable type for the temporary is NONE.
+ */
assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
/* Once the execbuf has returned, we need to set the fence state to
{
ANV_FROM_HANDLE(anv_queue, queue, _queue);
+ if (queue->device->no_hw)
+ return VK_SUCCESS;
+
/* Query for device status prior to submitting. Technically, we don't need
* to do this. However, if we have a client that's submitting piles of
* garbage, we would rather break as early as possible to keep the GPU
* common case.
*/
result = anv_queue_submit(queue, NULL, NULL, NULL, 0, NULL, NULL, 0,
- NULL, fence);
+ NULL, fence, -1);
goto out;
}
const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
vk_find_struct_const(pSubmits[i].pNext,
TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
+ const VkPerformanceQuerySubmitInfoKHR *perf_info =
+ vk_find_struct_const(pSubmits[i].pNext,
+ PERFORMANCE_QUERY_SUBMIT_INFO_KHR);
const uint64_t *wait_values =
timeline_info && timeline_info->waitSemaphoreValueCount ?
timeline_info->pWaitSemaphoreValues : NULL;
signal_values,
pSubmits[i].signalSemaphoreCount,
wsi_signal_bo,
- submit_fence);
+ submit_fence,
+ -1);
if (result != VK_SUCCESS)
goto out;
result = anv_queue_submit(queue, cmd_buffer,
in_semaphores, in_values, num_in_semaphores,
out_semaphores, out_values, num_out_semaphores,
- wsi_signal_bo, execbuf_fence);
+ wsi_signal_bo, execbuf_fence,
+ perf_info ? perf_info->counterPassIndex : 0);
if (result != VK_SUCCESS)
goto out;
}
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
- fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
+ fence = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*fence), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (fence == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- if (device->instance->physicalDevice.has_syncobj_wait) {
+ vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
+
+ if (device->physical->has_syncobj_wait) {
fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;
uint32_t create_flags = 0;
anv_bo_pool_free(&device->batch_bo_pool, impl->bo.bo);
break;
+ case ANV_FENCE_TYPE_WSI_BO:
+ anv_device_release_bo(device, impl->bo.bo);
+ break;
+
case ANV_FENCE_TYPE_SYNCOBJ:
anv_gem_syncobj_destroy(device, impl->syncobj);
break;
anv_fence_impl_cleanup(device, &fence->temporary);
anv_fence_impl_cleanup(device, &fence->permanent);
- vk_free2(&device->alloc, pAllocator, fence);
+ vk_object_base_finish(&fence->base);
+ vk_free2(&device->vk.alloc, pAllocator, fence);
}
VkResult anv_ResetFences(
switch (impl->type) {
case ANV_FENCE_TYPE_BO:
- /* BO fences don't support import/export */
- assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
+ case ANV_FENCE_TYPE_WSI_BO:
switch (impl->bo.state) {
case ANV_BO_FENCE_STATE_RESET:
/* If it hasn't even been sent off to the GPU yet, it's not ready */
bool waitAll,
uint64_t abs_timeout_ns)
{
- uint32_t *syncobjs = vk_zalloc(&device->alloc,
+ uint32_t *syncobjs = vk_zalloc(&device->vk.alloc,
sizeof(*syncobjs) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!syncobjs)
abs_timeout_ns, waitAll);
} while (ret == -1 && errno == ETIME && anv_gettime_ns() < abs_timeout_ns);
- vk_free(&device->alloc, syncobjs);
+ vk_free(&device->vk.alloc, syncobjs);
if (ret == -1) {
if (errno == ETIME) {
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- /* This function assumes that all fences are BO fences and that they
- * have no temporary state. Since BO fences will never be exported,
- * this should be a safe assumption.
- */
- assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
- assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
- struct anv_fence_impl *impl = &fence->permanent;
+ struct anv_fence_impl *impl =
+ fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+ &fence->temporary : &fence->permanent;
+ assert(impl->type == ANV_FENCE_TYPE_BO ||
+ impl->type == ANV_FENCE_TYPE_WSI_BO);
switch (impl->bo.state) {
case ANV_BO_FENCE_STATE_RESET:
/* Wait on a window-system (WSI) fence by delegating to its implementation's
 * own wait callback.  The signature change makes the caller pass the
 * already-resolved fence impl (temporary if set, else permanent) instead of
 * the VkFence handle, so temporary WSI fences are honored correctly. */
static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
- const VkFence _fence,
+ struct anv_fence_impl *impl,
uint64_t abs_timeout)
{
- ANV_FROM_HANDLE(anv_fence, fence, _fence);
- struct anv_fence_impl *impl = &fence->permanent;
-
return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}
if (fenceCount <= 1 || waitAll) {
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- switch (fence->permanent.type) {
+ struct anv_fence_impl *impl =
+ fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+ &fence->temporary : &fence->permanent;
+
+ switch (impl->type) {
case ANV_FENCE_TYPE_BO:
+ case ANV_FENCE_TYPE_WSI_BO:
result = anv_wait_for_bo_fences(device, 1, &pFences[i],
true, abs_timeout);
break;
true, abs_timeout);
break;
case ANV_FENCE_TYPE_WSI:
- result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
+ result = anv_wait_for_wsi_fence(device, impl, abs_timeout);
break;
case ANV_FENCE_TYPE_NONE:
result = VK_SUCCESS;
{
for (uint32_t i = 0; i < fenceCount; ++i) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
+ struct anv_fence_impl *impl =
+ fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (impl->type != ANV_FENCE_TYPE_SYNCOBJ)
return false;
}
return true;
{
for (uint32_t i = 0; i < fenceCount; ++i) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- if (fence->permanent.type != ANV_FENCE_TYPE_BO)
+ struct anv_fence_impl *impl =
+ fence->temporary.type != ANV_FENCE_TYPE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (impl->type != ANV_FENCE_TYPE_BO &&
+ impl->type != ANV_FENCE_TYPE_WSI_BO)
return false;
}
return true;
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ if (device->no_hw)
+ return VK_SUCCESS;
+
if (anv_device_is_lost(device))
return VK_ERROR_DEVICE_LOST;
if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
break;
struct anv_semaphore_impl *impl,
bool exportable)
{
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
uint64_t timeline_value = 0;
VkSemaphoreTypeKHR sem_type = get_semaphore_type(pCreateInfo->pNext, &timeline_value);
- semaphore = vk_alloc(&device->alloc, sizeof(*semaphore), 8,
+ semaphore = vk_alloc(&device->vk.alloc, sizeof(*semaphore), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (semaphore == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &semaphore->base, VK_OBJECT_TYPE_SEMAPHORE);
+
p_atomic_set(&semaphore->refcount, 1);
const VkExportSemaphoreCreateInfo *export =
else
result = timeline_semaphore_create(device, &semaphore->permanent, timeline_value);
if (result != VK_SUCCESS) {
- vk_free2(&device->alloc, pAllocator, semaphore);
+ vk_free2(&device->vk.alloc, pAllocator, semaphore);
return result;
}
} else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
result = binary_semaphore_create(device, &semaphore->permanent, true);
if (result != VK_SUCCESS) {
- vk_free2(&device->alloc, pAllocator, semaphore);
+ vk_free2(&device->vk.alloc, pAllocator, semaphore);
return result;
}
} else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
if (!semaphore->permanent.syncobj) {
- vk_free2(&device->alloc, pAllocator, semaphore);
+ vk_free2(&device->vk.alloc, pAllocator, semaphore);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else {
}
} else {
assert(!"Unknown handle type");
- vk_free2(&device->alloc, pAllocator, semaphore);
+ vk_free2(&device->vk.alloc, pAllocator, semaphore);
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
break;
case ANV_SEMAPHORE_TYPE_BO:
+ case ANV_SEMAPHORE_TYPE_WSI_BO:
anv_device_release_bo(device, impl->bo);
break;
case ANV_SEMAPHORE_TYPE_SYNC_FILE:
- close(impl->fd);
+ if (impl->fd >= 0)
+ close(impl->fd);
break;
case ANV_SEMAPHORE_TYPE_TIMELINE:
anv_semaphore_impl_cleanup(device, &semaphore->temporary);
anv_semaphore_impl_cleanup(device, &semaphore->permanent);
- vk_free(&device->alloc, semaphore);
+
+ vk_object_base_finish(&semaphore->base);
+ vk_free(&device->vk.alloc, semaphore);
}
void anv_DestroySemaphore(
switch (pImportSemaphoreFdInfo->handleType) {
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
break;
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
- if (device->instance->physicalDevice.has_syncobj) {
+ if (device->physical->has_syncobj) {
new_impl = (struct anv_semaphore_impl) {
.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
.syncobj = anv_gem_syncobj_create(device, 0),
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return vk_errorf(device->instance, NULL,
- VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
/* Ownership of the FD is transferred to Anv. Since we don't need it
return VK_SUCCESS;
}
-VkResult anv_GetSemaphoreCounterValueKHR(
+VkResult anv_GetSemaphoreCounterValue(
VkDevice _device,
VkSemaphore _semaphore,
uint64_t* pValue)
uint64_t abs_timeout_ns)
{
if (!wait_all && n_timelines > 1) {
+ pthread_mutex_lock(&device->mutex);
+
while (1) {
VkResult result;
- pthread_mutex_lock(&device->mutex);
for (uint32_t i = 0; i < n_timelines; i++) {
result =
anv_timeline_wait_locked(device, timelines[i], serials[i], 0);
}
}
-VkResult anv_WaitSemaphoresKHR(
+VkResult anv_WaitSemaphores(
VkDevice _device,
const VkSemaphoreWaitInfoKHR* pWaitInfo,
uint64_t timeout)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ if (device->no_hw)
+ return VK_SUCCESS;
+
struct anv_timeline **timelines =
- vk_alloc(&device->alloc,
+ vk_alloc(&device->vk.alloc,
pWaitInfo->semaphoreCount * sizeof(*timelines),
8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!timelines)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- uint64_t *values = vk_alloc(&device->alloc,
+ uint64_t *values = vk_alloc(&device->vk.alloc,
pWaitInfo->semaphoreCount * sizeof(*values),
8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!values) {
- vk_free(&device->alloc, timelines);
+ vk_free(&device->vk.alloc, timelines);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (handle_count > 0) {
result = anv_timelines_wait(device, timelines, values, handle_count,
!(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR),
- timeout);
+ anv_get_absolute_timeout(timeout));
}
- vk_free(&device->alloc, timelines);
- vk_free(&device->alloc, values);
+ vk_free(&device->vk.alloc, timelines);
+ vk_free(&device->vk.alloc, values);
return result;
}
-VkResult anv_SignalSemaphoreKHR(
+VkResult anv_SignalSemaphore(
VkDevice _device,
const VkSemaphoreSignalInfoKHR* pSignalInfo)
{