ANV_FROM_HANDLE(anv_queue, queue, _queue);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
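+ /* Early out if the device has already been marked lost: the Vulkan spec
+  * permits vkQueueSubmit to return VK_ERROR_DEVICE_LOST, and there is no
+  * point in submitting more work to a lost device.
+  */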
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
VkResult result = VK_SUCCESS;
/* We lock around QueueSubmit for three main reasons:
VkDevice _device)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
struct anv_batch batch;
uint32_t cmds[8];
}
VkResult anv_QueueBindSparse(
- VkQueue queue,
+ VkQueue _queue,
uint32_t bindInfoCount,
const VkBindSparseInfo* pBindInfo,
VkFence fence)
{
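+ /* Unwrap the queue handle so we can check for device loss before
+  * reporting that sparse binding is not supported.
+  */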
+ ANV_FROM_HANDLE(anv_queue, queue, _queue);
+ if (unlikely(queue->device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
+
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
int64_t t = 0;
int ret;
ANV_FROM_HANDLE(anv_device, device, _device);
int ret;
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
* to block indefinitely for timeouts <= 0. Unfortunately, this was broken
* for a couple of kernel releases. Since there's no way to know
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
if (!device->info.has_llc) {
/* Invalidate read cache before reading event written by GPU. */
__builtin_ia32_clflush(event);