#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
+#include <xf86drm.h>
#include "anv_private.h"
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
+#include "util/mesa-sha1.h"
#include "util/vk_util.h"
#include "genxml/gen7_pack.h"
va_end(args);
}
-static bool
-anv_device_get_cache_uuid(void *uuid)
+static VkResult
+anv_compute_heap_size(int fd, uint64_t *heap_size)
+{
+ uint64_t gtt_size;
+ if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
+                                 &gtt_size) == -1) {
+ /* If, for whatever reason, we can't actually get the GTT size from the
+    * kernel (too old?), fall back to the aperture size.
+ */
+ anv_perf_warn("Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
+
+      if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get aperture size: %m");
+ }
+ }
+
+   /* Query the total RAM from the system */
+ struct sysinfo info;
+ sysinfo(&info);
+
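+   /* sysinfo reports totalram in multiples of mem_unit bytes. */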
+ uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;
+
+   /* We don't want to burn too much RAM with the GPU. If the user has 4GiB
+ * or less, we use at most half. If they have more than 4GiB, we use 3/4.
+ */
+ uint64_t available_ram;
+ if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
+ available_ram = total_ram / 2;
+ else
+ available_ram = total_ram * 3 / 4;
+
+ /* We also want to leave some padding for things we allocate in the driver,
+ * so don't go over 3/4 of the GTT either.
+ */
+ uint64_t available_gtt = gtt_size * 3 / 4;
+
+ *heap_size = MIN2(available_ram, available_gtt);
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+anv_physical_device_init_uuids(struct anv_physical_device *device)
{
const struct build_id_note *note = build_id_find_nhdr("libvulkan_intel.so");
- if (!note)
- return false;
+ if (!note) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "Failed to find build-id");
+ }
- unsigned len = build_id_length(note);
- if (len < VK_UUID_SIZE)
- return false;
+ unsigned build_id_len = build_id_length(note);
+ if (build_id_len < 20) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+                       "build-id too short. It needs to be a SHA-1");
+ }
+
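+   /* This UUID feeds, among other things, the pipeline cache UUID, so it
+    * must be unique per driver build and per device. Hash the build-id (a
+    * SHA-1, hence the 20-byte minimum above) together with the chipset ID.
+    */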
+ struct mesa_sha1 sha1_ctx;
+ uint8_t sha1[20];
+ STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));
+
+ _mesa_sha1_init(&sha1_ctx);
+ _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
+ _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
+ sizeof(device->chipset_id));
+ _mesa_sha1_final(&sha1_ctx, sha1);
+ memcpy(device->uuid, sha1, VK_UUID_SIZE);
- memcpy(uuid, build_id_data(note), VK_UUID_SIZE);
- return true;
+ return VK_SUCCESS;
}
static VkResult
}
}
- if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
- "failed to get aperture size: %m");
- goto fail;
- }
-
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
"kernel missing gem wait");
goto fail;
}
- if (!anv_device_get_cache_uuid(device->uuid)) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
- "cannot generate UUID");
+ device->supports_48bit_addresses = anv_gem_supports_48b_addresses(fd);
+
+ result = anv_compute_heap_size(fd, &device->heap_size);
+ if (result != VK_SUCCESS)
goto fail;
- }
+
+ device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
+
+ result = anv_physical_device_init_uuids(device);
+ if (result != VK_SUCCESS)
+ goto fail;
+
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
/* GENs prior to 8 do not support EU/Subslice info */
{
.extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
.specVersion = 1,
- }
+ },
+ {
+ .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static void *
vk_free(&instance->alloc, instance);
}
+static VkResult
+anv_enumerate_devices(struct anv_instance *instance)
+{
+   /* TODO: Check for more devices? */
+ drmDevicePtr devices[8];
+ VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
+ int max_devices;
+
+ instance->physicalDeviceCount = 0;
+
+   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+ if (max_devices < 1)
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+
+ for (unsigned i = 0; i < (unsigned)max_devices; i++) {
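+      /* Consider only Intel (vendor id 0x8086) PCI devices that expose a
+       * render node.
+       */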
+ if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
+ devices[i]->bustype == DRM_BUS_PCI &&
+ devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
+
+ result = anv_physical_device_init(&instance->physicalDevice,
+ instance,
+ devices[i]->nodes[DRM_NODE_RENDER]);
+ if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ break;
+ }
+   }
+   drmFreeDevices(devices, max_devices);
+
+ if (result == VK_SUCCESS)
+ instance->physicalDeviceCount = 1;
+
+ return result;
+}
+
VkResult anv_EnumeratePhysicalDevices(
VkInstance _instance,
uint32_t* pPhysicalDeviceCount,
VkResult result;
if (instance->physicalDeviceCount < 0) {
- char path[20];
- for (unsigned i = 0; i < 8; i++) {
- snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
- result = anv_physical_device_init(&instance->physicalDevice,
- instance, path);
- if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
- break;
- }
-
- if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
- instance->physicalDeviceCount = 0;
- } else if (result == VK_SUCCESS) {
- instance->physicalDeviceCount = 1;
- } else {
+ result = anv_enumerate_devices(instance);
+ if (result != VK_SUCCESS &&
+ result != VK_ERROR_INCOMPATIBLE_DRIVER)
return result;
- }
}
- vk_outarray_append(&out, i) {
- *i = anv_physical_device_to_handle(&instance->physicalDevice);
+ if (instance->physicalDeviceCount > 0) {
+ assert(instance->physicalDeviceCount == 1);
+ vk_outarray_append(&out, i) {
+ *i = anv_physical_device_to_handle(&instance->physicalDevice);
+ }
}
return vk_outarray_status(&out);
.textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
.textureCompressionBC = true,
.occlusionQueryPrecise = true,
- .pipelineStatisticsQuery = false,
+ .pipelineStatisticsQuery = true,
.fragmentStoresAndAtomics = true,
.shaderTessellationAndGeometryPointSize = true,
.shaderImageGatherExtended = true,
.shaderInt16 = false,
.shaderResourceMinLod = false,
.variableMultisampleRate = false,
- .inheritedQueries = false,
+ .inheritedQueries = true,
};
/* We can't do image stores in vec4 shaders */
vk_foreach_struct(ext, pFeatures->pNext) {
switch (ext->sType) {
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
- VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
- (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
-
- properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
- break;
- }
-
default:
anv_debug_ignored_stype(ext->sType);
break;
ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
const struct gen_device_info *devinfo = &pdevice->info;
- const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
-
/* See assertions made when programming the buffer surface state. */
const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
(1ul << 30) : (1ul << 27);
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
.timestampComputeAndGraphics = false,
- .timestampPeriod = time_stamp_base,
+ .timestampPeriod = devinfo->timebase_scale,
.maxClipDistances = 8,
.maxCullDistances = 8,
.maxCombinedClipAndCullDistances = 8,
vk_foreach_struct(ext, pProperties->pNext) {
switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
+ VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
+ (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
+
+ properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
}
}
-static void
-anv_get_queue_family_properties(struct anv_physical_device *phys_dev,
- VkQueueFamilyProperties *props)
-{
- *props = (VkQueueFamilyProperties) {
- .queueFlags = VK_QUEUE_GRAPHICS_BIT |
- VK_QUEUE_COMPUTE_BIT |
- VK_QUEUE_TRANSFER_BIT,
- .queueCount = 1,
- .timestampValidBits = 36, /* XXX: Real value here */
- .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
- };
-}
+/* We support exactly one queue family. */
+static const VkQueueFamilyProperties
+anv_queue_family_properties = {
+ .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+ VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = 1,
+ .timestampValidBits = 36, /* XXX: Real value here */
+ .minImageTransferGranularity = { 1, 1, 1 },
+};
void anv_GetPhysicalDeviceQueueFamilyProperties(
VkPhysicalDevice physicalDevice,
uint32_t* pCount,
VkQueueFamilyProperties* pQueueFamilyProperties)
{
- ANV_FROM_HANDLE(anv_physical_device, phys_dev, physicalDevice);
+ VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
- if (pQueueFamilyProperties == NULL) {
- *pCount = 1;
- return;
+ vk_outarray_append(&out, p) {
+ *p = anv_queue_family_properties;
}
-
- /* The spec implicitly allows the incoming count to be 0. From the Vulkan
- * 1.0.38 spec, Section 4.1 Physical Devices:
- *
- * If the value referenced by pQueueFamilyPropertyCount is not 0 [then
- * do stuff].
- */
- if (*pCount == 0)
- return;
-
- *pCount = 1;
- anv_get_queue_family_properties(phys_dev, pQueueFamilyProperties);
}
void anv_GetPhysicalDeviceQueueFamilyProperties2KHR(
VkQueueFamilyProperties2KHR* pQueueFamilyProperties)
{
- ANV_FROM_HANDLE(anv_physical_device, phys_dev, physicalDevice);
+ VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
- if (pQueueFamilyProperties == NULL) {
- *pQueueFamilyPropertyCount = 1;
- return;
- }
-
- /* The spec implicitly allows the incoming count to be 0. From the Vulkan
- * 1.0.38 spec, Section 4.1 Physical Devices:
- *
- * If the value referenced by pQueueFamilyPropertyCount is not 0 [then
- * do stuff].
- */
- if (*pQueueFamilyPropertyCount == 0)
- return;
-
- /* We support exactly one queue family. So need to traverse only the first
- * array element's pNext chain.
- */
- *pQueueFamilyPropertyCount = 1;
- anv_get_queue_family_properties(phys_dev,
- &pQueueFamilyProperties->queueFamilyProperties);
+ vk_outarray_append(&out, p) {
+ p->queueFamilyProperties = anv_queue_family_properties;
- vk_foreach_struct(ext, pQueueFamilyProperties->pNext) {
- switch (ext->sType) {
- default:
- anv_debug_ignored_stype(ext->sType);
- break;
+ vk_foreach_struct(s, p->pNext) {
+ anv_debug_ignored_stype(s->sType);
}
}
}
VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
- VkDeviceSize heap_size;
-
- /* Reserve some wiggle room for the driver by exposing only 75% of the
- * aperture to the heap.
- */
- heap_size = 3 * physical_device->aperture_size / 4;
if (physical_device->info.has_llc) {
/* Big core GPUs share LLC with the CPU and thus one memory type can be
pMemoryProperties->memoryHeapCount = 1;
pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
- .size = heap_size,
+ .size = physical_device->heap_size,
.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
};
}
struct anv_bo bo, *exec_bos[1];
VkResult result = VK_SUCCESS;
uint32_t size;
- int64_t timeout;
- int ret;
/* Kernel driver requires 8 byte aligned batch length */
size = align_u32(batch->next - batch->start, 8);
if (result != VK_SUCCESS)
goto fail;
- timeout = INT64_MAX;
- ret = anv_gem_wait(device, bo.gem_handle, &timeout);
- if (ret != 0) {
- /* We don't know the real error. */
- result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
- goto fail;
- }
+ result = anv_device_wait(device, &bo, INT64_MAX);
fail:
anv_bo_pool_free(&device->batch_bo_pool, &bo);
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
device->chipset_id = physical_device->chipset_id;
+ device->lost = false;
if (pAllocator)
device->alloc = *pAllocator;
device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
pCreateInfo->pEnabledFeatures->robustBufferAccess;
- pthread_mutex_init(&device->mutex, NULL);
+ if (pthread_mutex_init(&device->mutex, NULL) != 0) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_context_id;
+ }
pthread_condattr_t condattr;
- pthread_condattr_init(&condattr);
- pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC);
- pthread_cond_init(&device->queue_submit, NULL);
+ if (pthread_condattr_init(&condattr) != 0) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
+ if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
+ pthread_condattr_destroy(&condattr);
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
+ if (pthread_cond_init(&device->queue_submit, NULL) != 0) {
+ pthread_condattr_destroy(&condattr);
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
pthread_condattr_destroy(&condattr);
anv_bo_pool_init(&device->batch_bo_pool, device);
- anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
+ result = anv_block_pool_init(&device->dynamic_state_block_pool, device,
+ 16384);
+ if (result != VK_SUCCESS)
+ goto fail_batch_bo_pool;
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 1024 * 1024);
+ result = anv_block_pool_init(&device->instruction_block_pool, device,
+ 1024 * 1024);
+ if (result != VK_SUCCESS)
+ goto fail_dynamic_state_pool;
+
anv_state_pool_init(&device->instruction_state_pool,
&device->instruction_block_pool);
- anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
+ result = anv_block_pool_init(&device->surface_state_block_pool, device,
+ 4096);
+ if (result != VK_SUCCESS)
+ goto fail_instruction_state_pool;
anv_state_pool_init(&device->surface_state_pool,
&device->surface_state_block_pool);
- anv_bo_init_new(&device->workaround_bo, device, 1024);
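+   /* The workaround BO is a small scratch buffer that various hardware
+    * workarounds use as a dummy write target.
+    */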
+ result = anv_bo_init_new(&device->workaround_bo, device, 1024);
+ if (result != VK_SUCCESS)
+ goto fail_surface_state_pool;
anv_scratch_pool_init(device, &device->scratch_pool);
unreachable("unhandled gen");
}
if (result != VK_SUCCESS)
- goto fail_fd;
+ goto fail_workaround_bo;
anv_device_init_blorp(device);
return VK_SUCCESS;
+ fail_workaround_bo:
+ anv_queue_finish(&device->queue);
+ anv_scratch_pool_finish(device, &device->scratch_pool);
+ anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+ anv_gem_close(device, device->workaround_bo.gem_handle);
+ fail_surface_state_pool:
+ anv_state_pool_finish(&device->surface_state_pool);
+ anv_block_pool_finish(&device->surface_state_block_pool);
+ fail_instruction_state_pool:
+ anv_state_pool_finish(&device->instruction_state_pool);
+ anv_block_pool_finish(&device->instruction_block_pool);
+ fail_dynamic_state_pool:
+ anv_state_pool_finish(&device->dynamic_state_pool);
+ anv_block_pool_finish(&device->dynamic_state_block_pool);
+ fail_batch_bo_pool:
+ anv_bo_pool_finish(&device->batch_bo_pool);
+ pthread_cond_destroy(&device->queue_submit);
+ fail_mutex:
+ pthread_mutex_destroy(&device->mutex);
+ fail_context_id:
+ anv_gem_destroy_context(device, device->context_id);
fail_fd:
close(device->fd);
fail_device:
int ret = anv_gem_execbuffer(device, execbuf);
if (ret != 0) {
/* We don't know the real error. */
+ device->lost = true;
return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
}
return VK_SUCCESS;
}
+VkResult
+anv_device_query_status(struct anv_device *device)
+{
+ /* This isn't likely as most of the callers of this function already check
+ * for it. However, it doesn't hurt to check and it potentially lets us
+ * avoid an ioctl.
+ */
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
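+   /* "active" counts hangs where one of our batches was executing when the
+    * GPU hung; "pending" counts batches that were queued but lost to a hang.
+    */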
+ uint32_t active, pending;
+ int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
+ if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "get_reset_stats failed: %m");
+ }
+
+ if (active) {
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST,
+ "GPU hung on one of our command buffers");
+ } else if (pending) {
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST,
+ "GPU hung with commands in-flight");
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
+{
+ /* Note: This only returns whether or not the BO is in use by an i915 GPU.
+ * Other usages of the BO (such as on different hardware) will not be
+ * flagged as "busy" by this ioctl. Use with care.
+ */
+ int ret = anv_gem_busy(device, bo->gem_handle);
+ if (ret == 1) {
+ return VK_NOT_READY;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ }
+
+ /* Query for device status after the busy call. If the BO we're checking
+ * got caught in a GPU hang we don't want to return VK_SUCCESS to the
+ * client because it clearly doesn't have valid data. Yes, this most
+ * likely means an ioctl, but we just did an ioctl to query the busy status
+ * so it's no great loss.
+ */
+ return anv_device_query_status(device);
+}
+
+VkResult
+anv_device_wait(struct anv_device *device, struct anv_bo *bo,
+ int64_t timeout)
+{
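+   /* The i915 wait ioctl writes the remaining time back through its timeout
+    * pointer, which is why this function takes the timeout by value.
+    */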
+ int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
+ if (ret == -1 && errno == ETIME) {
+ return VK_TIMEOUT;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ }
+
+ /* Query for device status after the wait. If the BO we're waiting on got
+ * caught in a GPU hang we don't want to return VK_SUCCESS to the client
+ * because it clearly doesn't have valid data. Yes, this most likely means
+ * an ioctl, but we just did an ioctl to wait so it's no great loss.
+ */
+ return anv_device_query_status(device);
+}
+
VkResult anv_QueueSubmit(
VkQueue _queue,
uint32_t submitCount,
ANV_FROM_HANDLE(anv_queue, queue, _queue);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
- VkResult result = VK_SUCCESS;
+
+ /* Query for device status prior to submitting. Technically, we don't need
+ * to do this. However, if we have a client that's submitting piles of
+ * garbage, we would rather break as early as possible to keep the GPU
+ * hanging contained. If we don't check here, we'll either be waiting for
+ * the kernel to kick us or we'll have to wait until the client waits on a
+ * fence before we actually know whether or not we've hung.
+ */
+ VkResult result = anv_device_query_status(device);
+ if (result != VK_SUCCESS)
+ return result;
/* We lock around QueueSubmit for three main reasons:
*
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
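+         /* Errors in the batch are reported to the client from
+          * vkEndCommandBuffer, so a well-behaved client never submits a
+          * broken batch.
+          */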
+ assert(!anv_batch_has_error(&cmd_buffer->batch));
result = anv_cmd_buffer_execbuf(device, cmd_buffer);
if (result != VK_SUCCESS)
}
out:
+ if (result != VK_SUCCESS) {
+ /* In the case that something has gone wrong we may end up with an
+ * inconsistent state from which it may not be trivial to recover.
+ * For example, we might have computed address relocations and
+ * any future attempt to re-submit this job will need to know about
+ * this and avoid computing relocation addresses again.
+ *
+       * To avoid this sort of issue, we assume that if something went
+       * wrong during submission we must already be in a really bad
+       * situation anyway (such as being out of memory) and return
+ * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
+ * submit the same job again to this device.
+ */
+ result = VK_ERROR_DEVICE_LOST;
+ device->lost = true;
+
+      /* If we return VK_ERROR_DEVICE_LOST here, we need to ensure that
+ * vkWaitForFences() and vkGetFenceStatus() return a valid result
+ * (VK_SUCCESS or VK_ERROR_DEVICE_LOST) in a finite amount of time.
+ * Setting the fence status to SIGNALED ensures this will happen in
+ * any case.
+ */
+ if (fence)
+ fence->state = ANV_FENCE_STATE_SIGNALED;
+ }
+
pthread_mutex_unlock(&device->mutex);
return result;
VkDevice _device)
{
ANV_FROM_HANDLE(anv_device, device, _device);
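+
+   /* Once lost, the device only ever reports VK_ERROR_DEVICE_LOST. */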
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
struct anv_batch batch;
uint32_t cmds[8];
anv_bo_init(bo, gem_handle, size);
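+
+   /* If the kernel supports it, allow this BO to be placed anywhere in the
+    * 48-bit PPGTT rather than only in the low 4GiB.
+    */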
+ if (device->instance->physicalDevice.supports_48bit_addresses)
+ bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
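+   /* EXEC_OBJECT_ASYNC opts this BO out of the kernel's implicit
+    * synchronization; ordering is handled explicitly by the driver.
+    */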
+ if (device->instance->physicalDevice.has_exec_async)
+ bo->flags |= EXEC_OBJECT_ASYNC;
+
return VK_SUCCESS;
}
assert(pAllocateInfo->memoryTypeIndex == 0 ||
(!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
+ /* The kernel relocation API has a limitation of a 32-bit delta value
+    * applied to the address before it is written, which, in spite of being
+    * unsigned, is treated as signed. Because of the way that this maps to
+ * the Vulkan API, we cannot handle an offset into a buffer that does not
+ * fit into a signed 32 bits. The only mechanism we have for dealing with
+ * this at the moment is to limit all VkDeviceMemory objects to a maximum
+ * of 2GB each. The Vulkan spec allows us to do this:
+ *
+ * "Some platforms may have a limit on the maximum size of a single
+ * allocation. For example, certain systems may fail to create
+ * allocations with a size greater than or equal to 4GB. Such a limit is
+ * implementation-dependent, and if such a failure occurs then the error
+ * VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
+ *
+ * We don't use vk_error here because it's not an error so much as an
+ * indication to the application that the allocation is too large.
+ */
+ if (pAllocateInfo->allocationSize > (1ull << 31))
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
/* FINISHME: Fail if allocation request exceeds heap size. */
mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
}
VkResult anv_QueueBindSparse(
- VkQueue queue,
+ VkQueue _queue,
uint32_t bindInfoCount,
const VkBindSparseInfo* pBindInfo,
VkFence fence)
{
+ ANV_FROM_HANDLE(anv_queue, queue, _queue);
+ if (unlikely(queue->device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
- int64_t t = 0;
- int ret;
+
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
switch (fence->state) {
case ANV_FENCE_STATE_RESET:
/* It's been signaled, return success */
return VK_SUCCESS;
- case ANV_FENCE_STATE_SUBMITTED:
- /* It's been submitted to the GPU but we don't know if it's done yet. */
- ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
- if (ret == 0) {
+ case ANV_FENCE_STATE_SUBMITTED: {
+ VkResult result = anv_device_bo_busy(device, &fence->bo);
+ if (result == VK_SUCCESS) {
fence->state = ANV_FENCE_STATE_SIGNALED;
return VK_SUCCESS;
} else {
- return VK_NOT_READY;
+ return result;
}
+ }
default:
unreachable("Invalid fence status");
}
ANV_FROM_HANDLE(anv_device, device, _device);
int ret;
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
* for a couple of kernel releases. Since there's no way to know
*/
int64_t timeout = MIN2(_timeout, INT64_MAX);
+ VkResult result = VK_SUCCESS;
uint32_t pending_fences = fenceCount;
while (pending_fences) {
pending_fences = 0;
/* This fence is not pending. If waitAll isn't set, we can return
* early. Otherwise, we have to keep going.
*/
- if (!waitAll)
- return VK_SUCCESS;
+ if (!waitAll) {
+ result = VK_SUCCESS;
+ goto done;
+ }
continue;
case ANV_FENCE_STATE_SUBMITTED:
/* These are the fences we really care about. Go ahead and wait
* on it until we hit a timeout.
*/
- ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
- if (ret == -1 && errno == ETIME) {
- return VK_TIMEOUT;
- } else if (ret == -1) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
- } else {
+ result = anv_device_wait(device, &fence->bo, timeout);
+ switch (result) {
+ case VK_SUCCESS:
fence->state = ANV_FENCE_STATE_SIGNALED;
signaled_fences = true;
if (!waitAll)
- return VK_SUCCESS;
- continue;
+ goto done;
+ break;
+
+ case VK_TIMEOUT:
+ goto done;
+
+ default:
+ return result;
}
}
}
if (time_elapsed >= timeout) {
pthread_mutex_unlock(&device->mutex);
- return VK_TIMEOUT;
+ result = VK_TIMEOUT;
+ goto done;
}
timeout -= time_elapsed;
}
}
- return VK_SUCCESS;
+done:
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
+ return result;
}
// Queue semaphore functions
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
if (!device->info.has_llc) {
/* Invalidate read cache before reading event written by GPU. */
__builtin_ia32_clflush(event);