#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
+#include "util/mesa-sha1.h"
#include "util/vk_util.h"
#include "genxml/gen7_pack.h"
va_end(args);
}
-static bool
-anv_device_get_cache_uuid(void *uuid)
+static VkResult
+anv_compute_heap_size(int fd, uint64_t *heap_size)
+{
+ uint64_t gtt_size;
+ if (anv_gem_get_context_param(fd, 0, I915_CONTEXT_PARAM_GTT_SIZE,
+ &gtt_size) == -1) {
+ /* If, for whatever reason, we can't actually get the GTT size from the
+ * kernel (too old?), fall back to the aperture size.
+ */
+ anv_perf_warn("Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
+
+ if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get aperture size: %m");
+ }
+ }
+
+ /* Query the total RAM from the system */
+ struct sysinfo info;
+ sysinfo(&info);
+
+ uint64_t total_ram = (uint64_t)info.totalram * (uint64_t)info.mem_unit;
+
+ /* We don't want to burn too much RAM with the GPU. If the user has 4GiB
+ * or less, we use at most half. If they have more than 4GiB, we use 3/4.
+ */
+ uint64_t available_ram;
+ if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
+ available_ram = total_ram / 2;
+ else
+ available_ram = total_ram * 3 / 4;
+
+ /* We also want to leave some padding for things we allocate in the driver,
+ * so don't go over 3/4 of the GTT either.
+ */
+ uint64_t available_gtt = gtt_size * 3 / 4;
+
+ *heap_size = MIN2(available_ram, available_gtt);
+
+ return VK_SUCCESS;
+}
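/* Worked example (illustrative sketch, not part of the patch): on a
 * machine with 16GiB of RAM and an 8GiB GTT, total_ram is over the 4GiB
 * threshold, so available_ram = 12GiB and available_gtt = 6GiB; the
 * reported heap is MIN2(12GiB, 6GiB) = 6GiB. The same policy, isolated:
 */
static uint64_t
example_heap_size(uint64_t total_ram, uint64_t gtt_size)
{
   /* Use half of RAM up to 4GiB, 3/4 beyond that. */
   uint64_t available_ram = (total_ram <= 4ull << 30) ? total_ram / 2
                                                      : total_ram * 3 / 4;
   /* Leave 1/4 of the GTT as padding for the driver's own allocations. */
   uint64_t available_gtt = gtt_size * 3 / 4;

   return MIN2(available_ram, available_gtt);
}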
+
+static VkResult
+anv_physical_device_init_uuids(struct anv_physical_device *device)
{
const struct build_id_note *note = build_id_find_nhdr("libvulkan_intel.so");
- if (!note)
- return false;
+ if (!note) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "Failed to find build-id");
+ }
+
+ unsigned build_id_len = build_id_length(note);
+ if (build_id_len < 20) {
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "build-id too short. It needs to be a SHA");
+ }
- unsigned len = build_id_length(note);
- if (len < VK_UUID_SIZE)
- return false;
+ struct mesa_sha1 sha1_ctx;
+ uint8_t sha1[20];
+ STATIC_ASSERT(VK_UUID_SIZE <= sizeof(sha1));
- memcpy(uuid, build_id_data(note), VK_UUID_SIZE);
- return true;
+ _mesa_sha1_init(&sha1_ctx);
+ _mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
+ _mesa_sha1_update(&sha1_ctx, &device->chipset_id,
+ sizeof(device->chipset_id));
+ _mesa_sha1_final(&sha1_ctx, sha1);
+ memcpy(device->uuid, sha1, VK_UUID_SIZE);
+
+ return VK_SUCCESS;
}
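/* Debug-print sketch (example_print_uuid is hypothetical, not part of the
 * patch; assumes <stdio.h>): the UUID is the first VK_UUID_SIZE bytes of
 * a SHA-1 over the driver build-id and the chipset id, so it changes
 * whenever either the binary or the target GPU changes -- exactly the
 * invalidation behavior the pipeline cache relies on.
 */
static void
example_print_uuid(const uint8_t uuid[VK_UUID_SIZE])
{
   char str[VK_UUID_SIZE * 2 + 1];
   for (unsigned i = 0; i < VK_UUID_SIZE; i++)
      snprintf(str + i * 2, 3, "%02x", uuid[i]);
   fprintf(stderr, "driver uuid: %s\n", str);
}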
static VkResult
}
}
- if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
- "failed to get aperture size: %m");
- goto fail;
- }
-
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
"kernel missing gem wait");
goto fail;
}
- if (!anv_device_get_cache_uuid(device->uuid)) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
- "cannot generate UUID");
+ device->supports_48bit_addresses = anv_gem_supports_48b_addresses(fd);
+
+ result = anv_compute_heap_size(fd, &device->heap_size);
+ if (result != VK_SUCCESS)
goto fail;
- }
+
+ device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
+
+ result = anv_physical_device_init_uuids(device);
+ if (result != VK_SUCCESS)
+ goto fail;
+
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
/* GENs prior to 8 do not support EU/Subslice info */
{
.extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
.specVersion = 1,
- }
+ },
+ {
+ .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static void *
VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
- VkDeviceSize heap_size;
-
- /* Reserve some wiggle room for the driver by exposing only 75% of the
- * aperture to the heap.
- */
- heap_size = 3 * physical_device->aperture_size / 4;
if (physical_device->info.has_llc) {
/* Big core GPUs share LLC with the CPU and thus one memory type can be
pMemoryProperties->memoryHeapCount = 1;
pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
- .size = heap_size,
+ .size = physical_device->heap_size,
.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
};
}
struct anv_bo bo, *exec_bos[1];
VkResult result = VK_SUCCESS;
uint32_t size;
- int64_t timeout;
- int ret;
/* Kernel driver requires 8 byte aligned batch length */
size = align_u32(batch->next - batch->start, 8);
if (result != VK_SUCCESS)
goto fail;
- timeout = INT64_MAX;
- ret = anv_gem_wait(device, bo.gem_handle, &timeout);
- if (ret != 0) {
- /* We don't know the real error. */
- device->lost = true;
- result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
- goto fail;
- }
+ result = anv_device_wait(device, &bo, INT64_MAX);
fail:
anv_bo_pool_free(&device->batch_bo_pool, &bo);
return result;
}
+VkResult
+anv_device_query_status(struct anv_device *device)
+{
+ /* This isn't likely as most of the callers of this function already check
+ * for it. However, it doesn't hurt to check and it potentially lets us
+ * avoid an ioctl.
+ */
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
+ uint32_t active, pending;
+ int ret = anv_gem_gpu_get_reset_stats(device, &active, &pending);
+ if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "get_reset_stats failed: %m");
+ }
+
+ if (active) {
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST,
+ "GPU hung on one of our command buffers");
+ } else if (pending) {
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST,
+ "GPU hung with commands in-flight");
+ }
+
+ return VK_SUCCESS;
+}
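/* Caller-pattern sketch (example_entrypoint is hypothetical): entrypoints
 * check device->lost up front to fail fast, then funnel any GPU-touching
 * path through anv_device_query_status so the lost flag is set exactly
 * once and every subsequent call returns VK_ERROR_DEVICE_LOST.
 */
static VkResult
example_entrypoint(struct anv_device *device)
{
   if (unlikely(device->lost))
      return VK_ERROR_DEVICE_LOST;

   /* ... submit work or issue an ioctl here ... */

   /* Re-check afterwards so a hang is reported now rather than on some
    * later, unrelated call.
    */
   return anv_device_query_status(device);
}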
+
+VkResult
+anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo)
+{
+ /* Note: This only returns whether or not the BO is in use by an i915 GPU.
+ * Other usages of the BO (such as on different hardware) will not be
+ * flagged as "busy" by this ioctl. Use with care.
+ */
+ int ret = anv_gem_busy(device, bo->gem_handle);
+ if (ret == 1) {
+ return VK_NOT_READY;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ }
+
+ /* Query for device status after the busy call. If the BO we're checking
+ * got caught in a GPU hang we don't want to return VK_SUCCESS to the
+ * client because it clearly doesn't have valid data. Yes, this most
+ * likely means an ioctl, but we just did an ioctl to query the busy status
+ * so it's no great loss.
+ */
+ return anv_device_query_status(device);
+}
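/* Usage sketch (example_poll_or_wait is hypothetical): poll a BO without
 * blocking and fall back to a full wait only when asked to. Both helpers
 * already fold GPU-hang detection into their result.
 */
static VkResult
example_poll_or_wait(struct anv_device *device, struct anv_bo *bo,
                     bool block)
{
   VkResult result = anv_device_bo_busy(device, bo);
   if (result != VK_NOT_READY || !block)
      return result;

   return anv_device_wait(device, bo, INT64_MAX);
}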
+
+VkResult
+anv_device_wait(struct anv_device *device, struct anv_bo *bo,
+ int64_t timeout)
+{
+ int ret = anv_gem_wait(device, bo->gem_handle, &timeout);
+ if (ret == -1 && errno == ETIME) {
+ return VK_TIMEOUT;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ }
+
+ /* Query for device status after the wait. If the BO we're waiting on got
+ * caught in a GPU hang we don't want to return VK_SUCCESS to the client
+ * because it clearly doesn't have valid data. Yes, this most likely means
+ * an ioctl, but we just did an ioctl to wait so it's no great loss.
+ */
+ return anv_device_query_status(device);
+}
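/* Deadline sketch (example_wait_deadline is hypothetical; assumes
 * <time.h>): the timeout is relative, in nanoseconds. A caller holding an
 * absolute CLOCK_MONOTONIC deadline recomputes the remaining time before
 * each wait, clamping to INT64_MAX just as anv_WaitForFences does below.
 */
static VkResult
example_wait_deadline(struct anv_device *device, struct anv_bo *bo,
                      uint64_t deadline_ns)
{
   struct timespec now;
   clock_gettime(CLOCK_MONOTONIC, &now);
   uint64_t now_ns = now.tv_sec * 1000000000ull + now.tv_nsec;
   if (now_ns >= deadline_ns)
      return VK_TIMEOUT;

   return anv_device_wait(device, bo, MIN2(deadline_ns - now_ns, INT64_MAX));
}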
+
VkResult anv_QueueSubmit(
VkQueue _queue,
uint32_t submitCount,
ANV_FROM_HANDLE(anv_queue, queue, _queue);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
- VkResult result = VK_SUCCESS;
+
+ /* Query for device status prior to submitting. Technically, we don't need
+ * to do this. However, if we have a client that's submitting piles of
+ * garbage, we would rather break as early as possible to keep the GPU
+ * hanging contained. If we don't check here, we'll either be waiting for
+ * the kernel to kick us or we'll have to wait until the client waits on a
+ * fence before we actually know whether or not we've hung.
+ */
+ VkResult result = anv_device_query_status(device);
+ if (result != VK_SUCCESS)
+ return result;
/* We lock around QueueSubmit for three main reasons:
*
VkDevice _device)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
struct anv_batch batch;
uint32_t cmds[8];
anv_bo_init(bo, gem_handle, size);
+ if (device->instance->physicalDevice.supports_48bit_addresses)
+ bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+ if (device->instance->physicalDevice.has_exec_async)
+ bo->flags |= EXEC_OBJECT_ASYNC;
+
return VK_SUCCESS;
}
assert(pAllocateInfo->memoryTypeIndex == 0 ||
(!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
+ /* The kernel relocation API has a limitation of a 32-bit delta value
+ * applied to the address before it is written which, in spite of it being
+ * unsigned, is treated as signed. Because of the way that this maps to
+ * the Vulkan API, we cannot handle an offset into a buffer that does not
+ * fit in a signed 32-bit value. The only mechanism we have for dealing with
+ * this at the moment is to limit all VkDeviceMemory objects to a maximum
+ * of 2GB each. The Vulkan spec allows us to do this:
+ *
+ * "Some platforms may have a limit on the maximum size of a single
+ * allocation. For example, certain systems may fail to create
+ * allocations with a size greater than or equal to 4GB. Such a limit is
+ * implementation-dependent, and if such a failure occurs then the error
+ * VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
+ *
+ * We don't use vk_error here because it's not an error so much as an
+ * indication to the application that the allocation is too large.
+ */
+ if (pAllocateInfo->allocationSize > (1ull << 31))
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
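/* Worked bound (illustrative, not part of the patch; assumes <stdint.h>):
 * with the cap at 1ull << 31 bytes, the largest in-bounds offset is
 * (1ull << 31) - 1, which is exactly INT32_MAX, so every legal offset
 * survives the kernel's signed interpretation of the relocation delta.
 */
STATIC_ASSERT((1ull << 31) - 1 <= INT32_MAX);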
+
/* FINISHME: Fail if allocation request exceeds heap size. */
mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
}
VkResult anv_QueueBindSparse(
- VkQueue queue,
+ VkQueue _queue,
uint32_t bindInfoCount,
const VkBindSparseInfo* pBindInfo,
VkFence fence)
{
+ ANV_FROM_HANDLE(anv_queue, queue, _queue);
+ if (unlikely(queue->device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
- int64_t t = 0;
- int ret;
+
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
switch (fence->state) {
case ANV_FENCE_STATE_RESET:
/* It's been signaled, return success */
return VK_SUCCESS;
- case ANV_FENCE_STATE_SUBMITTED:
- /* It's been submitted to the GPU but we don't know if it's done yet. */
- ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
- if (ret == 0) {
+ case ANV_FENCE_STATE_SUBMITTED: {
+ VkResult result = anv_device_bo_busy(device, &fence->bo);
+ if (result == VK_SUCCESS) {
fence->state = ANV_FENCE_STATE_SIGNALED;
return VK_SUCCESS;
} else {
- return VK_NOT_READY;
+ return result;
}
+ }
default:
unreachable("Invalid fence status");
}
ANV_FROM_HANDLE(anv_device, device, _device);
int ret;
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
* to block indefinitely for timeouts <= 0. Unfortunately, this was broken
* for a couple of kernel releases. Since there's no way to know
*/
int64_t timeout = MIN2(_timeout, INT64_MAX);
+ VkResult result = VK_SUCCESS;
uint32_t pending_fences = fenceCount;
while (pending_fences) {
pending_fences = 0;
/* This fence is not pending. If waitAll isn't set, we can return
* early. Otherwise, we have to keep going.
*/
- if (!waitAll)
- return VK_SUCCESS;
+ if (!waitAll) {
+ result = VK_SUCCESS;
+ goto done;
+ }
continue;
case ANV_FENCE_STATE_SUBMITTED:
/* These are the fences we really care about. Go ahead and wait
* on it until we hit a timeout.
*/
- ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
- if (ret == -1 && errno == ETIME) {
- return VK_TIMEOUT;
- } else if (ret == -1) {
- /* We don't know the real error. */
- device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
- } else {
+ result = anv_device_wait(device, &fence->bo, timeout);
+ switch (result) {
+ case VK_SUCCESS:
fence->state = ANV_FENCE_STATE_SIGNALED;
signaled_fences = true;
if (!waitAll)
- return VK_SUCCESS;
- continue;
+ goto done;
+ break;
+
+ case VK_TIMEOUT:
+ goto done;
+
+ default:
+ return result;
}
}
}
if (time_elapsed >= timeout) {
pthread_mutex_unlock(&device->mutex);
- return VK_TIMEOUT;
+ result = VK_TIMEOUT;
+ goto done;
}
timeout -= time_elapsed;
}
}
- return VK_SUCCESS;
+done:
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
+ return result;
}
// Queue semaphore functions
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
if (!device->info.has_llc) {
/* Invalidate read cache before reading event written by GPU. */
__builtin_ia32_clflush(event);