#include <assert.h>
#include <stdbool.h>
#include <string.h>
+#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
+#include <xf86drm.h>
#include "anv_private.h"
-#include "anv_timestamp.h"
#include "util/strtod.h"
#include "util/debug.h"
+#include "util/build_id.h"
+#include "util/vk_util.h"
#include "genxml/gen7_pack.h"
-struct anv_dispatch_table dtable;
-
static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);
   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);
   va_end(args);
}
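+/* Derive the pipeline cache UUID from the build-id note of the driver
+ * binary, so the cache is invalidated whenever the driver changes.  This
+ * assumes the DSO is linked with --build-id and that the note payload is
+ * at least VK_UUID_SIZE (16) bytes long.
+ */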
+static bool
+anv_device_get_cache_uuid(void *uuid)
+{
+ const struct build_id_note *note = build_id_find_nhdr("libvulkan_intel.so");
+ if (!note)
+ return false;
+
+ unsigned len = build_id_length(note);
+ if (len < VK_UUID_SIZE)
+ return false;
+
+ memcpy(uuid, build_id_data(note), VK_UUID_SIZE);
+ return true;
+}
+
static VkResult
anv_physical_device_init(struct anv_physical_device *device,
struct anv_instance *instance,
goto fail;
}
+ if (!anv_device_get_cache_uuid(device->uuid)) {
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "cannot generate UUID");
+ goto fail;
+ }
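+
+   /* Ask the kernel whether bit-6 address swizzling is in effect for
+    * X-tiled buffers; isl needs this to lay out tiled surfaces correctly
+    * and it replaces the old "XXX: Actually detect bit6 swizzling" stub.
+    */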
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
/* GENs prior to 8 do not support EU/Subslice info */
device->info.max_cs_threads = max_cs_threads;
}
- close(fd);
-
brw_process_intel_debug_variable();
device->compiler = brw_compiler_create(NULL, &device->info);
device->compiler->shader_perf_log = compiler_perf_log;
result = anv_init_wsi(device);
- if (result != VK_SUCCESS)
- goto fail;
+ if (result != VK_SUCCESS) {
+ ralloc_free(device->compiler);
+ goto fail;
+ }
- /* XXX: Actually detect bit6 swizzling */
isl_device_init(&device->isl_dev, &device->info, swizzled);
+ device->local_fd = fd;
return VK_SUCCESS;
 fail:
   close(fd);
   return result;
}

void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   ralloc_free(device->compiler);
+   close(device->local_fd);
}
static const VkExtensionProperties global_extensions[] = {
#ifdef VK_USE_PLATFORM_XCB_KHR
{
.extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
- .specVersion = 5,
+ .specVersion = 6,
},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
{
.extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
- .specVersion = 5,
+ .specVersion = 6,
},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
{
.extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
- .specVersion = 4,
+ .specVersion = 5,
},
#endif
+ {
+ .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static const VkExtensionProperties device_extensions[] = {
{
.extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
- .specVersion = 67,
+ .specVersion = 68,
+ },
+ {
+ .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+ {
+ .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
+ .specVersion = 1,
},
+ {
+ .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+ {
+ .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+ {
+ .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+ .specVersion = 1,
+ }
};
static void *
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
- instance = anv_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
+ instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
{
ANV_FROM_HANDLE(anv_instance, instance, _instance);
+ if (!instance)
+ return;
+
if (instance->physicalDeviceCount > 0) {
/* We support at most one physical device. */
assert(instance->physicalDeviceCount == 1);
_mesa_locale_fini();
- anv_free(&instance->alloc, instance);
+ vk_free(&instance->alloc, instance);
}
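+/* Walk the DRM devices reported by libdrm and initialize the first Intel
+ * (PCI vendor 0x8086) render node we find; we expose at most one physical
+ * device.
+ */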
+static VkResult
+anv_enumerate_devices(struct anv_instance *instance)
+{
+ /* TODO: Check for more devices ? */
+ drmDevicePtr devices[8];
+ VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
+ int max_devices;
+
+ instance->physicalDeviceCount = 0;
+
+   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+ if (max_devices < 1)
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+
+ for (unsigned i = 0; i < (unsigned)max_devices; i++) {
+ if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
+ devices[i]->bustype == DRM_BUS_PCI &&
+ devices[i]->deviceinfo.pci->vendor_id == 0x8086) {
+
+ result = anv_physical_device_init(&instance->physicalDevice,
+ instance,
+ devices[i]->nodes[DRM_NODE_RENDER]);
+ if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ break;
+ }
+ }
+
+ if (result == VK_SUCCESS)
+ instance->physicalDeviceCount = 1;
+
+ return result;
+}
+
VkResult anv_EnumeratePhysicalDevices(
VkInstance _instance,
uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices)
{
ANV_FROM_HANDLE(anv_instance, instance, _instance);
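+   /* vk_outarray implements the standard Vulkan enumeration contract:
+    * with a NULL array it only counts, otherwise it writes at most
+    * *pPhysicalDeviceCount entries and reports VK_INCOMPLETE on overflow.
+    */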
+ VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
VkResult result;
if (instance->physicalDeviceCount < 0) {
- char path[20];
- for (unsigned i = 0; i < 8; i++) {
- snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
- result = anv_physical_device_init(&instance->physicalDevice,
- instance, path);
- if (result == VK_SUCCESS)
- break;
- }
-
- if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
- instance->physicalDeviceCount = 0;
- } else if (result == VK_SUCCESS) {
- instance->physicalDeviceCount = 1;
- } else {
+ result = anv_enumerate_devices(instance);
+ if (result != VK_SUCCESS &&
+ result != VK_ERROR_INCOMPATIBLE_DRIVER)
return result;
- }
}
- /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
- * otherwise it's an inout parameter.
- *
- * The Vulkan spec (git aaed022) says:
- *
- * pPhysicalDeviceCount is a pointer to an unsigned integer variable
- * that is initialized with the number of devices the application is
- * prepared to receive handles to. pname:pPhysicalDevices is pointer to
- * an array of at least this many VkPhysicalDevice handles [...].
- *
- * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
- * overwrites the contents of the variable pointed to by
- * pPhysicalDeviceCount with the number of physical devices in in the
- * instance; otherwise, vkEnumeratePhysicalDevices overwrites
- * pPhysicalDeviceCount with the number of physical handles written to
- * pPhysicalDevices.
- */
- if (!pPhysicalDevices) {
- *pPhysicalDeviceCount = instance->physicalDeviceCount;
- } else if (*pPhysicalDeviceCount >= 1) {
- pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
- *pPhysicalDeviceCount = 1;
- } else if (*pPhysicalDeviceCount < instance->physicalDeviceCount) {
- return VK_INCOMPLETE;
- } else {
- *pPhysicalDeviceCount = 0;
+ if (instance->physicalDeviceCount > 0) {
+ assert(instance->physicalDeviceCount == 1);
+ vk_outarray_append(&out, i) {
+ *i = anv_physical_device_to_handle(&instance->physicalDevice);
+ }
}
- return VK_SUCCESS;
+ return vk_outarray_status(&out);
}
void anv_GetPhysicalDeviceFeatures(
*pFeatures = (VkPhysicalDeviceFeatures) {
.robustBufferAccess = true,
.fullDrawIndexUint32 = true,
- .imageCubeArray = false,
+ .imageCubeArray = true,
.independentBlend = true,
.geometryShader = true,
- .tessellationShader = false,
+ .tessellationShader = true,
.sampleRateShading = true,
.dualSrcBlend = true,
.logicOp = true,
.multiDrawIndirect = false,
- .drawIndirectFirstInstance = false,
+ .drawIndirectFirstInstance = true,
.depthClamp = true,
- .depthBiasClamp = false,
+ .depthBiasClamp = true,
.fillModeNonSolid = true,
.depthBounds = false,
.wideLines = true,
.pipelineStatisticsQuery = false,
.fragmentStoresAndAtomics = true,
.shaderTessellationAndGeometryPointSize = true,
- .shaderImageGatherExtended = false,
- .shaderStorageImageExtendedFormats = false,
+ .shaderImageGatherExtended = true,
+ .shaderStorageImageExtendedFormats = true,
.shaderStorageImageMultisample = false,
+ .shaderStorageImageReadWithoutFormat = false,
+ .shaderStorageImageWriteWithoutFormat = true,
.shaderUniformBufferArrayDynamicIndexing = true,
.shaderSampledImageArrayDynamicIndexing = true,
.shaderStorageBufferArrayDynamicIndexing = true,
.shaderStorageImageArrayDynamicIndexing = true,
- .shaderStorageImageReadWithoutFormat = false,
- .shaderStorageImageWriteWithoutFormat = true,
- .shaderClipDistance = false,
- .shaderCullDistance = false,
- .shaderFloat64 = false,
- .shaderInt64 = false,
+ .shaderClipDistance = true,
+ .shaderCullDistance = true,
+ .shaderFloat64 = pdevice->info.gen >= 8,
+ .shaderInt64 = pdevice->info.gen >= 8,
.shaderInt16 = false,
- .alphaToOne = true,
+ .shaderResourceMinLod = false,
.variableMultisampleRate = false,
.inheritedQueries = false,
};
pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
}
-void
-anv_device_get_cache_uuid(void *uuid)
+void anv_GetPhysicalDeviceFeatures2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceFeatures2KHR* pFeatures)
{
- memset(uuid, 0, VK_UUID_SIZE);
- snprintf(uuid, VK_UUID_SIZE, "anv-%s", ANV_TIMESTAMP);
+ anv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
+
+ vk_foreach_struct(ext, pFeatures->pNext) {
+ switch (ext->sType) {
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
}
void anv_GetPhysicalDeviceProperties(
.maxPerStageResources = 128,
.maxDescriptorSetSamplers = 256,
.maxDescriptorSetUniformBuffers = 256,
- .maxDescriptorSetUniformBuffersDynamic = 256,
+ .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
.maxDescriptorSetStorageBuffers = 256,
- .maxDescriptorSetStorageBuffersDynamic = 256,
+ .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
.maxDescriptorSetSampledImages = 256,
.maxDescriptorSetStorageImages = 256,
.maxDescriptorSetInputAttachments = 256,
- .maxVertexInputAttributes = 32,
- .maxVertexInputBindings = 32,
+ .maxVertexInputAttributes = MAX_VBS,
+ .maxVertexInputBindings = MAX_VBS,
.maxVertexInputAttributeOffset = 2047,
.maxVertexInputBindingStride = 2048,
.maxVertexOutputComponents = 128,
- .maxTessellationGenerationLevel = 0,
- .maxTessellationPatchSize = 0,
- .maxTessellationControlPerVertexInputComponents = 0,
- .maxTessellationControlPerVertexOutputComponents = 0,
- .maxTessellationControlPerPatchOutputComponents = 0,
- .maxTessellationControlTotalOutputComponents = 0,
- .maxTessellationEvaluationInputComponents = 0,
- .maxTessellationEvaluationOutputComponents = 0,
+ .maxTessellationGenerationLevel = 64,
+ .maxTessellationPatchSize = 32,
+ .maxTessellationControlPerVertexInputComponents = 128,
+ .maxTessellationControlPerVertexOutputComponents = 128,
+ .maxTessellationControlPerPatchOutputComponents = 128,
+ .maxTessellationControlTotalOutputComponents = 2048,
+ .maxTessellationEvaluationInputComponents = 128,
+ .maxTessellationEvaluationOutputComponents = 128,
.maxGeometryShaderInvocations = 32,
.maxGeometryInputComponents = 64,
.maxGeometryOutputComponents = 128,
.maxGeometryTotalOutputComponents = 1024,
.maxFragmentInputComponents = 128,
.maxFragmentOutputAttachments = 8,
- .maxFragmentDualSrcAttachments = 2,
+ .maxFragmentDualSrcAttachments = 1,
.maxFragmentCombinedOutputResources = 8,
.maxComputeSharedMemorySize = 32768,
.maxComputeWorkGroupCount = { 65535, 65535, 65535 },
.viewportSubPixelBits = 13, /* We take a float? */
.minMemoryMapAlignment = 4096, /* A page */
.minTexelBufferOffsetAlignment = 1,
- .minUniformBufferOffsetAlignment = 1,
- .minStorageBufferOffsetAlignment = 1,
+ .minUniformBufferOffsetAlignment = 16,
+ .minStorageBufferOffsetAlignment = 4,
.minTexelOffset = -8,
.maxTexelOffset = 7,
- .minTexelGatherOffset = -8,
- .maxTexelGatherOffset = 7,
+ .minTexelGatherOffset = -32,
+ .maxTexelGatherOffset = 31,
.minInterpolationOffset = -0.5,
.maxInterpolationOffset = 0.4375,
.subPixelInterpolationOffsetBits = 4,
.maxFramebufferWidth = (1 << 14),
.maxFramebufferHeight = (1 << 14),
- .maxFramebufferLayers = (1 << 10),
+ .maxFramebufferLayers = (1 << 11),
.framebufferColorSampleCounts = sample_counts,
.framebufferDepthSampleCounts = sample_counts,
.framebufferStencilSampleCounts = sample_counts,
.maxSampleMaskWords = 1,
.timestampComputeAndGraphics = false,
.timestampPeriod = time_stamp_base,
- .maxClipDistances = 0 /* FIXME */,
- .maxCullDistances = 0 /* FIXME */,
- .maxCombinedClipAndCullDistances = 0 /* FIXME */,
+ .maxClipDistances = 8,
+ .maxCullDistances = 8,
+ .maxCombinedClipAndCullDistances = 8,
.discreteQueuePriorities = 1,
.pointSizeRange = { 0.125, 255.875 },
.lineWidthRange = { 0.0, 7.9921875 },
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(1, 0, 5),
+ .apiVersion = VK_MAKE_VERSION(1, 0, 42),
.driverVersion = 1,
.vendorID = 0x8086,
.deviceID = pdevice->chipset_id,
};
strcpy(pProperties->deviceName, pdevice->name);
- anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
+ memcpy(pProperties->pipelineCacheUUID, pdevice->uuid, VK_UUID_SIZE);
+}
+
+void anv_GetPhysicalDeviceProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties2KHR* pProperties)
+{
+ anv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+
+ vk_foreach_struct(ext, pProperties->pNext) {
+      switch (ext->sType) {
+      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
+         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
+            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
+
+         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
+         break;
+      }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
}
+/* We support exactly one queue family. */
+static const VkQueueFamilyProperties
+anv_queue_family_properties = {
+ .queueFlags = VK_QUEUE_GRAPHICS_BIT |
+ VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT,
+ .queueCount = 1,
+ .timestampValidBits = 36, /* XXX: Real value here */
+ .minImageTransferGranularity = { 1, 1, 1 },
+};
+
void anv_GetPhysicalDeviceQueueFamilyProperties(
VkPhysicalDevice physicalDevice,
uint32_t* pCount,
VkQueueFamilyProperties* pQueueFamilyProperties)
{
-   if (pQueueFamilyProperties == NULL) {
-      *pCount = 1;
-      return;
-   }
-
-   assert(*pCount >= 1);
-
-   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
-      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
-                    VK_QUEUE_COMPUTE_BIT |
-                    VK_QUEUE_TRANSFER_BIT,
-      .queueCount = 1,
-      .timestampValidBits = 36, /* XXX: Real value here */
-      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
-   };
+   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pCount);
+
+   vk_outarray_append(&out, p) {
+      *p = anv_queue_family_properties;
+   }
+}
+
+void anv_GetPhysicalDeviceQueueFamilyProperties2KHR(
+    VkPhysicalDevice                            physicalDevice,
+    uint32_t*                                   pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties2KHR*                pQueueFamilyProperties)
+{
+   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
+
+   vk_outarray_append(&out, p) {
+      p->queueFamilyProperties = anv_queue_family_properties;
+
+      vk_foreach_struct(s, p->pNext) {
+         anv_debug_ignored_stype(s->sType);
+      }
+   }
}
void anv_GetPhysicalDeviceMemoryProperties(
};
}
+void anv_GetPhysicalDeviceMemoryProperties2KHR(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2KHR* pMemoryProperties)
+{
+ anv_GetPhysicalDeviceMemoryProperties(physicalDevice,
+ &pMemoryProperties->memoryProperties);
+
+ vk_foreach_struct(ext, pMemoryProperties->pNext) {
+ switch (ext->sType) {
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
+}
+
PFN_vkVoidFunction anv_GetInstanceProcAddr(
VkInstance instance,
const char* pName)
return anv_lookup_entrypoint(&device->info, pName);
}
-static VkResult
+static void
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
queue->device = device;
queue->pool = &device->surface_state_pool;
-
- return VK_SUCCESS;
}
static void
state = anv_state_pool_alloc(pool, size, align);
memcpy(state.map, p, size);
- if (!pool->block_pool->device->info.has_llc)
- anv_state_clflush(state);
+ anv_state_flush(pool->block_pool->device, state);
return state;
}
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec2_objects[1];
- struct anv_bo bo;
+ struct anv_bo bo, *exec_bos[1];
VkResult result = VK_SUCCESS;
uint32_t size;
int64_t timeout;
memcpy(bo.map, batch->start, size);
if (!device->info.has_llc)
- anv_clflush_range(bo.map, size);
+ anv_flush_range(bo.map, size);
+ exec_bos[0] = &bo;
exec2_objects[0].handle = bo.gem_handle;
exec2_objects[0].relocation_count = 0;
exec2_objects[0].relocs_ptr = 0;
execbuf.rsvd1 = device->context_id;
execbuf.rsvd2 = 0;
- ret = anv_gem_execbuffer(device, &execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+ result = anv_device_execbuf(device, &execbuf, exec_bos);
+ if (result != VK_SUCCESS)
goto fail;
- }
timeout = INT64_MAX;
ret = anv_gem_wait(device, bo.gem_handle, &timeout);
if (ret != 0) {
/* We don't know the real error. */
- result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+ result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
goto fail;
}
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
- device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
+ device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
pCreateInfo->pEnabledFeatures->robustBufferAccess;
- pthread_mutex_init(&device->mutex, NULL);
+ if (pthread_mutex_init(&device->mutex, NULL) != 0) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_context_id;
+ }
+
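+   /* Use a monotonic clock for the queue_submit condition variable so the
+    * relative timeouts computed in anv_WaitForFences() are immune to
+    * wall-clock adjustments.
+    */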
+ pthread_condattr_t condattr;
+ if (pthread_condattr_init(&condattr) != 0) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
+ if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
+ pthread_condattr_destroy(&condattr);
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
+   if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
+ pthread_condattr_destroy(&condattr);
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ goto fail_mutex;
+ }
+ pthread_condattr_destroy(&condattr);
anv_bo_pool_init(&device->batch_bo_pool, device);
- anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
+ result = anv_block_pool_init(&device->dynamic_state_block_pool, device,
+ 16384);
+ if (result != VK_SUCCESS)
+ goto fail_batch_bo_pool;
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
+ result = anv_block_pool_init(&device->instruction_block_pool, device,
+ 1024 * 1024);
+ if (result != VK_SUCCESS)
+ goto fail_dynamic_state_pool;
+
anv_state_pool_init(&device->instruction_state_pool,
&device->instruction_block_pool);
- anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
+ result = anv_block_pool_init(&device->surface_state_block_pool, device,
+ 4096);
+ if (result != VK_SUCCESS)
+ goto fail_instruction_state_pool;
anv_state_pool_init(&device->surface_state_pool,
&device->surface_state_block_pool);
- anv_bo_init_new(&device->workaround_bo, device, 1024);
+ result = anv_bo_init_new(&device->workaround_bo, device, 1024);
+ if (result != VK_SUCCESS)
+ goto fail_surface_state_pool;
anv_scratch_pool_init(device, &device->scratch_pool);
unreachable("unhandled gen");
}
if (result != VK_SUCCESS)
- goto fail_fd;
+ goto fail_workaround_bo;
anv_device_init_blorp(device);
return VK_SUCCESS;
+ fail_workaround_bo:
+ anv_queue_finish(&device->queue);
+ anv_scratch_pool_finish(device, &device->scratch_pool);
+ anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+ anv_gem_close(device, device->workaround_bo.gem_handle);
+ fail_surface_state_pool:
+ anv_state_pool_finish(&device->surface_state_pool);
+ anv_block_pool_finish(&device->surface_state_block_pool);
+ fail_instruction_state_pool:
+ anv_state_pool_finish(&device->instruction_state_pool);
+ anv_block_pool_finish(&device->instruction_block_pool);
+ fail_dynamic_state_pool:
+ anv_state_pool_finish(&device->dynamic_state_pool);
+ anv_block_pool_finish(&device->dynamic_state_block_pool);
+ fail_batch_bo_pool:
+ anv_bo_pool_finish(&device->batch_bo_pool);
+ pthread_cond_destroy(&device->queue_submit);
+ fail_mutex:
+ pthread_mutex_destroy(&device->mutex);
+ fail_context_id:
+ anv_gem_destroy_context(device, device->context_id);
fail_fd:
close(device->fd);
fail_device:
- anv_free(&device->alloc, device);
+ vk_free(&device->alloc, device);
return result;
}
{
ANV_FROM_HANDLE(anv_device, device, _device);
- anv_queue_finish(&device->queue);
+ if (!device)
+ return;
anv_device_finish_blorp(device);
+ anv_queue_finish(&device->queue);
+
#ifdef HAVE_VALGRIND
/* We only need to free these to prevent valgrind errors. The backing
* BO will go away in a couple of lines so we don't actually leak.
anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif
+ anv_scratch_pool_finish(device, &device->scratch_pool);
+
anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
anv_gem_close(device, device->workaround_bo.gem_handle);
- anv_bo_pool_finish(&device->batch_bo_pool);
- anv_state_pool_finish(&device->dynamic_state_pool);
- anv_block_pool_finish(&device->dynamic_state_block_pool);
- anv_state_pool_finish(&device->instruction_state_pool);
- anv_block_pool_finish(&device->instruction_block_pool);
anv_state_pool_finish(&device->surface_state_pool);
anv_block_pool_finish(&device->surface_state_block_pool);
- anv_scratch_pool_finish(device, &device->scratch_pool);
+ anv_state_pool_finish(&device->instruction_state_pool);
+ anv_block_pool_finish(&device->instruction_block_pool);
+ anv_state_pool_finish(&device->dynamic_state_pool);
+ anv_block_pool_finish(&device->dynamic_state_block_pool);
- close(device->fd);
+ anv_bo_pool_finish(&device->batch_bo_pool);
+ pthread_cond_destroy(&device->queue_submit);
pthread_mutex_destroy(&device->mutex);
- anv_free(&device->alloc, device);
+ anv_gem_destroy_context(device, device->context_id);
+
+ close(device->fd);
+
+ vk_free(&device->alloc, device);
}
VkResult anv_EnumerateInstanceExtensionProperties(
return VK_SUCCESS;
}
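+   /* Copy as many entries as the caller has room for and, per the standard
+    * Vulkan enumeration contract, return VK_INCOMPLETE if the provided
+    * array was too small.
+    */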
- assert(*pPropertyCount >= ARRAY_SIZE(global_extensions));
+ *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(global_extensions));
+ typed_memcpy(pProperties, global_extensions, *pPropertyCount);
- *pPropertyCount = ARRAY_SIZE(global_extensions);
- memcpy(pProperties, global_extensions, sizeof(global_extensions));
+ if (*pPropertyCount < ARRAY_SIZE(global_extensions))
+ return VK_INCOMPLETE;
return VK_SUCCESS;
}
return VK_SUCCESS;
}
- assert(*pPropertyCount >= ARRAY_SIZE(device_extensions));
+ *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(device_extensions));
+ typed_memcpy(pProperties, device_extensions, *pPropertyCount);
- *pPropertyCount = ARRAY_SIZE(device_extensions);
- memcpy(pProperties, device_extensions, sizeof(device_extensions));
+ if (*pPropertyCount < ARRAY_SIZE(device_extensions))
+ return VK_INCOMPLETE;
return VK_SUCCESS;
}
*pQueue = anv_queue_to_handle(&device->queue);
}
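+/* Thin wrapper around the execbuf2 ioctl that, on success, writes the
+ * offsets chosen by the kernel back into our anv_bo structs so later
+ * submissions can use them as presumed offsets.
+ */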
+VkResult
+anv_device_execbuf(struct anv_device *device,
+ struct drm_i915_gem_execbuffer2 *execbuf,
+ struct anv_bo **execbuf_bos)
+{
+ int ret = anv_gem_execbuffer(device, execbuf);
+ if (ret != 0) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
+ }
+
+ struct drm_i915_gem_exec_object2 *objects =
+ (void *)(uintptr_t)execbuf->buffers_ptr;
+ for (uint32_t k = 0; k < execbuf->buffer_count; k++)
+ execbuf_bos[k]->offset = objects[k].offset;
+
+ return VK_SUCCESS;
+}
+
VkResult anv_QueueSubmit(
VkQueue _queue,
uint32_t submitCount,
ANV_FROM_HANDLE(anv_queue, queue, _queue);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
struct anv_device *device = queue->device;
- int ret;
+ VkResult result = VK_SUCCESS;
+
+ /* We lock around QueueSubmit for three main reasons:
+ *
+ * 1) When a block pool is resized, we create a new gem handle with a
+ * different size and, in the case of surface states, possibly a
+ * different center offset but we re-use the same anv_bo struct when
+ * we do so. If this happens in the middle of setting up an execbuf,
+ * we could end up with our list of BOs out of sync with our list of
+ * gem handles.
+ *
+ * 2) The algorithm we use for building the list of unique buffers isn't
+ * thread-safe. While the client is supposed to syncronize around
+ * QueueSubmit, this would be extremely difficult to debug if it ever
+ * came up in the wild due to a broken app. It's better to play it
+ * safe and just lock around QueueSubmit.
+ *
+ * 3) The anv_cmd_buffer_execbuf function may perform relocations in
+ * userspace. Due to the fact that the surface state buffer is shared
+ * between batches, we can't afford to have that happen from multiple
+ * threads at the same time. Even though the user is supposed to
+ * ensure this doesn't happen, we play it safe as in (2) above.
+ *
+ * Since the only other things that ever take the device lock such as block
+ * pool resize only rarely happen, this will almost never be contended so
+ * taking a lock isn't really an expensive operation in this case.
+ */
+ pthread_mutex_lock(&device->mutex);
for (uint32_t i = 0; i < submitCount; i++) {
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ assert(!anv_batch_has_error(&cmd_buffer->batch));
- ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "execbuf2 failed: %m");
- }
-
- for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
- cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
+ result = anv_cmd_buffer_execbuf(device, cmd_buffer);
+ if (result != VK_SUCCESS)
+ goto out;
}
}
if (fence) {
- ret = anv_gem_execbuffer(device, &fence->execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "execbuf2 failed: %m");
- }
+ struct anv_bo *fence_bo = &fence->bo;
+ result = anv_device_execbuf(device, &fence->execbuf, &fence_bo);
+ if (result != VK_SUCCESS)
+ goto out;
+
+ /* Update the fence and wake up any waiters */
+ assert(fence->state == ANV_FENCE_STATE_RESET);
+ fence->state = ANV_FENCE_STATE_SUBMITTED;
+ pthread_cond_broadcast(&device->queue_submit);
}
- return VK_SUCCESS;
+out:
+ pthread_mutex_unlock(&device->mutex);
+
+ return result;
}
VkResult anv_QueueWaitIdle(
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
- bo->gem_handle = anv_gem_create(device, size);
- if (!bo->gem_handle)
+ uint32_t gem_handle = anv_gem_create(device, size);
+ if (!gem_handle)
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
- bo->map = NULL;
- bo->index = 0;
- bo->offset = 0;
- bo->size = size;
- bo->is_winsys_bo = false;
+ anv_bo_init(bo, gem_handle, size);
return VK_SUCCESS;
}
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
- if (pAllocateInfo->allocationSize == 0) {
- /* Apparently, this is allowed */
- *pMem = VK_NULL_HANDLE;
- return VK_SUCCESS;
- }
+ /* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
+ assert(pAllocateInfo->allocationSize > 0);
/* We support exactly one memory heap. */
assert(pAllocateInfo->memoryTypeIndex == 0 ||
/* FINISHME: Fail if allocation request exceeds heap size. */
- mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+ mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
mem->type_index = pAllocateInfo->memoryTypeIndex;
+ mem->map = NULL;
+ mem->map_size = 0;
+
*pMem = anv_device_memory_to_handle(mem);
return VK_SUCCESS;
fail:
- anv_free2(&device->alloc, pAllocator, mem);
+ vk_free2(&device->alloc, pAllocator, mem);
return result;
}
if (mem == NULL)
return;
+ if (mem->map)
+ anv_UnmapMemory(_device, _mem);
+
if (mem->bo.map)
anv_gem_munmap(mem->bo.map, mem->bo.size);
if (mem->bo.gem_handle != 0)
anv_gem_close(device, mem->bo.gem_handle);
- anv_free2(&device->alloc, pAllocator, mem);
+ vk_free2(&device->alloc, pAllocator, mem);
}
VkResult anv_MapMemory(
if (size == VK_WHOLE_SIZE)
size = mem->bo.size - offset;
+ /* From the Vulkan spec version 1.0.32 docs for MapMemory:
+ *
+ * * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
+ * * If size is not equal to VK_WHOLE_SIZE, size must be less than or
+ * equal to the size of the memory minus offset
+ */
+ assert(size > 0);
+ assert(offset + size <= mem->bo.size);
+
/* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
* takes a VkDeviceMemory pointer, it seems like only one map of the memory
* at a time is valid. We could just mmap up front and return an offset
/* Let's map whole pages */
map_size = align_u64(map_size, 4096);
- mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
- map_offset, map_size, gem_flags);
+ void *map = anv_gem_mmap(device, mem->bo.gem_handle,
+ map_offset, map_size, gem_flags);
+ if (map == MAP_FAILED)
+ return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
+
+ mem->map = map;
mem->map_size = map_size;
*ppData = mem->map + (offset - map_offset);
return;
anv_gem_munmap(mem->map, mem->map_size);
+
+ mem->map = NULL;
+ mem->map_size = 0;
}
static void
{
for (uint32_t i = 0; i < count; i++) {
ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
- void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
- void *end;
-
- if (ranges[i].offset + ranges[i].size > mem->map_size)
- end = mem->map + mem->map_size;
- else
- end = mem->map + ranges[i].offset + ranges[i].size;
+ if (ranges[i].offset >= mem->map_size)
+ continue;
- while (p < end) {
- __builtin_ia32_clflush(p);
- p += CACHELINE_SIZE;
- }
+ anv_clflush_range(mem->map + ranges[i].offset,
+ MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
}
}
}
void anv_GetBufferMemoryRequirements(
- VkDevice device,
+ VkDevice _device,
VkBuffer _buffer,
VkMemoryRequirements* pMemoryRequirements)
{
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ ANV_FROM_HANDLE(anv_device, device, _device);
/* The Vulkan spec (git aaed022) says:
*
* only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
* structure for the physical device is supported.
*
- * We support exactly one memory type.
+ * We support exactly one memory type on LLC, two on non-LLC.
*/
- pMemoryRequirements->memoryTypeBits = 1;
+ pMemoryRequirements->memoryTypeBits = device->info.has_llc ? 1 : 3;
pMemoryRequirements->size = buffer->size;
pMemoryRequirements->alignment = 16;
}
void anv_GetImageMemoryRequirements(
- VkDevice device,
+ VkDevice _device,
VkImage _image,
VkMemoryRequirements* pMemoryRequirements)
{
ANV_FROM_HANDLE(anv_image, image, _image);
+ ANV_FROM_HANDLE(anv_device, device, _device);
/* The Vulkan spec (git aaed022) says:
*
* only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
* structure for the physical device is supported.
*
- * We support exactly one memory type.
+ * We support exactly one memory type on LLC, two on non-LLC.
*/
- pMemoryRequirements->memoryTypeBits = 1;
+ pMemoryRequirements->memoryTypeBits = device->info.has_llc ? 1 : 3;
pMemoryRequirements->size = image->size;
pMemoryRequirements->alignment = image->alignment;
uint32_t* pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
- stub();
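+   /* We don't support sparse resources, so reporting zero requirements is
+    * the spec-compliant answer.
+    */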
+ *pSparseMemoryRequirementCount = 0;
}
void anv_GetDeviceMemoryCommitment(
const VkBindSparseInfo* pBindInfo,
VkFence fence)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult anv_CreateFence(
fence->execbuf.rsvd1 = device->context_id;
fence->execbuf.rsvd2 = 0;
- fence->ready = false;
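+   /* Fences now follow a three-state lifecycle (RESET -> SUBMITTED ->
+    * SIGNALED) instead of a single boolean, so GetFenceStatus and
+    * WaitForFences can distinguish "never submitted" from "in flight".
+    */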
+ if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
+ fence->state = ANV_FENCE_STATE_SIGNALED;
+ } else {
+ fence->state = ANV_FENCE_STATE_RESET;
+ }
*pFence = anv_fence_to_handle(fence);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_fence, fence, _fence);
+ if (!fence)
+ return;
+
assert(fence->bo.map == fence);
anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}
{
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- fence->ready = false;
+ fence->state = ANV_FENCE_STATE_RESET;
}
return VK_SUCCESS;
int64_t t = 0;
int ret;
- if (fence->ready)
- return VK_SUCCESS;
+ switch (fence->state) {
+ case ANV_FENCE_STATE_RESET:
+ /* If it hasn't even been sent off to the GPU yet, it's not ready */
+ return VK_NOT_READY;
- ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
- if (ret == 0) {
- fence->ready = true;
+ case ANV_FENCE_STATE_SIGNALED:
+ /* It's been signaled, return success */
return VK_SUCCESS;
- }
- return VK_NOT_READY;
+ case ANV_FENCE_STATE_SUBMITTED:
+ /* It's been submitted to the GPU but we don't know if it's done yet. */
+ ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
+ if (ret == 0) {
+ fence->state = ANV_FENCE_STATE_SIGNALED;
+ return VK_SUCCESS;
+ } else {
+ return VK_NOT_READY;
+ }
+ default:
+ unreachable("Invalid fence status");
+ }
}
+#define NSEC_PER_SEC 1000000000
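+/* Largest positive value representable in a signed integer of the given
+ * type's size, e.g. INT_TYPE_MAX(int64_t) == INT64_MAX; used below to
+ * clamp timespec::tv_sec on 32-bit systems.
+ */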
+#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
+
VkResult anv_WaitForFences(
VkDevice _device,
uint32_t fenceCount,
const VkFence* pFences,
VkBool32 waitAll,
- uint64_t timeout)
+ uint64_t _timeout)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ int ret;
/* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0.  Unfortunately, this was
    * broken in some kernel releases, and since there is no way to tell
    * whether the running kernel is affected, the best we can do is to
    * clamp the timeout to INT64_MAX.  This limits the
* maximum timeout from 584 years to 292 years - likely not a big deal.
*/
- if (timeout > INT64_MAX)
- timeout = INT64_MAX;
-
- int64_t t = timeout;
+ int64_t timeout = MIN2(_timeout, INT64_MAX);
+
+ uint32_t pending_fences = fenceCount;
+ while (pending_fences) {
+ pending_fences = 0;
+ bool signaled_fences = false;
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+ switch (fence->state) {
+ case ANV_FENCE_STATE_RESET:
+ /* This fence hasn't been submitted yet, we'll catch it the next
+ * time around. Yes, this may mean we dead-loop but, short of
+ * lots of locking and a condition variable, there's not much that
+ * we can do about that.
+ */
+ pending_fences++;
+ continue;
+
+ case ANV_FENCE_STATE_SIGNALED:
+ /* This fence is not pending. If waitAll isn't set, we can return
+ * early. Otherwise, we have to keep going.
+ */
+ if (!waitAll)
+ return VK_SUCCESS;
+ continue;
+
+ case ANV_FENCE_STATE_SUBMITTED:
+ /* These are the fences we really care about. Go ahead and wait
+ * on it until we hit a timeout.
+ */
+ ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
+ if (ret == -1 && errno == ETIME) {
+ return VK_TIMEOUT;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ } else {
+ fence->state = ANV_FENCE_STATE_SIGNALED;
+ signaled_fences = true;
+ if (!waitAll)
+ return VK_SUCCESS;
+ continue;
+ }
+ }
+ }
- /* FIXME: handle !waitAll */
+ if (pending_fences && !signaled_fences) {
+ /* If we've hit this then someone decided to vkWaitForFences before
+ * they've actually submitted any of them to a queue. This is a
+ * fairly pessimal case, so it's ok to lock here and use a standard
+ * pthreads condition variable.
+ */
+ pthread_mutex_lock(&device->mutex);
+
+ /* It's possible that some of the fences have changed state since the
+ * last time we checked. Now that we have the lock, check for
+ * pending fences again and don't wait if it's changed.
+ */
+ uint32_t now_pending_fences = 0;
+ for (uint32_t i = 0; i < fenceCount; i++) {
+ ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
+ if (fence->state == ANV_FENCE_STATE_RESET)
+ now_pending_fences++;
+ }
+ assert(now_pending_fences <= pending_fences);
+
+ if (now_pending_fences == pending_fences) {
+ struct timespec before;
+ clock_gettime(CLOCK_MONOTONIC, &before);
+
+ uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
+ uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
+ (timeout / NSEC_PER_SEC);
+ abs_nsec %= NSEC_PER_SEC;
+
+ /* Avoid roll-over in tv_sec on 32-bit systems if the user
+ * provided timeout is UINT64_MAX
+ */
+ struct timespec abstime;
+ abstime.tv_nsec = abs_nsec;
+ abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));
+
+ ret = pthread_cond_timedwait(&device->queue_submit,
+ &device->mutex, &abstime);
+ assert(ret != EINVAL);
+
+ struct timespec after;
+ clock_gettime(CLOCK_MONOTONIC, &after);
+ uint64_t time_elapsed =
+ ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
+ ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);
+
+ if (time_elapsed >= timeout) {
+ pthread_mutex_unlock(&device->mutex);
+ return VK_TIMEOUT;
+ }
+
+ timeout -= time_elapsed;
+ }
- for (uint32_t i = 0; i < fenceCount; i++) {
- ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
- if (ret == -1 && errno == ETIME) {
- return VK_TIMEOUT;
- } else if (ret == -1) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "gem wait failed: %m");
+ pthread_mutex_unlock(&device->mutex);
}
}
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_event, event, _event);
+ if (!event)
+ return;
+
anv_state_pool_free(&device->dynamic_state_pool, event->state);
}
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
- buffer = anv_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+ buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- anv_free2(&device->alloc, pAllocator, buffer);
+ if (!buffer)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, buffer);
}
void
.format = format,
.stride = stride);
- if (!device->info.has_llc)
- anv_state_clflush(state);
+ anv_state_flush(device, state);
}
void anv_DestroySampler(
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
- anv_free2(&device->alloc, pAllocator, sampler);
+ if (!sampler)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, sampler);
}
VkResult anv_CreateFramebuffer(
size_t size = sizeof(*framebuffer) +
sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
- framebuffer = anv_alloc2(&device->alloc, pAllocator, size, 8,
+ framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (framebuffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
- anv_free2(&device->alloc, pAllocator, fb);
+ if (!fb)
+ return;
+
+ vk_free2(&device->alloc, pAllocator, fb);
+}
+
+/* vk_icd.h does not declare this function, so we declare it here to
+ * suppress Wmissing-prototypes.
+ */
+PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion);
+
+PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
+vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pSupportedVersion)
+{
+ /* For the full details on loader interface versioning, see
+ * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
+ * What follows is a condensed summary, to help you navigate the large and
+ * confusing official doc.
+ *
+ * - Loader interface v0 is incompatible with later versions. We don't
+ * support it.
+ *
+ * - In loader interface v1:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
+ * entrypoint.
+ * - The ICD must statically expose no other Vulkan symbol unless it is
+ * linked with -Bsymbolic.
+ * - Each dispatchable Vulkan handle created by the ICD must be
+ * a pointer to a struct whose first member is VK_LOADER_DATA. The
+ * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
+ * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
+ * vkDestroySurfaceKHR(). The ICD must be capable of working with
+ * such loader-managed surfaces.
+ *
+ * - Loader interface v2 differs from v1 in:
+ * - The first ICD entrypoint called by the loader is
+ * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
+ * statically expose this entrypoint.
+ *
+ * - Loader interface v3 differs from v2 in:
+ * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
+ *      vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
+ * because the loader no longer does so.
+ */
+ *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
+ return VK_SUCCESS;
}