anv: pCreateInfo->pApplicationInfo parameter to vkCreateInstance may be NULL
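
The Vulkan spec makes VkInstanceCreateInfo::pApplicationInfo optional, but
anv_CreateInstance previously dereferenced it unconditionally to read the
requested apiVersion. Treat a NULL pApplicationInfo as a request for API
version 1.0.0 instead.

Illustrative client-side sketch (not part of this patch) of the case this
change handles, assuming a standard Vulkan loader setup:

    /* Creating an instance without a VkApplicationInfo, which the spec allows. */
    #include <vulkan/vulkan.h>
    #include <stdio.h>

    int main(void)
    {
       VkInstanceCreateInfo info = {
          .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
          .pApplicationInfo = NULL, /* optional; the driver must not dereference it */
       };
       VkInstance instance;
       VkResult result = vkCreateInstance(&info, NULL, &instance);
       if (result != VK_SUCCESS) {
          fprintf(stderr, "vkCreateInstance failed: %d\n", result);
          return 1;
       }
       vkDestroyInstance(instance, NULL);
       return 0;
    }
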
diff --git a/src/vulkan/anv_device.c b/src/vulkan/anv_device.c
index c3dcd9f56ccabb5def015d6f6257d97c78b67344..68639068324bcf8a8e7b3506c43139301b22060e 100644
--- a/src/vulkan/anv_device.c
+++ b/src/vulkan/anv_device.c
@@ -30,6 +30,7 @@
 #include "anv_private.h"
 #include "mesa/main/git_sha1.h"
 #include "util/strtod.h"
+#include "util/debug.h"
 
 #include "gen7_pack.h"
 
@@ -87,12 +88,13 @@ anv_physical_device_init(struct anv_physical_device *device,
       fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
    } else if (device->info->gen == 7 && !device->info->is_baytrail) {
       fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
-   } else if (device->info->gen == 9) {
-      fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
-   } else if (device->info->gen == 8 && !device->info->is_cherryview) {
-      /* Broadwell is as fully supported as anything */
+   } else if (device->info->gen == 7 && device->info->is_baytrail) {
+      fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
+   } else if (device->info->gen >= 8) {
+      /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
+       * supported as anything. */
    } else {
-      result = vk_errorf(VK_UNSUPPORTED,
+      result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device->name);
       goto fail;
    }
@@ -115,12 +117,15 @@ anv_physical_device_init(struct anv_physical_device *device,
       goto fail;
    }
 
-   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
+   if (!device->info->has_llc &&
+       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
-                         "non-llc gpu");
+                         "kernel missing wc mmap");
       goto fail;
    }
 
+   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
+
    close(fd);
 
    brw_process_intel_debug_variable();
@@ -133,7 +138,8 @@ anv_physical_device_init(struct anv_physical_device *device,
    device->compiler->shader_debug_log = compiler_debug_log;
    device->compiler->shader_perf_log = compiler_perf_log;
 
-   isl_device_init(&device->isl_dev, device->info);
+   isl_device_init(&device->isl_dev, device->info, swizzled);
 
    return VK_SUCCESS;
 
@@ -148,56 +154,79 @@ anv_physical_device_finish(struct anv_physical_device *device)
    ralloc_free(device->compiler);
 }
 
-static void *default_alloc(
-    void*                                       pUserData,
-    size_t                                      size,
-    size_t                                      alignment,
-    VkSystemAllocType                           allocType)
-{
-   return malloc(size);
-}
-
-static void default_free(
-    void*                                       pUserData,
-    void*                                       pMem)
-{
-   free(pMem);
-}
-
-static const VkAllocCallbacks default_alloc_callbacks = {
-   .pUserData = NULL,
-   .pfnAlloc = default_alloc,
-   .pfnFree = default_free
-};
-
 static const VkExtensionProperties global_extensions[] = {
    {
-      .extensionName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
-      .specVersion = 17,
+      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
+      .specVersion = 25,
    },
+   {
+      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
+      .specVersion = 5,
+   },
+#ifdef HAVE_WAYLAND_PLATFORM
+   {
+      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+      .specVersion = 4,
+   },
+#endif
 };
 
 static const VkExtensionProperties device_extensions[] = {
    {
-      .extensionName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
-      .specVersion = 53,
+      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+      .specVersion = 67,
    },
 };
 
+static void *
+default_alloc_func(void *pUserData, size_t size, size_t align, 
+                   VkSystemAllocationScope allocationScope)
+{
+   return malloc(size);
+}
+
+static void *
+default_realloc_func(void *pUserData, void *pOriginal, size_t size,
+                     size_t align, VkSystemAllocationScope allocationScope)
+{
+   return realloc(pOriginal, size);
+}
+
+static void
+default_free_func(void *pUserData, void *pMemory)
+{
+   free(pMemory);
+}
+
+static const VkAllocationCallbacks default_alloc = {
+   .pUserData = NULL,
+   .pfnAllocation = default_alloc_func,
+   .pfnReallocation = default_realloc_func,
+   .pfnFree = default_free_func,
+};
+
 VkResult anv_CreateInstance(
     const VkInstanceCreateInfo*                 pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkInstance*                                 pInstance)
 {
    struct anv_instance *instance;
-   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
-   void *user_data = NULL;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
 
-   if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
-      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+   uint32_t client_version = pCreateInfo->pApplicationInfo ?
+                             pCreateInfo->pApplicationInfo->apiVersion :
+                             VK_MAKE_VERSION(1, 0, 0);
+   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
+       client_version > VK_MAKE_VERSION(1, 0, 3)) {
+      return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+                       "Client requested version %d.%d.%d",
+                       VK_VERSION_MAJOR(client_version),
+                       VK_VERSION_MINOR(client_version),
+                       VK_VERSION_PATCH(client_version));
+   }
 
-   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
       bool found = false;
       for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
          if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
@@ -210,20 +239,19 @@ VkResult anv_CreateInstance(
          return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
    }
 
-   if (pCreateInfo->pAllocCb) {
-      alloc_callbacks = pCreateInfo->pAllocCb;
-      user_data = pCreateInfo->pAllocCb->pUserData;
-   }
-   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
-                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   instance = anv_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
+                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (!instance)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
-   instance->pAllocUserData = alloc_callbacks->pUserData;
-   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
-   instance->pfnFree = alloc_callbacks->pfnFree;
-   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
+
+   if (pAllocator)
+      instance->alloc = *pAllocator;
+   else
+      instance->alloc = default_alloc;
+
+   instance->apiVersion = client_version;
    instance->physicalDeviceCount = -1;
 
    _mesa_locale_init();
@@ -238,7 +266,8 @@ VkResult anv_CreateInstance(
 }
 
 void anv_DestroyInstance(
-    VkInstance                                  _instance)
+    VkInstance                                  _instance,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_instance, instance, _instance);
 
@@ -254,31 +283,7 @@ void anv_DestroyInstance(
 
    _mesa_locale_fini();
 
-   instance->pfnFree(instance->pAllocUserData, instance);
-}
-
-void *
-anv_instance_alloc(struct anv_instance *instance, size_t size,
-                   size_t alignment, VkSystemAllocType allocType)
-{
-   void *mem = instance->pfnAlloc(instance->pAllocUserData,
-                                  size, alignment, allocType);
-   if (mem) {
-      VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
-      VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
-   }
-   return mem;
-}
-
-void
-anv_instance_free(struct anv_instance *instance, void *mem)
-{
-   if (mem == NULL)
-      return;
-
-   VG(VALGRIND_MEMPOOL_FREE(instance, mem));
-
-   instance->pfnFree(instance->pAllocUserData, mem);
+   anv_free(&instance->alloc, instance);
 }
 
 VkResult anv_EnumeratePhysicalDevices(
@@ -292,7 +297,7 @@ VkResult anv_EnumeratePhysicalDevices(
    if (instance->physicalDeviceCount < 0) {
       result = anv_physical_device_init(&instance->physicalDevice,
                                         instance, "/dev/dri/renderD128");
-      if (result == VK_UNSUPPORTED) {
+      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
          instance->physicalDeviceCount = 0;
       } else if (result == VK_SUCCESS) {
          instance->physicalDeviceCount = 1;
@@ -337,33 +342,34 @@ void anv_GetPhysicalDeviceFeatures(
    anv_finishme("Get correct values for PhysicalDeviceFeatures");
 
    *pFeatures = (VkPhysicalDeviceFeatures) {
-      .robustBufferAccess                       = false,
+      .robustBufferAccess                       = true,
       .fullDrawIndexUint32                      = false,
       .imageCubeArray                           = false,
       .independentBlend                         = false,
       .geometryShader                           = true,
       .tessellationShader                       = false,
       .sampleRateShading                        = false,
-      .dualSourceBlend                          = true,
+      .dualSrcBlend                             = true,
       .logicOp                                  = true,
-      .multiDrawIndirect                        = true,
-      .depthClip                                = false,
+      .multiDrawIndirect                        = false,
+      .drawIndirectFirstInstance                = false,
+      .depthClamp                               = false,
       .depthBiasClamp                           = false,
       .fillModeNonSolid                         = true,
       .depthBounds                              = false,
       .wideLines                                = true,
       .largePoints                              = true,
+      .alphaToOne                               = true,
+      .multiViewport                            = true,
+      .samplerAnisotropy                        = false, /* FINISHME */
       .textureCompressionETC2                   = true,
       .textureCompressionASTC_LDR               = true,
       .textureCompressionBC                     = true,
-      .occlusionQueryNonConservative            = false, /* FINISHME */
+      .occlusionQueryPrecise                    = false, /* FINISHME */
       .pipelineStatisticsQuery                  = true,
-      .vertexSideEffects                        = false,
-      .tessellationSideEffects                  = false,
-      .geometrySideEffects                      = false,
-      .fragmentSideEffects                      = false,
-      .shaderTessellationPointSize              = false,
-      .shaderGeometryPointSize                  = true,
+      .vertexPipelineStoresAndAtomics           = false,
+      .fragmentStoresAndAtomics                 = true,
+      .shaderTessellationAndGeometryPointSize   = true,
       .shaderImageGatherExtended                = true,
       .shaderStorageImageExtendedFormats        = false,
       .shaderStorageImageMultisample            = false,
@@ -371,15 +377,26 @@ void anv_GetPhysicalDeviceFeatures(
       .shaderSampledImageArrayDynamicIndexing   = false,
       .shaderStorageBufferArrayDynamicIndexing  = false,
       .shaderStorageImageArrayDynamicIndexing   = false,
+      .shaderStorageImageReadWithoutFormat      = false,
+      .shaderStorageImageWriteWithoutFormat     = true,
       .shaderClipDistance                       = false,
       .shaderCullDistance                       = false,
       .shaderFloat64                            = false,
       .shaderInt64                              = false,
       .shaderInt16                              = false,
+      .variableMultisampleRate                  = false,
+      .inheritedQueries                         = false,
    };
 }
 
+void
+anv_device_get_cache_uuid(void *uuid)
+{
+   memset(uuid, 0, VK_UUID_SIZE);
+   snprintf(uuid, VK_UUID_SIZE, "anv-%s", MESA_GIT_SHA1 + 4);
+}
+
 void anv_GetPhysicalDeviceProperties(
     VkPhysicalDevice                            physicalDevice,
     VkPhysicalDeviceProperties*                 pProperties)
@@ -389,30 +406,33 @@ void anv_GetPhysicalDeviceProperties(
 
    anv_finishme("Get correct values for VkPhysicalDeviceLimits");
 
+   const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
+
+   VkSampleCountFlags sample_counts =
+      isl_device_get_sample_counts(&pdevice->isl_dev);
+
    VkPhysicalDeviceLimits limits = {
       .maxImageDimension1D                      = (1 << 14),
       .maxImageDimension2D                      = (1 << 14),
       .maxImageDimension3D                      = (1 << 10),
       .maxImageDimensionCube                    = (1 << 14),
       .maxImageArrayLayers                      = (1 << 10),
-
-      /* Broadwell supports 1, 2, 4, and 8 samples. */
-      .sampleCounts                             = 4,
-
-      .maxTexelBufferSize                       = (1 << 14),
-      .maxUniformBufferSize                     = UINT32_MAX,
-      .maxStorageBufferSize                     = UINT32_MAX,
+      .maxTexelBufferElements                   = 128 * 1024 * 1024,
+      .maxUniformBufferRange                    = UINT32_MAX,
+      .maxStorageBufferRange                    = UINT32_MAX,
       .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
       .maxMemoryAllocationCount                 = UINT32_MAX,
+      .maxSamplerAllocationCount                = 64 * 1024,
       .bufferImageGranularity                   = 64, /* A cache line */
       .sparseAddressSpaceSize                   = 0,
       .maxBoundDescriptorSets                   = MAX_SETS,
-      .maxDescriptorSets                        = UINT32_MAX,
       .maxPerStageDescriptorSamplers            = 64,
       .maxPerStageDescriptorUniformBuffers      = 64,
       .maxPerStageDescriptorStorageBuffers      = 64,
       .maxPerStageDescriptorSampledImages       = 64,
       .maxPerStageDescriptorStorageImages       = 64,
+      .maxPerStageDescriptorInputAttachments    = 64,
+      .maxPerStageResources                     = 128,
       .maxDescriptorSetSamplers                 = 256,
       .maxDescriptorSetUniformBuffers           = 256,
       .maxDescriptorSetUniformBuffersDynamic    = 256,
@@ -420,34 +440,31 @@ void anv_GetPhysicalDeviceProperties(
       .maxDescriptorSetStorageBuffersDynamic    = 256,
       .maxDescriptorSetSampledImages            = 256,
       .maxDescriptorSetStorageImages            = 256,
+      .maxDescriptorSetInputAttachments         = 256,
       .maxVertexInputAttributes                 = 32,
       .maxVertexInputBindings                   = 32,
-      .maxVertexInputAttributeOffset            = 256,
-      .maxVertexInputBindingStride              = 256,
-      .maxVertexOutputComponents                = 32,
-      .maxTessGenLevel                          = 0,
-      .maxTessPatchSize                         = 0,
-      .maxTessControlPerVertexInputComponents   = 0,
-      .maxTessControlPerVertexOutputComponents  = 0,
-      .maxTessControlPerPatchOutputComponents   = 0,
-      .maxTessControlTotalOutputComponents      = 0,
-      .maxTessEvaluationInputComponents         = 0,
-      .maxTessEvaluationOutputComponents        = 0,
-      .maxGeometryShaderInvocations             = 6,
-      .maxGeometryInputComponents               = 16,
-      .maxGeometryOutputComponents              = 16,
-      .maxGeometryOutputVertices                = 16,
-      .maxGeometryTotalOutputComponents         = 16,
-      .maxFragmentInputComponents               = 16,
-      .maxFragmentOutputBuffers                 = 8,
-      .maxFragmentDualSourceBuffers             = 2,
+      .maxVertexInputAttributeOffset            = 2047,
+      .maxVertexInputBindingStride              = 2048,
+      .maxVertexOutputComponents                = 128,
+      .maxTessellationGenerationLevel           = 0,
+      .maxTessellationPatchSize                 = 0,
+      .maxTessellationControlPerVertexInputComponents = 0,
+      .maxTessellationControlPerVertexOutputComponents = 0,
+      .maxTessellationControlPerPatchOutputComponents = 0,
+      .maxTessellationControlTotalOutputComponents = 0,
+      .maxTessellationEvaluationInputComponents = 0,
+      .maxTessellationEvaluationOutputComponents = 0,
+      .maxGeometryShaderInvocations             = 32,
+      .maxGeometryInputComponents               = 64,
+      .maxGeometryOutputComponents              = 128,
+      .maxGeometryOutputVertices                = 256,
+      .maxGeometryTotalOutputComponents         = 1024,
+      .maxFragmentInputComponents               = 128,
+      .maxFragmentOutputAttachments             = 8,
+      .maxFragmentDualSrcAttachments            = 2,
       .maxFragmentCombinedOutputResources       = 8,
-      .maxComputeSharedMemorySize               = 1024,
-      .maxComputeWorkGroupCount = {
-         16 * devinfo->max_cs_threads,
-         16 * devinfo->max_cs_threads,
-         16 * devinfo->max_cs_threads,
-      },
+      .maxComputeSharedMemorySize               = 32768,
+      .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
       .maxComputeWorkGroupInvocations           = 16 * devinfo->max_cs_threads,
       .maxComputeWorkGroupSize = {
          16 * devinfo->max_cs_threads,
@@ -458,49 +475,57 @@ void anv_GetPhysicalDeviceProperties(
       .subTexelPrecisionBits                    = 4 /* FIXME */,
       .mipmapPrecisionBits                      = 4 /* FIXME */,
       .maxDrawIndexedIndexValue                 = UINT32_MAX,
-      .maxDrawIndirectInstanceCount             = UINT32_MAX,
-      .primitiveRestartForPatches               = UINT32_MAX,
+      .maxDrawIndirectCount                     = UINT32_MAX,
       .maxSamplerLodBias                        = 16,
       .maxSamplerAnisotropy                     = 16,
       .maxViewports                             = MAX_VIEWPORTS,
       .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
-      .viewportBoundsRange                      = { -1.0, 1.0 }, /* FIXME */
+      .viewportBoundsRange                      = { -16384.0, 16384.0 },
       .viewportSubPixelBits                     = 13, /* We take a float? */
-      .minMemoryMapAlignment                    = 64, /* A cache line */
+      .minMemoryMapAlignment                    = 4096, /* A page */
       .minTexelBufferOffsetAlignment            = 1,
       .minUniformBufferOffsetAlignment          = 1,
       .minStorageBufferOffsetAlignment          = 1,
-      .minTexelOffset                           = 0, /* FIXME */
-      .maxTexelOffset                           = 0, /* FIXME */
-      .minTexelGatherOffset                     = 0, /* FIXME */
-      .maxTexelGatherOffset                     = 0, /* FIXME */
+      .minTexelOffset                           = -8,
+      .maxTexelOffset                           = 7,
+      .minTexelGatherOffset                     = -8,
+      .maxTexelGatherOffset                     = 7,
       .minInterpolationOffset                   = 0, /* FIXME */
       .maxInterpolationOffset                   = 0, /* FIXME */
       .subPixelInterpolationOffsetBits          = 0, /* FIXME */
       .maxFramebufferWidth                      = (1 << 14),
       .maxFramebufferHeight                     = (1 << 14),
       .maxFramebufferLayers                     = (1 << 10),
-      .maxFramebufferColorSamples               = 8,
-      .maxFramebufferDepthSamples               = 8,
-      .maxFramebufferStencilSamples             = 8,
+      .framebufferColorSampleCounts             = sample_counts,
+      .framebufferDepthSampleCounts             = sample_counts,
+      .framebufferStencilSampleCounts           = sample_counts,
+      .framebufferNoAttachmentsSampleCounts     = sample_counts,
       .maxColorAttachments                      = MAX_RTS,
-      .maxSampledImageColorSamples              = 8,
-      .maxSampledImageDepthSamples              = 8,
-      .maxSampledImageIntegerSamples            = 1,
-      .maxStorageImageSamples                   = 1,
+      .sampledImageColorSampleCounts            = sample_counts,
+      .sampledImageIntegerSampleCounts          = VK_SAMPLE_COUNT_1_BIT,
+      .sampledImageDepthSampleCounts            = sample_counts,
+      .sampledImageStencilSampleCounts          = sample_counts,
+      .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
       .maxSampleMaskWords                       = 1,
-      .timestampFrequency                       = 1000 * 1000 * 1000 / 80,
+      .timestampComputeAndGraphics              = false,
+      .timestampPeriod                          = time_stamp_base / (1000 * 1000 * 1000),
       .maxClipDistances                         = 0 /* FIXME */,
       .maxCullDistances                         = 0 /* FIXME */,
       .maxCombinedClipAndCullDistances          = 0 /* FIXME */,
+      .discreteQueuePriorities                  = 1,
       .pointSizeRange                           = { 0.125, 255.875 },
       .lineWidthRange                           = { 0.0, 7.9921875 },
       .pointSizeGranularity                     = (1.0 / 8.0),
       .lineWidthGranularity                     = (1.0 / 128.0),
+      .strictLines                              = false, /* FINISHME */
+      .standardSampleLocations                  = true,
+      .optimalBufferCopyOffsetAlignment         = 128,
+      .optimalBufferCopyRowPitchAlignment       = 128,
+      .nonCoherentAtomSize                      = 64,
    };
 
    *pProperties = (VkPhysicalDeviceProperties) {
-      .apiVersion = VK_MAKE_VERSION(0, 170, 2),
+      .apiVersion = VK_MAKE_VERSION(1, 0, 2),
       .driverVersion = 1,
       .vendorID = 0x8086,
       .deviceID = pdevice->chipset_id,
@@ -510,8 +535,7 @@ void anv_GetPhysicalDeviceProperties(
    };
 
    strcpy(pProperties->deviceName, pdevice->name);
-   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_SIZE,
-            "anv-%s", MESA_GIT_SHA1 + 4);
+   anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
 }
 
 void anv_GetPhysicalDeviceQueueFamilyProperties(
@@ -531,7 +555,8 @@ void anv_GetPhysicalDeviceQueueFamilyProperties(
                     VK_QUEUE_COMPUTE_BIT |
                     VK_QUEUE_TRANSFER_BIT,
       .queueCount = 1,
-      .supportsTimestamps = true,
+      .timestampValidBits = 36, /* XXX: Real value here */
+      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
    };
 }
 
@@ -547,17 +572,43 @@ void anv_GetPhysicalDeviceMemoryProperties(
     */
    heap_size = 3 * physical_device->aperture_size / 4;
 
-   /* The property flags below are valid only for llc platforms. */
-   pMemoryProperties->memoryTypeCount = 1;
-   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
-      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
-      .heapIndex = 1,
-   };
+   if (physical_device->info->has_llc) {
+      /* Big core GPUs share LLC with the CPU and thus one memory type can be
+       * both cached and coherent at the same time.
+       */
+      pMemoryProperties->memoryTypeCount = 1;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+      };
+   } else {
+      /* The spec requires that we expose a host-visible, coherent memory
+       * type, but Atom GPUs don't share LLC. Thus we offer two memory types
+       * to give the application a choice between cached but not coherent,
+       * and coherent but uncached (write-combined).
+       */
+      pMemoryProperties->memoryTypeCount = 2;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+         .heapIndex = 0,
+      };
+      pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+      };
+   }
 
    pMemoryProperties->memoryHeapCount = 1;
    pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
       .size = heap_size,
-      .flags = VK_MEMORY_HEAP_HOST_LOCAL_BIT,
+      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
    };
 }
 
@@ -568,6 +619,20 @@ PFN_vkVoidFunction anv_GetInstanceProcAddr(
    return anv_lookup_entrypoint(pName);
 }
 
+/* The loader wants us to expose a second GetInstanceProcAddr function
+ * to work around certain LD_PRELOAD issues seen in apps.
+ */
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName);
+
+VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
+    VkInstance                                  instance,
+    const char*                                 pName)
+{
+   return anv_GetInstanceProcAddr(instance, pName);
+}
+
 PFN_vkVoidFunction anv_GetDeviceProcAddr(
     VkDevice                                    device,
     const char*                                 pName)
@@ -590,10 +655,33 @@ anv_queue_finish(struct anv_queue *queue)
 {
 }
 
+static struct anv_state
+anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
+{
+   struct anv_state state;
+
+   state = anv_state_pool_alloc(pool, size, align);
+   memcpy(state.map, p, size);
+
+   if (!pool->block_pool->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
+
+struct gen8_border_color {
+   union {
+      float float32[4];
+      uint32_t uint32[4];
+   };
+   /* Pad out to 64 bytes */
+   uint32_t _pad[12];
+};
+
 static void
 anv_device_init_border_colors(struct anv_device *device)
 {
-   static const VkClearColorValue border_colors[] = {
+   static const struct gen8_border_color border_colors[] = {
       [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] =  { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
       [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =       { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
       [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =       { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
@@ -602,24 +690,91 @@ anv_device_init_border_colors(struct anv_device *device)
       [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
    };
 
-   device->border_colors =
-      anv_state_pool_alloc(&device->dynamic_state_pool,
-                           sizeof(border_colors), 32);
-   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
+   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
+                                                    sizeof(border_colors), 64,
+                                                    border_colors);
+}
+
+VkResult
+anv_device_submit_simple_batch(struct anv_device *device,
+                               struct anv_batch *batch)
+{
+   struct drm_i915_gem_execbuffer2 execbuf;
+   struct drm_i915_gem_exec_object2 exec2_objects[1];
+   struct anv_bo bo;
+   VkResult result = VK_SUCCESS;
+   uint32_t size;
+   int64_t timeout;
+   int ret;
+
+   /* Kernel driver requires 8 byte aligned batch length */
+   size = align_u32(batch->next - batch->start, 8);
+   assert(size < device->batch_bo_pool.bo_size);
+   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo);
+   if (result != VK_SUCCESS)
+      return result;
+
+   memcpy(bo.map, batch->start, size);
+   if (!device->info.has_llc)
+      anv_clflush_range(bo.map, size);
+
+   exec2_objects[0].handle = bo.gem_handle;
+   exec2_objects[0].relocation_count = 0;
+   exec2_objects[0].relocs_ptr = 0;
+   exec2_objects[0].alignment = 0;
+   exec2_objects[0].offset = bo.offset;
+   exec2_objects[0].flags = 0;
+   exec2_objects[0].rsvd1 = 0;
+   exec2_objects[0].rsvd2 = 0;
+
+   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
+   execbuf.buffer_count = 1;
+   execbuf.batch_start_offset = 0;
+   execbuf.batch_len = size;
+   execbuf.cliprects_ptr = 0;
+   execbuf.num_cliprects = 0;
+   execbuf.DR1 = 0;
+   execbuf.DR4 = 0;
+
+   execbuf.flags =
+      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
+   execbuf.rsvd1 = device->context_id;
+   execbuf.rsvd2 = 0;
+
+   ret = anv_gem_execbuffer(device, &execbuf);
+   if (ret != 0) {
+      /* We don't know the real error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+      goto fail;
+   }
+
+   timeout = INT64_MAX;
+   ret = anv_gem_wait(device, bo.gem_handle, &timeout);
+   if (ret != 0) {
+      /* We don't know the real error. */
+      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
+      goto fail;
+   }
+
+ fail:
+   anv_bo_pool_free(&device->batch_bo_pool, &bo);
+
+   return result;
 }
 
 VkResult anv_CreateDevice(
     VkPhysicalDevice                            physicalDevice,
     const VkDeviceCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkDevice*                                   pDevice)
 {
    ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
-   struct anv_instance *instance = physical_device->instance;
+   VkResult result;
    struct anv_device *device;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
 
-   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
+   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
       bool found = false;
       for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
          if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
@@ -634,33 +789,49 @@ VkResult anv_CreateDevice(
 
    anv_set_dispatch_devinfo(physical_device->info);
 
-   device = anv_instance_alloc(instance, sizeof(*device), 8,
-                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
+                       sizeof(*device), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    if (!device)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
    device->instance = physical_device->instance;
+   device->chipset_id = physical_device->chipset_id;
+
+   if (pAllocator)
+      device->alloc = *pAllocator;
+   else
+      device->alloc = physical_device->instance->alloc;
 
    /* XXX(chadv): Can we dup() physicalDevice->fd here? */
    device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
-   if (device->fd == -1)
+   if (device->fd == -1) {
+      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
       goto fail_device;
+   }
 
    device->context_id = anv_gem_create_context(device);
-   if (device->context_id == -1)
+   if (device->context_id == -1) {
+      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
       goto fail_fd;
+   }
+
+   device->info = *physical_device->info;
+   device->isl_dev = physical_device->isl_dev;
 
    pthread_mutex_init(&device->mutex, NULL);
 
    anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
 
-   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
+   anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
 
    anv_state_pool_init(&device->dynamic_state_pool,
                        &device->dynamic_state_block_pool);
 
-   anv_block_pool_init(&device->instruction_block_pool, device, 4096);
+   anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
+   anv_pipeline_cache_init(&device->default_pipeline_cache, device);
+
    anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
 
    anv_state_pool_init(&device->surface_state_pool,
@@ -670,12 +841,32 @@ VkResult anv_CreateDevice(
 
    anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
 
-   device->info = *physical_device->info;
-   device->isl_dev = physical_device->isl_dev;
-
    anv_queue_init(device, &device->queue);
 
-   anv_device_init_meta(device);
+   switch (device->info.gen) {
+   case 7:
+      if (!device->info.is_haswell)
+         result = gen7_init_device_state(device);
+      else
+         result = gen75_init_device_state(device);
+      break;
+   case 8:
+      result = gen8_init_device_state(device);
+      break;
+   case 9:
+      result = gen9_init_device_state(device);
+      break;
+   default:
+      /* Shouldn't get here as we don't create physical devices for any other
+       * gens. */
+      unreachable("unhandled gen");
+   }
+   if (result != VK_SUCCESS)
+      goto fail_fd;
+
+   result = anv_device_init_meta(device);
+   if (result != VK_SUCCESS)
+      goto fail_fd;
 
    anv_device_init_border_colors(device);
 
@@ -686,13 +877,14 @@ VkResult anv_CreateDevice(
  fail_fd:
    close(device->fd);
  fail_device:
-   anv_device_free(device, device);
+   anv_free(&device->alloc, device);
 
-   return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+   return result;
 }
 
 void anv_DestroyDevice(
-    VkDevice                                    _device)
+    VkDevice                                    _device,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
 
@@ -720,7 +912,9 @@ void anv_DestroyDevice(
 
    close(device->fd);
 
-   anv_instance_free(device->instance, device);
+   pthread_mutex_destroy(&device->mutex);
+
+   anv_free(&device->alloc, device);
 }
 
 VkResult anv_EnumerateInstanceExtensionProperties(
@@ -802,8 +996,8 @@ void anv_GetDeviceQueue(
 
 VkResult anv_QueueSubmit(
     VkQueue                                     _queue,
-    uint32_t                                    commandBufferCount,
-    const VkCommandBuffer*                      pCommandBuffers,
+    uint32_t                                    submitCount,
+    const VkSubmitInfo*                         pSubmits,
     VkFence                                     _fence)
 {
    ANV_FROM_HANDLE(anv_queue, queue, _queue);
@@ -811,29 +1005,31 @@ VkResult anv_QueueSubmit(
    struct anv_device *device = queue->device;
    int ret;
 
-   for (uint32_t i = 0; i < commandBufferCount; i++) {
-      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
-
-      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+   for (uint32_t i = 0; i < submitCount; i++) {
+      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
+         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
+                         pSubmits[i].pCommandBuffers[j]);
+         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 
-      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
-      if (ret != 0) {
-         /* We don't know the real error. */
-         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
-                          "execbuf2 failed: %m");
-      }
-
-      if (fence) {
-         ret = anv_gem_execbuffer(device, &fence->execbuf);
+         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
          if (ret != 0) {
             /* We don't know the real error. */
             return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                              "execbuf2 failed: %m");
          }
+
+         for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
+            cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
       }
+   }
 
-      for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
-         cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
+   if (fence) {
+      ret = anv_gem_execbuffer(device, &fence->execbuf);
+      if (ret != 0) {
+         /* We don't know the real error. */
+         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "execbuf2 failed: %m");
+      }
    }
 
    return VK_SUCCESS;
@@ -851,84 +1047,16 @@ VkResult anv_DeviceWaitIdle(
     VkDevice                                    _device)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_state state;
    struct anv_batch batch;
-   struct drm_i915_gem_execbuffer2 execbuf;
-   struct drm_i915_gem_exec_object2 exec2_objects[1];
-   struct anv_bo *bo = NULL;
-   VkResult result;
-   int64_t timeout;
-   int ret;
 
-   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
-   bo = &device->dynamic_state_pool.block_pool->bo;
-   batch.start = batch.next = state.map;
-   batch.end = state.map + 32;
+   uint32_t cmds[8];
+   batch.start = batch.next = cmds;
+   batch.end = (void *) cmds + sizeof(cmds);
+
    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
    anv_batch_emit(&batch, GEN7_MI_NOOP);
 
-   exec2_objects[0].handle = bo->gem_handle;
-   exec2_objects[0].relocation_count = 0;
-   exec2_objects[0].relocs_ptr = 0;
-   exec2_objects[0].alignment = 0;
-   exec2_objects[0].offset = bo->offset;
-   exec2_objects[0].flags = 0;
-   exec2_objects[0].rsvd1 = 0;
-   exec2_objects[0].rsvd2 = 0;
-
-   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
-   execbuf.buffer_count = 1;
-   execbuf.batch_start_offset = state.offset;
-   execbuf.batch_len = batch.next - state.map;
-   execbuf.cliprects_ptr = 0;
-   execbuf.num_cliprects = 0;
-   execbuf.DR1 = 0;
-   execbuf.DR4 = 0;
-
-   execbuf.flags =
-      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
-   execbuf.rsvd1 = device->context_id;
-   execbuf.rsvd2 = 0;
-
-   ret = anv_gem_execbuffer(device, &execbuf);
-   if (ret != 0) {
-      /* We don't know the real error. */
-      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
-      goto fail;
-   }
-
-   timeout = INT64_MAX;
-   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
-   if (ret != 0) {
-      /* We don't know the real error. */
-      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
-      goto fail;
-   }
-
-   anv_state_pool_free(&device->dynamic_state_pool, state);
-
-   return VK_SUCCESS;
-
- fail:
-   anv_state_pool_free(&device->dynamic_state_pool, state);
-
-   return result;
-}
-
-void *
-anv_device_alloc(struct anv_device *            device,
-                 size_t                         size,
-                 size_t                         alignment,
-                 VkSystemAllocType              allocType)
-{
-   return anv_instance_alloc(device->instance, size, alignment, allocType);
-}
-
-void
-anv_device_free(struct anv_device *             device,
-                void *                          mem)
-{
-   anv_instance_free(device->instance, mem);
+   return anv_device_submit_simple_batch(device, &batch);
 }
 
 VkResult
@@ -946,55 +1074,72 @@ anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
    return VK_SUCCESS;
 }
 
-VkResult anv_AllocMemory(
+VkResult anv_AllocateMemory(
     VkDevice                                    _device,
-    const VkMemoryAllocInfo*                    pAllocInfo,
+    const VkMemoryAllocateInfo*                 pAllocateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkDeviceMemory*                             pMem)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    struct anv_device_memory *mem;
    VkResult result;
 
-   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
+   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+
+   if (pAllocateInfo->allocationSize == 0) {
+      /* Apparently, this is allowed */
+      *pMem = VK_NULL_HANDLE;
+      return VK_SUCCESS;
+   }
 
    /* We support exactly one memory heap. */
-   assert(pAllocInfo->memoryTypeIndex == 0);
+   assert(pAllocateInfo->memoryTypeIndex == 0 ||
+          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
 
    /* FINISHME: Fail if allocation request exceeds heap size. */
 
-   mem = anv_device_alloc(device, sizeof(*mem), 8,
-                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (mem == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
+   /* The kernel is going to give us whole pages anyway */
+   uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+
+   result = anv_bo_init_new(&mem->bo, device, alloc_size);
    if (result != VK_SUCCESS)
       goto fail;
 
+   mem->type_index = pAllocateInfo->memoryTypeIndex;
+
    *pMem = anv_device_memory_to_handle(mem);
 
    return VK_SUCCESS;
 
  fail:
-   anv_device_free(device, mem);
+   anv_free2(&device->alloc, pAllocator, mem);
 
    return result;
 }
 
 void anv_FreeMemory(
     VkDevice                                    _device,
-    VkDeviceMemory                              _mem)
+    VkDeviceMemory                              _mem,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
 
+   if (mem == NULL)
+      return;
+
    if (mem->bo.map)
       anv_gem_munmap(mem->bo.map, mem->bo.size);
 
    if (mem->bo.gem_handle != 0)
       anv_gem_close(device, mem->bo.gem_handle);
 
-   anv_device_free(device, mem);
+   anv_free2(&device->alloc, pAllocator, mem);
 }
 
 VkResult anv_MapMemory(
@@ -1008,16 +1153,37 @@ VkResult anv_MapMemory(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
 
+   if (mem == NULL) {
+      *ppData = NULL;
+      return VK_SUCCESS;
+   }
+
+   if (size == VK_WHOLE_SIZE)
+      size = mem->bo.size - offset;
+
    /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
     * takes a VkDeviceMemory pointer, it seems like only one map of the memory
     * at a time is valid. We could just mmap up front and return an offset
     * pointer here, but that may exhaust virtual memory on 32 bit
     * userspace. */
 
-   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
-   mem->map_size = size;
+   uint32_t gem_flags = 0;
+   if (!device->info.has_llc && mem->type_index == 0)
+      gem_flags |= I915_MMAP_WC;
+
+   /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
+   uint64_t map_offset = offset & ~4095ull;
+   assert(offset >= map_offset);
+   uint64_t map_size = (offset + size) - map_offset;
+
+   /* Let's map whole pages */
+   map_size = align_u64(map_size, 4096);
 
-   *ppData = mem->map;
+   mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
+                           map_offset, map_size, gem_flags);
+   mem->map_size = map_size;
+
+   *ppData = mem->map + (offset - map_offset);
 
    return VK_SUCCESS;
 }
@@ -1028,25 +1194,68 @@ void anv_UnmapMemory(
 {
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
 
+   if (mem == NULL)
+      return;
+
    anv_gem_munmap(mem->map, mem->map_size);
 }
 
+static void
+clflush_mapped_ranges(struct anv_device         *device,
+                      uint32_t                   count,
+                      const VkMappedMemoryRange *ranges)
+{
+   for (uint32_t i = 0; i < count; i++) {
+      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
+      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
+      void *end;
+
+      if (ranges[i].offset + ranges[i].size > mem->map_size)
+         end = mem->map + mem->map_size;
+      else
+         end = mem->map + ranges[i].offset + ranges[i].size;
+
+      while (p < end) {
+         __builtin_ia32_clflush(p);
+         p += CACHELINE_SIZE;
+      }
+   }
+}
+
 VkResult anv_FlushMappedMemoryRanges(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     uint32_t                                    memoryRangeCount,
     const VkMappedMemoryRange*                  pMemoryRanges)
 {
-   /* clflush here for !llc platforms */
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   /* Make sure the writes we're flushing have landed. */
+   __builtin_ia32_mfence();
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
    return VK_SUCCESS;
 }
 
 VkResult anv_InvalidateMappedMemoryRanges(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     uint32_t                                    memoryRangeCount,
     const VkMappedMemoryRange*                  pMemoryRanges)
 {
-   return anv_FlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
+
+   /* Make sure no reads get moved up above the invalidate. */
+   __builtin_ia32_mfence();
+
+   return VK_SUCCESS;
 }
 
 void anv_GetBufferMemoryRequirements(
@@ -1096,7 +1305,7 @@ void anv_GetImageMemoryRequirements(
 void anv_GetImageSparseMemoryRequirements(
     VkDevice                                    device,
     VkImage                                     image,
-    uint32_t*                                   pNumRequirements,
+    uint32_t*                                   pSparseMemoryRequirementCount,
     VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
 {
    stub();
@@ -1119,8 +1328,13 @@ VkResult anv_BindBufferMemory(
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
-   buffer->bo = &mem->bo;
-   buffer->offset = memoryOffset;
+   if (mem) {
+      buffer->bo = &mem->bo;
+      buffer->offset = memoryOffset;
+   } else {
+      buffer->bo = NULL;
+      buffer->offset = 0;
+   }
 
    return VK_SUCCESS;
 }
@@ -1134,8 +1348,13 @@ VkResult anv_BindImageMemory(
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
    ANV_FROM_HANDLE(anv_image, image, _image);
 
-   image->bo = &mem->bo;
-   image->offset = memoryOffset;
+   if (mem) {
+      image->bo = &mem->bo;
+      image->offset = memoryOffset;
+   } else {
+      image->bo = NULL;
+      image->offset = 0;
+   }
 
    return VK_SUCCESS;
 }
@@ -1146,12 +1365,13 @@ VkResult anv_QueueBindSparse(
     const VkBindSparseInfo*                     pBindInfo,
     VkFence                                     fence)
 {
-   stub_return(VK_UNSUPPORTED);
+   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
 }
 
 VkResult anv_CreateFence(
     VkDevice                                    _device,
     const VkFenceCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkFence*                                    pFence)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
@@ -1163,8 +1383,8 @@ VkResult anv_CreateFence(
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
 
-   fence = anv_device_alloc(device, sizeof(*fence), 8,
-                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   fence = anv_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
+                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (fence == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -1173,12 +1393,19 @@ VkResult anv_CreateFence(
       goto fail;
 
    fence->bo.map =
-      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
+      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size, 0);
    batch.next = batch.start = fence->bo.map;
    batch.end = fence->bo.map + fence->bo.size;
    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
    anv_batch_emit(&batch, GEN7_MI_NOOP);
 
+   if (!device->info.has_llc) {
+      assert(((uintptr_t) fence->bo.map & CACHELINE_MASK) == 0);
+      assert(batch.next - fence->bo.map <= CACHELINE_SIZE);
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(fence->bo.map);
+   }
+
    fence->exec2_objects[0].handle = fence->bo.gem_handle;
    fence->exec2_objects[0].relocation_count = 0;
    fence->exec2_objects[0].relocs_ptr = 0;
@@ -1202,26 +1429,29 @@ VkResult anv_CreateFence(
    fence->execbuf.rsvd1 = device->context_id;
    fence->execbuf.rsvd2 = 0;
 
+   fence->ready = false;
+
    *pFence = anv_fence_to_handle(fence);
 
    return VK_SUCCESS;
 
  fail:
-   anv_device_free(device, fence);
+   anv_free2(&device->alloc, pAllocator, fence);
 
    return result;
 }
 
 void anv_DestroyFence(
     VkDevice                                    _device,
-    VkFence                                     _fence)
+    VkFence                                     _fence,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_fence, fence, _fence);
 
    anv_gem_munmap(fence->bo.map, fence->bo.size);
    anv_gem_close(device, fence->bo.gem_handle);
-   anv_device_free(device, fence);
+   anv_free2(&device->alloc, pAllocator, fence);
 }
 
 VkResult anv_ResetFences(
@@ -1301,69 +1531,118 @@ VkResult anv_WaitForFences(
 VkResult anv_CreateSemaphore(
     VkDevice                                    device,
     const VkSemaphoreCreateInfo*                pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkSemaphore*                                pSemaphore)
 {
+   /* The DRM execbuffer ioctl always executes in order, even between
+    * different rings. As such, there's nothing to do for the user space
+    * semaphore.
+    */
+
    *pSemaphore = (VkSemaphore)1;
-   stub_return(VK_SUCCESS);
+
+   return VK_SUCCESS;
 }
 
 void anv_DestroySemaphore(
     VkDevice                                    device,
-    VkSemaphore                                 semaphore)
-{
-   stub();
-}
-
-VkResult anv_QueueSignalSemaphore(
-    VkQueue                                     queue,
-    VkSemaphore                                 semaphore)
+    VkSemaphore                                 semaphore,
+    const VkAllocationCallbacks*                pAllocator)
 {
-   stub_return(VK_UNSUPPORTED);
-}
-
-VkResult anv_QueueWaitSemaphore(
-    VkQueue                                     queue,
-    VkSemaphore                                 semaphore)
-{
-   stub_return(VK_UNSUPPORTED);
 }
 
 // Event functions
 
 VkResult anv_CreateEvent(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     const VkEventCreateInfo*                    pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkEvent*                                    pEvent)
 {
-   stub_return(VK_UNSUPPORTED);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_state state;
+   struct anv_event *event;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
+   state = anv_state_pool_alloc(&device->dynamic_state_pool,
+                                sizeof(*event), 8);
+   event = state.map;
+   event->state = state;
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   *pEvent = anv_event_to_handle(event);
+
+   return VK_SUCCESS;
 }
 
 void anv_DestroyEvent(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event,
+    const VkAllocationCallbacks*                pAllocator)
 {
-   stub();
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   anv_state_pool_free(&device->dynamic_state_pool, event->state);
 }
 
 VkResult anv_GetEventStatus(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_UNSUPPORTED);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   if (!device->info.has_llc) {
+      /* Invalidate read cache before reading event written by GPU. */
+      __builtin_ia32_clflush(event);
+      __builtin_ia32_mfence();
+   }
+
+   return event->semaphore;
 }
 
 VkResult anv_SetEvent(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_UNSUPPORTED);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_SET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
 }
 
 VkResult anv_ResetEvent(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_UNSUPPORTED);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_mfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
 }
 
 // Buffer functions
@@ -1371,6 +1650,7 @@ VkResult anv_ResetEvent(
 VkResult anv_CreateBuffer(
     VkDevice                                    _device,
     const VkBufferCreateInfo*                   pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkBuffer*                                   pBuffer)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
@@ -1378,12 +1658,13 @@ VkResult anv_CreateBuffer(
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
 
-   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
-                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   buffer = anv_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
+                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (buffer == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    buffer->size = pCreateInfo->size;
+   buffer->usage = pCreateInfo->usage;
    buffer->bo = NULL;
    buffer->offset = 0;
 
@@ -1394,65 +1675,58 @@ VkResult anv_CreateBuffer(
 
 void anv_DestroyBuffer(
     VkDevice                                    _device,
-    VkBuffer                                    _buffer)
+    VkBuffer                                    _buffer,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
-   anv_device_free(device, buffer);
+   anv_free2(&device->alloc, pAllocator, buffer);
 }
 
 void
-anv_fill_buffer_surface_state(struct anv_device *device, void *state,
-                              const struct anv_format *format,
+anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
+                              enum isl_format format,
                               uint32_t offset, uint32_t range, uint32_t stride)
 {
    switch (device->info.gen) {
    case 7:
       if (device->info.is_haswell)
-         gen75_fill_buffer_surface_state(state, format, offset, range, stride);
+         gen75_fill_buffer_surface_state(state.map, format, offset, range,
+                                         stride);
       else
-         gen7_fill_buffer_surface_state(state, format, offset, range, stride);
+         gen7_fill_buffer_surface_state(state.map, format, offset, range,
+                                        stride);
       break;
    case 8:
-      gen8_fill_buffer_surface_state(state, format, offset, range, stride);
+      gen8_fill_buffer_surface_state(state.map, format, offset, range, stride);
       break;
    case 9:
-      gen9_fill_buffer_surface_state(state, format, offset, range, stride);
+      gen9_fill_buffer_surface_state(state.map, format, offset, range, stride);
       break;
    default:
       unreachable("unsupported gen\n");
    }
-}
-
-VkResult anv_CreateBufferView(
-    VkDevice                                    _device,
-    const VkBufferViewCreateInfo*               pCreateInfo,
-    VkBufferView*                               pView)
-{
-   stub_return(VK_UNSUPPORTED);
-}
 
-void anv_DestroyBufferView(
-    VkDevice                                    _device,
-    VkBufferView                                _bview)
-{
-   stub();
+   if (!device->info.has_llc)
+      anv_state_clflush(state);
 }
 
 void anv_DestroySampler(
     VkDevice                                    _device,
-    VkSampler                                   _sampler)
+    VkSampler                                   _sampler,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
 
-   anv_device_free(device, sampler);
+   anv_free2(&device->alloc, pAllocator, sampler);
 }
 
 VkResult anv_CreateFramebuffer(
     VkDevice                                    _device,
     const VkFramebufferCreateInfo*              pCreateInfo,
+    const VkAllocationCallbacks*                pAllocator,
     VkFramebuffer*                              pFramebuffer)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
@@ -1462,8 +1736,8 @@ VkResult anv_CreateFramebuffer(
 
    size_t size = sizeof(*framebuffer) +
                  sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
-   framebuffer = anv_device_alloc(device, size, 8,
-                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
+   framebuffer = anv_alloc2(&device->alloc, pAllocator, size, 8,
+                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (framebuffer == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -1484,12 +1758,13 @@ VkResult anv_CreateFramebuffer(
 
 void anv_DestroyFramebuffer(
     VkDevice                                    _device,
-    VkFramebuffer                               _fb)
+    VkFramebuffer                               _fb,
+    const VkAllocationCallbacks*                pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
 
-   anv_device_free(device, fb);
+   anv_free2(&device->alloc, pAllocator, fb);
 }
 
 void vkCmdDbgMarkerBegin(