vk: Fill out buffer surface state when updating descriptor set
diff --git a/src/vulkan/anv_device.c b/src/vulkan/anv_device.c
index 5b300afc0a80ca8c26bf4873576fbb135e034fca..88515c353eea5fbb00054409ea004ae4911933ef 100644
--- a/src/vulkan/anv_device.c
+++ b/src/vulkan/anv_device.c
@@ -87,10 +87,14 @@ anv_physical_device_init(struct anv_physical_device *device,
       fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
    } else if (device->info->gen == 7 && !device->info->is_baytrail) {
       fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
-   } else if (device->info->gen == 9) {
+   } else if (device->info->gen == 7 && device->info->is_baytrail) {
+      fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
+   } else if (device->info->gen == 9 && !device->info->is_broxton) {
       fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
-   } else if (device->info->gen == 8 && !device->info->is_cherryview) {
-      /* Broadwell is as fully supported as anything */
+   } else if (device->info->gen == 9 && device->info->is_broxton) {
+      fprintf(stderr, "WARNING: Broxton Vulkan support is incomplete\n");
+   } else if (device->info->gen == 8) {
+      /* Broadwell/Cherryview is as fully supported as anything */
    } else {
       result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                          "Vulkan not yet supported on %s", device->name);
@@ -115,9 +119,10 @@ anv_physical_device_init(struct anv_physical_device *device,
       goto fail;
    }
 
-   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
+   if (!device->info->has_llc &&
+       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
-                         "non-llc gpu");
+                         "kernel missing wc mmap");
       goto fail;
    }
 
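For reference, anv_gem_get_param() boils down to a DRM_IOCTL_I915_GETPARAM ioctl. A minimal sketch of such a query, assuming libdrm's i915_drm.h (this is not the driver's wrapper, which has its own error handling):

    #include <sys/ioctl.h>
    #include <i915_drm.h>

    static int
    example_gem_get_param(int fd, uint32_t param)
    {
       int value = 0;
       struct drm_i915_getparam gp = {
          .param = param,
          .value = &value,
       };

       /* Return 0 on failure so callers can treat the feature as absent. */
       if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == -1)
          return 0;

       return value;
    }
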
@@ -150,15 +155,25 @@ anv_physical_device_finish(struct anv_physical_device *device)
 
 static const VkExtensionProperties global_extensions[] = {
    {
-      .extensionName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
-      .specVersion = 17,
+      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
+      .specVersion = 24,
    },
+   {
+      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
+      .specVersion = 5,
+   },
+#ifdef HAVE_WAYLAND_PLATFORM
+   {
+      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+      .specVersion = 4,
+   },
+#endif
 };
 
 static const VkExtensionProperties device_extensions[] = {
    {
-      .extensionName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
-      .specVersion = 53,
+      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+      .specVersion = 67,
    },
 };
 
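These tables are what applications see through the standard enumeration entry points. A hypothetical application-side check that VK_KHR_swapchain is advertised (physical_device is an assumed handle; needs <stdlib.h> and <string.h>):

    uint32_t count = 0;
    vkEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);

    VkExtensionProperties *props = calloc(count, sizeof(*props));
    vkEnumerateDeviceExtensionProperties(physical_device, NULL, &count, props);

    bool has_swapchain = false;
    for (uint32_t i = 0; i < count; i++) {
       if (strcmp(props[i].extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
          has_swapchain = true;
    }
    free(props);
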
@@ -198,7 +213,7 @@ VkResult anv_CreateInstance(
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
 
-   if (pCreateInfo->pApplicationInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
+   if (pCreateInfo->pApplicationInfo->apiVersion != VK_MAKE_VERSION(0, 210, 1))
       return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
 
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
@@ -495,7 +510,7 @@ void anv_GetPhysicalDeviceProperties(
    };
 
    *pProperties = (VkPhysicalDeviceProperties) {
-      .apiVersion = VK_MAKE_VERSION(0, 170, 2),
+      .apiVersion = VK_MAKE_VERSION(0, 210, 1),
       .driverVersion = 1,
       .vendorID = 0x8086,
       .deviceID = pdevice->chipset_id,
@@ -543,15 +558,38 @@ void anv_GetPhysicalDeviceMemoryProperties(
     */
    heap_size = 3 * physical_device->aperture_size / 4;
 
-   /* The property flags below are valid only for llc platforms. */
-   pMemoryProperties->memoryTypeCount = 1;
-   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
-      .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
-                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
-                       VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
-                       VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
-      .heapIndex = 1,
-   };
+   if (physical_device->info->has_llc) {
+      /* Big core GPUs share LLC with the CPU and thus one memory type can be
+       * both cached and coherent at the same time.
+       */
+      pMemoryProperties->memoryTypeCount = 1;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 1,
+      };
+   } else {
+      /* The spec requires that we expose a host-visible, coherent memory
+       * type, but Atom GPUs don't share LLC. Thus we offer two memory types:
+       * one that is coherent but uncached (write-combined), and one that is
+       * cached but not coherent.
+       */
+      pMemoryProperties->memoryTypeCount = 2;
+      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+         .heapIndex = 1,
+      };
+      pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
+         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 1,
+      };
+   }
 
    pMemoryProperties->memoryHeapCount = 1;
    pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
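With two memory types on non-LLC parts, applications should select by property flags rather than assuming index 0. A typical application-side selection loop over the vkGetPhysicalDeviceMemoryProperties() output (standard Vulkan, nothing driver-specific):

    static int32_t
    find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
                     uint32_t type_bits, VkMemoryPropertyFlags required)
    {
       /* type_bits comes from VkMemoryRequirements::memoryTypeBits. */
       for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
          if ((type_bits & (1u << i)) &&
              (props->memoryTypes[i].propertyFlags & required) == required)
             return i;
       }
       return -1; /* no compatible memory type */
    }
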
@@ -589,6 +627,20 @@ anv_queue_finish(struct anv_queue *queue)
 {
 }
 
+static struct anv_state
+anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
+{
+   struct anv_state state;
+
+   state = anv_state_pool_alloc(pool, size, align);
+   memcpy(state.map, p, size);
+
+   if (!pool->block_pool->device->info.has_llc)
+      anv_state_clflush(state);
+
+   return state;
+}
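anv_state_clflush() isn't defined in this diff; presumably it flushes every cacheline backing the state, much like clflush_mapped_ranges() further down. A sketch under that assumption (struct anv_state is taken to carry map and alloc_size):

    static void
    example_state_clflush(struct anv_state state)
    {
       void *p = (void *)((uintptr_t)state.map & ~CACHELINE_MASK);
       void *end = state.map + state.alloc_size;

       __builtin_ia32_sfence(); /* order the memcpy before the flushes */
       while (p < end) {
          __builtin_ia32_clflush(p);
          p += CACHELINE_SIZE;
       }
    }
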
+
 static void
 anv_device_init_border_colors(struct anv_device *device)
 {
@@ -601,10 +653,8 @@ anv_device_init_border_colors(struct anv_device *device)
       [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
    };
 
-   device->border_colors =
-      anv_state_pool_alloc(&device->dynamic_state_pool,
-                           sizeof(border_colors), 32);
-   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
+   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
+                                                    sizeof(border_colors), 32, border_colors);
 }
 
 VkResult anv_CreateDevice(
@@ -656,6 +706,9 @@ VkResult anv_CreateDevice(
    if (device->context_id == -1)
       goto fail_fd;
 
+   device->info = *physical_device->info;
+   device->isl_dev = physical_device->isl_dev;
+
    pthread_mutex_init(&device->mutex, NULL);
 
    anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
@@ -665,7 +718,7 @@ VkResult anv_CreateDevice(
    anv_state_pool_init(&device->dynamic_state_pool,
                        &device->dynamic_state_block_pool);
 
-   anv_block_pool_init(&device->instruction_block_pool, device, 4096);
+   anv_block_pool_init(&device->instruction_block_pool, device, 8192);
    anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
 
    anv_state_pool_init(&device->surface_state_pool,
@@ -675,9 +728,6 @@ VkResult anv_CreateDevice(
 
    anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
 
-   device->info = *physical_device->info;
-   device->isl_dev = physical_device->isl_dev;
-
    anv_queue_init(device, &device->queue);
 
    anv_device_init_meta(device);
@@ -808,8 +858,8 @@ void anv_GetDeviceQueue(
 
 VkResult anv_QueueSubmit(
     VkQueue                                     _queue,
-    uint32_t                                    commandBufferCount,
-    const VkCommandBuffer*                      pCommandBuffers,
+    uint32_t                                    submitCount,
+    const VkSubmitInfo*                         pSubmits,
     VkFence                                     _fence)
 {
    ANV_FROM_HANDLE(anv_queue, queue, _queue);
@@ -817,29 +867,31 @@ VkResult anv_QueueSubmit(
    struct anv_device *device = queue->device;
    int ret;
 
-   for (uint32_t i = 0; i < commandBufferCount; i++) {
-      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
-
-      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
-
-      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
-      if (ret != 0) {
-         /* We don't know the real error. */
-         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
-                          "execbuf2 failed: %m");
-      }
+   for (uint32_t i = 0; i < submitCount; i++) {
+      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
+         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
+                         pSubmits[i].pCommandBuffers[j]);
+         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 
-      if (fence) {
-         ret = anv_gem_execbuffer(device, &fence->execbuf);
+         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
          if (ret != 0) {
             /* We don't know the real error. */
             return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                              "execbuf2 failed: %m");
          }
-      }
 
-      for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
-         cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
+         if (fence) {
+            ret = anv_gem_execbuffer(device, &fence->execbuf);
+            if (ret != 0) {
+               /* We don't know the real error. */
+               return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                                "execbuf2 failed: %m");
+            }
+         }
+
+         for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
+            cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
+      }
    }
 
    return VK_SUCCESS;
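This matches the v0.210 entry point, where command buffers arrive batched in VkSubmitInfo structs. Hypothetical application-side usage (queue, cmd_buffer and fence are assumed handles):

    VkSubmitInfo submit = {
       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
       .commandBufferCount = 1,
       .pCommandBuffers = &cmd_buffer,
    };
    vkQueueSubmit(queue, 1, &submit, fence);
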
@@ -873,6 +925,9 @@ VkResult anv_DeviceWaitIdle(
    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
    anv_batch_emit(&batch, GEN7_MI_NOOP);
 
+   if (!device->info.has_llc)
+      anv_state_clflush(state);
+
    exec2_objects[0].handle = bo->gem_handle;
    exec2_objects[0].relocation_count = 0;
    exec2_objects[0].relocs_ptr = 0;
@@ -948,8 +1003,15 @@ VkResult anv_AllocateMemory(
 
    assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
 
+   if (pAllocateInfo->allocationSize == 0) {
+      /* Apparently, this is allowed */
+      *pMem = VK_NULL_HANDLE;
+      return VK_SUCCESS;
+   }
+
    /* We support exactly one memory heap. */
-   assert(pAllocateInfo->memoryTypeIndex == 0);
+   assert(pAllocateInfo->memoryTypeIndex == 0 ||
+          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
 
    /* FINISHME: Fail if allocation request exceeds heap size. */
 
@@ -962,6 +1024,8 @@ VkResult anv_AllocateMemory(
    if (result != VK_SUCCESS)
       goto fail;
 
+   mem->type_index = pAllocateInfo->memoryTypeIndex;
+
    *pMem = anv_device_memory_to_handle(mem);
 
    return VK_SUCCESS;
@@ -980,6 +1044,9 @@ void anv_FreeMemory(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
 
+   if (mem == NULL)
+      return;
+
    if (mem->bo.map)
       anv_gem_munmap(mem->bo.map, mem->bo.size);
 
@@ -1000,13 +1067,22 @@ VkResult anv_MapMemory(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
 
+   if (mem == NULL) {
+      *ppData = NULL;
+      return VK_SUCCESS;
+   }
+
    /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
     * takes a VkDeviceMemory pointer, it seems like only one map of the memory
     * at a time is valid. We could just mmap up front and return an offset
     * pointer here, but that may exhaust virtual memory on 32 bit
     * userspace. */
 
-   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
+   uint32_t gem_flags = 0;
+   if (!device->info.has_llc && mem->type_index == 0)
+      gem_flags |= I915_MMAP_WC;
+
+   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size, gem_flags);
    mem->map_size = size;
 
    *ppData = mem->map;
@@ -1020,25 +1096,63 @@ void anv_UnmapMemory(
 {
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
 
+   if (mem == NULL)
+      return;
+
    anv_gem_munmap(mem->map, mem->map_size);
 }
 
+static void
+clflush_mapped_ranges(struct anv_device         *device,
+                      uint32_t                   count,
+                      const VkMappedMemoryRange *ranges)
+{
+   for (uint32_t i = 0; i < count; i++) {
+      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
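+
+      /* Round the start of the range down to a cacheline boundary; clflush
+       * flushes whole lines, so iterating up to 'end' also covers a partial
+       * line at the tail. */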
+      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
+      void *end = mem->map + ranges[i].offset + ranges[i].size;
+
+      while (p < end) {
+         __builtin_ia32_clflush(p);
+         p += CACHELINE_SIZE;
+      }
+   }
+}
+
 VkResult anv_FlushMappedMemoryRanges(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     uint32_t                                    memoryRangeCount,
     const VkMappedMemoryRange*                  pMemoryRanges)
 {
-   /* clflush here for !llc platforms */
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   /* Make sure the writes we're flushing have landed. */
+   __builtin_ia32_sfence();
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
 
    return VK_SUCCESS;
 }
 
 VkResult anv_InvalidateMappedMemoryRanges(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     uint32_t                                    memoryRangeCount,
     const VkMappedMemoryRange*                  pMemoryRanges)
 {
-   return anv_FlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
+   if (device->info.has_llc)
+      return VK_SUCCESS;
+
+   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
+
+   /* Make sure no reads get moved up above the invalidate. */
+   __builtin_ia32_lfence();
+
+   return VK_SUCCESS;
 }
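
Flushing and invalidating only matter for the cached, non-coherent memory type; the sfence/lfence pair orders the CPU's stores and loads against the clflushes. A hypothetical application round-trip over such a mapping (device, memory, size, src and dst are assumed):

    void *data;
    vkMapMemory(device, memory, 0, size, 0, &data);

    VkMappedMemoryRange range = {
       .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
       .memory = memory,
       .offset = 0,
       .size = size,
    };

    memcpy(data, src, size);                      /* CPU writes */
    vkFlushMappedMemoryRanges(device, 1, &range); /* make them GPU-visible */

    /* ... GPU writes to the same range ... */

    vkInvalidateMappedMemoryRanges(device, 1, &range);
    memcpy(dst, data, size);                      /* CPU now sees GPU data */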
 
 void anv_GetBufferMemoryRequirements(
@@ -1111,8 +1225,13 @@ VkResult anv_BindBufferMemory(
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
-   buffer->bo = &mem->bo;
-   buffer->offset = memoryOffset;
+   if (mem) {
+      buffer->bo = &mem->bo;
+      buffer->offset = memoryOffset;
+   } else {
+      buffer->bo = NULL;
+      buffer->offset = 0;
+   }
 
    return VK_SUCCESS;
 }
@@ -1126,8 +1245,13 @@ VkResult anv_BindImageMemory(
    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
    ANV_FROM_HANDLE(anv_image, image, _image);
 
-   image->bo = &mem->bo;
-   image->offset = memoryOffset;
+   if (mem) {
+      image->bo = &mem->bo;
+      image->offset = memoryOffset;
+   } else {
+      image->bo = NULL;
+      image->offset = 0;
+   }
 
    return VK_SUCCESS;
 }
@@ -1166,12 +1290,19 @@ VkResult anv_CreateFence(
       goto fail;
 
    fence->bo.map =
-      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
+      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size, 0);
    batch.next = batch.start = fence->bo.map;
    batch.end = fence->bo.map + fence->bo.size;
    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
    anv_batch_emit(&batch, GEN7_MI_NOOP);
 
+   if (!device->info.has_llc) {
+      assert(((uintptr_t) fence->bo.map & CACHELINE_MASK) == 0);
+      assert(batch.next - fence->bo.map <= CACHELINE_SIZE);
+      __builtin_ia32_sfence();
+      __builtin_ia32_clflush(fence->bo.map);
+   }
+
    fence->exec2_objects[0].handle = fence->bo.gem_handle;
    fence->exec2_objects[0].relocation_count = 0;
    fence->exec2_objects[0].relocs_ptr = 0;
@@ -1298,8 +1429,13 @@ VkResult anv_CreateSemaphore(
     const VkAllocationCallbacks*                pAllocator,
     VkSemaphore*                                pSemaphore)
 {
+   /* The DRM execbuffer ioctl always executes in order, even across
+    * different rings. As such, there's nothing to do for the userspace
+    * semaphore.
+    */
+
    *pSemaphore = (VkSemaphore)1;
-   stub_return(VK_SUCCESS);
+
+   return VK_SUCCESS;
 }
 
 void anv_DestroySemaphore(
@@ -1307,47 +1443,100 @@ void anv_DestroySemaphore(
     VkSemaphore                                 semaphore,
     const VkAllocationCallbacks*                pAllocator)
 {
-   stub();
 }
 
 // Event functions
 
 VkResult anv_CreateEvent(
-    VkDevice                                    device,
+    VkDevice                                    _device,
     const VkEventCreateInfo*                    pCreateInfo,
     const VkAllocationCallbacks*                pAllocator,
     VkEvent*                                    pEvent)
 {
-   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   struct anv_state state;
+   struct anv_event *event;
+
+   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
+   state = anv_state_pool_alloc(&device->dynamic_state_pool,
+                                sizeof(*event), 4);
+   event = state.map;
+   event->state = state;
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_sfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   *pEvent = anv_event_to_handle(event);
+
+   return VK_SUCCESS;
 }
 
 void anv_DestroyEvent(
-    VkDevice                                    device,
-    VkEvent                                     event,
+    VkDevice                                    _device,
+    VkEvent                                     _event,
     const VkAllocationCallbacks*                pAllocator)
 {
-   stub();
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   anv_state_pool_free(&device->dynamic_state_pool, event->state);
 }
 
 VkResult anv_GetEventStatus(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   if (!device->info.has_llc) {
+      /* Invalidate the cacheline so we read the latest status written by
+       * the GPU. */
+      __builtin_ia32_clflush(event);
+      __builtin_ia32_lfence();
+   }
+
+   return event->semaphore;
 }
 
 VkResult anv_SetEvent(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_SET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_sfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
 }
 
 VkResult anv_ResetEvent(
-    VkDevice                                    device,
-    VkEvent                                     event)
+    VkDevice                                    _device,
+    VkEvent                                     _event)
 {
-   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+   ANV_FROM_HANDLE(anv_device, device, _device);
+   ANV_FROM_HANDLE(anv_event, event, _event);
+
+   event->semaphore = VK_EVENT_RESET;
+
+   if (!device->info.has_llc) {
+      /* Make sure the writes we're flushing have landed. */
+      __builtin_ia32_sfence();
+      __builtin_ia32_clflush(event);
+   }
+
+   return VK_SUCCESS;
 }
 
 // Buffer functions
@@ -1369,6 +1558,7 @@ VkResult anv_CreateBuffer(
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
    buffer->size = pCreateInfo->size;
+   buffer->usage = pCreateInfo->usage;
    buffer->bo = NULL;
    buffer->offset = 0;
 
@@ -1390,7 +1580,7 @@ void anv_DestroyBuffer(
 
 void
 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
-                              const struct anv_format *format,
+                              enum isl_format format,
                               uint32_t offset, uint32_t range, uint32_t stride)
 {
    switch (device->info.gen) {
@@ -1411,23 +1601,6 @@ anv_fill_buffer_surface_state(struct anv_device *device, void *state,
    }
 }
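
The switch body is elided by the diff; going by the driver's usual genX naming it presumably dispatches to per-generation helpers along these lines (the helper names here are assumptions, not taken from this diff):

    switch (device->info.gen) {
    case 7:
       gen7_fill_buffer_surface_state(state, format, offset, range, stride);
       break;
    case 8:
       gen8_fill_buffer_surface_state(state, format, offset, range, stride);
       break;
    default:
       unreachable("unsupported hardware generation");
    }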
 
-VkResult anv_CreateBufferView(
-    VkDevice                                    _device,
-    const VkBufferViewCreateInfo*               pCreateInfo,
-    const VkAllocationCallbacks*                pAllocator,
-    VkBufferView*                               pView)
-{
-   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
-}
-
-void anv_DestroyBufferView(
-    VkDevice                                    _device,
-    VkBufferView                                _bview,
-    const VkAllocationCallbacks*                pAllocator)
-{
-   stub();
-}
-
 void anv_DestroySampler(
     VkDevice                                    _device,
     VkSampler                                   _sampler,