anv: Soft-pin everything else
author     Scott D Phillips <scott.d.phillips@intel.com>
Thu, 31 May 2018 03:16:30 +0000 (20:16 -0700)
committer  Jason Ekstrand <jason.ekstrand@intel.com>
Fri, 1 Jun 2018 21:27:13 +0000 (14:27 -0700)
v2 (Jason Ekstrand):
 - Break up Scott's mega-patch

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Scott D Phillips <scott.d.phillips@intel.com>
src/intel/vulkan/anv_allocator.c
src/intel/vulkan/anv_device.c
src/intel/vulkan/genX_query.c
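
Every hunk below applies the same three-step pattern: request a pinned
placement, reserve a GPU virtual address, and release that address again on
teardown. A minimal sketch of the pattern, assuming anv's internal helpers as
they exist in this tree (anv_bo, anv_vma_alloc/anv_vma_free, anv_gem_close);
the wrapper names and the error code chosen on failure are illustrative only:

   #include "anv_private.h"

   /* Hypothetical helpers illustrating the soft-pin pattern used below. */
   static VkResult
   init_softpin_bo(struct anv_device *device, struct anv_bo *bo)
   {
      /* Ask the kernel to keep the BO at the offset we choose. */
      if (device->instance->physicalDevice.use_softpin)
         bo->flags |= EXEC_OBJECT_PINNED;

      /* Reserve a GPU virtual address range for the BO. */
      if (!anv_vma_alloc(device, bo))
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;

      return VK_SUCCESS;
   }

   static void
   finish_softpin_bo(struct anv_device *device, struct anv_bo *bo)
   {
      /* Return the address range before closing the GEM handle. */
      anv_vma_free(device, bo);
      anv_gem_close(device, bo->gem_handle);
   }

Freeing the VMA before anv_gem_close mirrors the destroy paths below and lets
the allocator hand the address range back out once the BO is gone.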

diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 97fa8c970354fa8dc4049a3ef7f4bfb6bc2649c4..ab01d46cbebdec86b2279fd9a5979ecf45ebb960 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1077,8 +1077,10 @@ anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool
    for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
       for (unsigned i = 0; i < 16; i++) {
          struct anv_scratch_bo *bo = &pool->bos[i][s];
-         if (bo->exists > 0)
+         if (bo->exists > 0) {
+            anv_vma_free(device, &bo->bo);
             anv_gem_close(device, bo->bo.gem_handle);
+         }
       }
    }
 }
@@ -1176,6 +1178,11 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
    if (device->instance->physicalDevice.has_exec_async)
       bo->bo.flags |= EXEC_OBJECT_ASYNC;
 
+   if (device->instance->physicalDevice.use_softpin)
+      bo->bo.flags |= EXEC_OBJECT_PINNED;
+
+   anv_vma_alloc(device, &bo->bo);
+
    /* Set the exists last because it may be read by other threads */
    __sync_synchronize();
    bo->exists = true;
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 1a2bde2df4d8d07b22c5176931ee77dfcf13d531..b02e1a2749e92de5109bd6eee02229f4a3d8a03b 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1658,6 +1658,12 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_binding_table_pool;
 
+   if (physical_device->use_softpin)
+      device->workaround_bo.flags |= EXEC_OBJECT_PINNED;
+
+   if (!anv_vma_alloc(device, &device->workaround_bo))
+      goto fail_workaround_bo;
+
    anv_device_init_trivial_batch(device);
 
    if (device->info.gen >= 10)
@@ -1756,6 +1762,7 @@ void anv_DestroyDevice(
    anv_scratch_pool_finish(device, &device->scratch_pool);
 
    anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+   anv_vma_free(device, &device->workaround_bo);
    anv_gem_close(device, device->workaround_bo.gem_handle);
 
    anv_vma_free(device, &device->trivial_batch_bo);
diff --git a/src/intel/vulkan/genX_query.c b/src/intel/vulkan/genX_query.c
index de409be04edacf6f3770e294ffe3ba9542e58232..e35e9b858444fab2632603bd2a83eff3a4909036 100644
--- a/src/intel/vulkan/genX_query.c
+++ b/src/intel/vulkan/genX_query.c
@@ -94,9 +94,14 @@ VkResult genX(CreateQueryPool)(
    if (pdevice->supports_48bit_addresses)
       pool->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 
+   if (pdevice->use_softpin)
+      pool->bo.flags |= EXEC_OBJECT_PINNED;
+
    if (pdevice->has_exec_async)
       pool->bo.flags |= EXEC_OBJECT_ASYNC;
 
+   anv_vma_alloc(device, &pool->bo);
+
    /* For query pools, we set the caching mode to I915_CACHING_CACHED.  On LLC
     * platforms, this does nothing.  On non-LLC platforms, this means snooping
     * which comes at a slight cost.  However, the buffers aren't big, won't be
@@ -129,6 +134,7 @@ void genX(DestroyQueryPool)(
       return;
 
    anv_gem_munmap(pool->bo.map, pool->bo.size);
+   anv_vma_free(device, &pool->bo);
    anv_gem_close(device, pool->bo.gem_handle);
    vk_free2(&device->alloc, pAllocator, pool);
 }
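
At submit time a soft-pinned BO is handed to the kernel at the address
anv_vma_alloc picked, rather than being left for the kernel to place. A rough
sketch of how such an object could look in the execbuf list, assuming the i915
uapi header from libdrm; the helper name is hypothetical and the surrounding
execbuffer plumbing is omitted:

   #include <drm/i915_drm.h>
   #include "anv_private.h"

   /* Illustrative only: fill one exec object for a soft-pinned BO. */
   static void
   fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                    const struct anv_bo *bo)
   {
      obj->handle = bo->gem_handle;
      obj->offset = bo->offset;    /* address chosen by anv_vma_alloc */
      obj->flags  = bo->flags;     /* includes EXEC_OBJECT_PINNED     */
      obj->relocation_count = 0;   /* pinned BOs need no relocations  */
      obj->relocs_ptr = 0;
   }

With EXEC_OBJECT_PINNED set, the kernel validates the supplied offset instead
of relocating the buffer, which is what makes relocation-free submission
possible once everything is soft-pinned.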