fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
} else if (device->info->gen == 7 && device->info->is_baytrail) {
fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
- } else if (device->info->gen == 9 && !device->info->is_broxton) {
- fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
- } else if (device->info->gen == 9 && device->info->is_broxton) {
- fprintf(stderr, "WARNING: Broxton Vulkan support is incomplete\n");
- } else if (device->info->gen == 8) {
- /* Broadwell/Cherryview is as fully supported as anything */
+ } else if (device->info->gen >= 8) {
+ /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
+ * supported as anything */
} else {
result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", device->name);
anv_finishme("Get correct values for VkPhysicalDeviceLimits");
+ const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
+
VkSampleCountFlags sample_counts =
VK_SAMPLE_COUNT_1_BIT |
VK_SAMPLE_COUNT_2_BIT |
.maxViewportDimensions = { (1 << 14), (1 << 14) },
.viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
.viewportSubPixelBits = 13, /* We take a float? */
- .minMemoryMapAlignment = 64, /* A cache line */
+ .minMemoryMapAlignment = 4096, /* A page */
.minTexelBufferOffsetAlignment = 1,
.minUniformBufferOffsetAlignment = 1,
.minStorageBufferOffsetAlignment = 1,
.sampledImageStencilSampleCounts = sample_counts,
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
- .timestampPeriod = 80.0 / (1000 * 1000 * 1000),
+ .timestampPeriod = time_stamp_base / (1000 * 1000 * 1000),
.maxClipDistances = 0 /* FIXME */,
.maxCullDistances = 0 /* FIXME */,
.maxCombinedClipAndCullDistances = 0 /* FIXME */,
VK_QUEUE_COMPUTE_BIT |
VK_QUEUE_TRANSFER_BIT,
.queueCount = 1,
- .timestampValidBits = 0, /* XXX: Real value here */
+ .timestampValidBits = 36, /* XXX: Real value here */
.minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};
}
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
} else {
/* The spec requires that we expose a host-visible, coherent memory
.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
}
VkDevice* pDevice)
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+ VkResult result;
struct anv_device *device;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
/* XXX(chadv): Can we dup() physicalDevice->fd here? */
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
- if (device->fd == -1)
+ if (device->fd == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_device;
+ }
device->context_id = anv_gem_create_context(device);
- if (device->context_id == -1)
+ if (device->context_id == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
+ }
+
+ device->info = *physical_device->info;
+ device->isl_dev = physical_device->isl_dev;
pthread_mutex_init(&device->mutex, NULL);
anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
- anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
+ anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 8192);
+ anv_block_pool_init(&device->instruction_block_pool, device, 64 * 1024);
+ anv_pipeline_cache_init(&device->default_pipeline_cache, device);
+
anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
anv_state_pool_init(&device->surface_state_pool,
anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
- device->info = *physical_device->info;
- device->isl_dev = physical_device->isl_dev;
-
anv_queue_init(device, &device->queue);
- anv_device_init_meta(device);
+ result = anv_device_init_meta(device);
+ if (result != VK_SUCCESS)
+ goto fail_fd;
anv_device_init_border_colors(device);
fail_device:
anv_free(&device->alloc, device);
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ return result;
}
void anv_DestroyDevice(
close(device->fd);
+ pthread_mutex_destroy(&device->mutex);
+
anv_free(&device->alloc, device);
}
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- result = anv_bo_init_new(&mem->bo, device, pAllocateInfo->allocationSize);
+ /* The kernel is going to give us whole pages anyway */
+ uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+
+ result = anv_bo_init_new(&mem->bo, device, alloc_size);
if (result != VK_SUCCESS)
goto fail;
if (!device->info.has_llc && mem->type_index == 0)
gem_flags |= I915_MMAP_WC;
- mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size, gem_flags);
- mem->map_size = size;
+ /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
+ uint64_t map_offset = offset & ~4095ull;
+ assert(offset >= map_offset);
+ uint64_t map_size = (offset + size) - map_offset;
+
+ /* Let's map whole pages */
+ map_size = align_u64(map_size, 4096);
+
+ mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
+ map_offset, map_size, gem_flags);
+ mem->map_size = map_size;
- *ppData = mem->map;
+ *ppData = mem->map + (offset - map_offset);
return VK_SUCCESS;
}
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore)
{
+ /* The DRM execbuffer ioctl always executes in order, even between different
+ * rings. As such, there's nothing to do for the user space semaphore.
+ */
+ */
+
*pSemaphore = (VkSemaphore)1;
- stub_return(VK_SUCCESS);
+
+ return VK_SUCCESS;
}
void anv_DestroySemaphore(
VkSemaphore semaphore,
const VkAllocationCallbacks* pAllocator)
{
- stub();
}
// Event functions