fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
} else if (device->info->gen == 7 && device->info->is_baytrail) {
fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
- } else if (device->info->gen == 9 && !device->info->is_broxton) {
- fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
- } else if (device->info->gen == 9 && device->info->is_broxton) {
- fprintf(stderr, "WARNING: Broxton Vulkan support is incomplete\n");
- } else if (device->info->gen == 8) {
- /* Broadwell/Cherryview is as fully supported as anything */
+ } else if (device->info->gen >= 8) {
+ /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
+ * supported as anything */
} else {
result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", device->name);
goto fail;
}
- if (anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION < 1)) {
+ if (!device->info->has_llc &&
+ anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
"kernel missing wc mmap");
goto fail;
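
For reference, anv_gem_get_param() is a thin wrapper around the i915 GETPARAM ioctl; a minimal sketch of that pattern is below (the in-tree helper may differ in details):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>   /* from libdrm; exact include path may vary */

/* Query an i915 parameter. Returns 0 on failure so callers treat the
 * feature as absent, matching the check above. */
static int
gem_get_param(int fd, uint32_t param)
{
   int value = 0;
   drm_i915_getparam_t gp;

   memset(&gp, 0, sizeof(gp));
   gp.param = param;
   gp.value = &value;

   if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
      return 0;

   return value;
}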
anv_finishme("Get correct values for VkPhysicalDeviceLimits");
+ /* Timestamp tick period in nanoseconds: ~83.333 on gen9+, 80 earlier. */
+ const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
+
VkSampleCountFlags sample_counts =
VK_SAMPLE_COUNT_1_BIT |
VK_SAMPLE_COUNT_2_BIT |
.maxViewportDimensions = { (1 << 14), (1 << 14) },
.viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
.viewportSubPixelBits = 13, /* We take a float? */
- .minMemoryMapAlignment = 64, /* A cache line */
+ .minMemoryMapAlignment = 4096, /* A page */
.minTexelBufferOffsetAlignment = 1,
.minUniformBufferOffsetAlignment = 1,
.minStorageBufferOffsetAlignment = 1,
.sampledImageStencilSampleCounts = sample_counts,
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
- .timestampPeriod = 80.0 / (1000 * 1000 * 1000),
+ .timestampPeriod = time_stamp_base, /* the spec defines this in ns per tick */
.maxClipDistances = 0 /* FIXME */,
.maxCullDistances = 0 /* FIXME */,
.maxCombinedClipAndCullDistances = 0 /* FIXME */,
VK_QUEUE_COMPUTE_BIT |
VK_QUEUE_TRANSFER_BIT,
.queueCount = 1,
- .timestampValidBits = 0, /* XXX: Real value here */
+ .timestampValidBits = 36,
.minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};
}
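
As a usage sketch, a caller would mask a raw query result to the advertised valid bits and scale by the tick period to get nanoseconds (helper name hypothetical):

#include <stdint.h>

/* Hypothetical helper: convert a raw GPU timestamp to nanoseconds,
 * assuming the 36 valid bits and per-gen tick period advertised above. */
static uint64_t
timestamp_to_ns(uint64_t raw, uint32_t valid_bits, double period_ns)
{
   uint64_t mask = (valid_bits >= 64) ? ~0ull : ((1ull << valid_bits) - 1);
   return (uint64_t)((double)(raw & mask) * period_ns);
}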
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
} else {
/* The spec requires that we expose a host-visible, coherent memory
.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
.propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 1,
+ .heapIndex = 0,
};
}
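
Both layouts now point every memory type at heap 0, the single heap the driver advertises. On the application side these tables feed the usual type search; a minimal sketch against the stock Vulkan API (function name hypothetical):

#include <stdint.h>
#include <vulkan/vulkan.h>

/* Find the first memory type whose propertyFlags contain `required` and
 * whose bit is set in `type_bits` (from VkMemoryRequirements).
 * Returns UINT32_MAX when no type matches. */
static uint32_t
find_memory_type(VkPhysicalDevice phys, uint32_t type_bits,
                 VkMemoryPropertyFlags required)
{
   VkPhysicalDeviceMemoryProperties props;
   vkGetPhysicalDeviceMemoryProperties(phys, &props);

   for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
      if ((type_bits & (1u << i)) &&
          (props.memoryTypes[i].propertyFlags & required) == required)
         return i;
   }
   return UINT32_MAX;
}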
VkDevice* pDevice)
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
+ VkResult result;
struct anv_device *device;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
/* XXX(chadv): Can we dup() physicalDevice->fd here? */
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
- if (device->fd == -1)
+ if (device->fd == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_device;
+ }
device->context_id = anv_gem_create_context(device);
- if (device->context_id == -1)
+ if (device->context_id == -1) {
+ result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
+ }
+
+ device->info = *physical_device->info;
+ device->isl_dev = physical_device->isl_dev;
pthread_mutex_init(&device->mutex, NULL);
anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
- anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
+ anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 4096);
+ anv_block_pool_init(&device->instruction_block_pool, device, 64 * 1024);
+ anv_pipeline_cache_init(&device->default_pipeline_cache, device);
+
anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
anv_state_pool_init(&device->surface_state_pool,
anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
- device->info = *physical_device->info;
- device->isl_dev = physical_device->isl_dev;
-
anv_queue_init(device, &device->queue);
- anv_device_init_meta(device);
+ result = anv_device_init_meta(device);
+ if (result != VK_SUCCESS)
+ goto fail_fd;
anv_device_init_border_colors(device);
fail_device:
anv_free(&device->alloc, device);
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ return result;
}
void anv_DestroyDevice(
close(device->fd);
+ pthread_mutex_destroy(&device->mutex);
+
anv_free(&device->alloc, device);
}
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+ if (pAllocateInfo->allocationSize == 0) {
+ /* Apparently, this is allowed */
+ *pMem = VK_NULL_HANDLE;
+ return VK_SUCCESS;
+ }
+
/* We support exactly one memory heap. */
assert(pAllocateInfo->memoryTypeIndex == 0 ||
(!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- result = anv_bo_init_new(&mem->bo, device, pAllocateInfo->allocationSize);
+ /* The kernel is going to give us whole pages anyway */
+ uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
+
+ result = anv_bo_init_new(&mem->bo, device, alloc_size);
if (result != VK_SUCCESS)
goto fail;
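
align_u64() here is the usual power-of-two round-up; a self-contained sketch of the expected semantics, in case the helper is unfamiliar:

#include <assert.h>
#include <stdint.h>

/* Round v up to the next multiple of a; a must be a power of two.
 * align_u64(1, 4096) == 4096, align_u64(4096, 4096) == 4096. */
static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && (a & (a - 1)) == 0);
   return (v + a - 1) & ~(a - 1);
}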
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
+ if (mem == NULL)
+ return;
+
if (mem->bo.map)
anv_gem_munmap(mem->bo.map, mem->bo.size);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+ if (mem == NULL) {
+ *ppData = NULL;
+ return VK_SUCCESS;
+ }
+
/* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
* takes a VkDeviceMemory pointer, it seems like only one map of the memory
* at a time is valid. We could just mmap up front and return an offset
if (!device->info.has_llc && mem->type_index == 0)
gem_flags |= I915_MMAP_WC;
- mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size, gem_flags);
- mem->map_size = size;
- *ppData = mem->map;
+ /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
+ uint64_t map_offset = offset & ~4095ull;
+ assert(offset >= map_offset);
+ uint64_t map_size = (offset + size) - map_offset;
+
+ /* Let's map whole pages */
+ map_size = align_u64(map_size, 4096);
+
+ mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
+ map_offset, map_size, gem_flags);
+ mem->map_size = map_size;
+
+ *ppData = mem->map + (offset - map_offset);
return VK_SUCCESS;
}
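
A worked example of the rounding, with hypothetical values:

/* offset = 5000, size = 100:
 *   map_offset = 5000 & ~4095ull       = 4096
 *   map_size   = (5000 + 100) - 4096   = 1004
 *   map_size   = align_u64(1004, 4096) = 4096
 *   *ppData    = map + (5000 - 4096)   = map + 904
 */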
{
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+ if (mem == NULL)
+ return;
+
anv_gem_munmap(mem->map, mem->map_size);
}
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- buffer->bo = &mem->bo;
- buffer->offset = memoryOffset;
+ if (mem) {
+ buffer->bo = &mem->bo;
+ buffer->offset = memoryOffset;
+ } else {
+ buffer->bo = NULL;
+ buffer->offset = 0;
+ }
return VK_SUCCESS;
}
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
ANV_FROM_HANDLE(anv_image, image, _image);
- image->bo = &mem->bo;
- image->offset = memoryOffset;
+ if (mem) {
+ image->bo = &mem->bo;
+ image->offset = memoryOffset;
+ } else {
+ image->bo = NULL;
+ image->offset = 0;
+ }
return VK_SUCCESS;
}
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore)
{
+ /* The DRM execbuffer ioctl always executes in-order, even between
+ * different rings. As such, there's nothing to do for the user-space
+ * semaphore.
+ */
+
*pSemaphore = (VkSemaphore)1;
- stub_return(VK_SUCCESS);
+
+ return VK_SUCCESS;
}
void anv_DestroySemaphore(
VkSemaphore semaphore,
const VkAllocationCallbacks* pAllocator)
{
- stub();
}
// Event functions
VkResult anv_CreateEvent(
- VkDevice device,
+ VkDevice _device,
const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkEvent* pEvent)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_state state;
+ struct anv_event *event;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
+ state = anv_state_pool_alloc(&device->dynamic_state_pool,
+ sizeof(*event), 4);
+ event = state.map;
+ event->state = state;
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ *pEvent = anv_event_to_handle(event);
+
+ return VK_SUCCESS;
}
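
The sfence/clflush pair suffices here because the event fits in a single cache line; flushing a larger range needs a loop over cache lines. A sketch of that pattern, assuming a 64-byte x86 cache line:

#include <stddef.h>
#include <stdint.h>

#define CACHELINE_SIZE 64 /* assumption: x86 cache-line size */

/* Flush every cache line overlapping [start, start + size); x86-only.
 * sfence first so the stores being flushed have landed. */
static inline void
clflush_range(void *start, size_t size)
{
   char *p = (char *)((uintptr_t)start & ~(uintptr_t)(CACHELINE_SIZE - 1));
   char *end = (char *)start + size;

   __builtin_ia32_sfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}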
void anv_DestroyEvent(
- VkDevice device,
- VkEvent event,
+ VkDevice _device,
+ VkEvent _event,
const VkAllocationCallbacks* pAllocator)
{
- stub();
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ anv_state_pool_free(&device->dynamic_state_pool, event->state);
}
VkResult anv_GetEventStatus(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ if (!device->info.has_llc) {
+ /* Invalidate the cached line so we read what the GPU wrote. */
+ __builtin_ia32_clflush(event);
+ __builtin_ia32_lfence();
+ }
+
+ return event->semaphore;
}
VkResult anv_SetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ event->semaphore = VK_EVENT_SET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
VkResult anv_ResetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
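
From the application side, the four entry points now behave as the spec describes: a host-visible flag that can be set, reset, and polled. A minimal host-only round trip (assumes a live VkDevice):

#include <assert.h>
#include <vulkan/vulkan.h>

static VkResult
event_round_trip(VkDevice device)
{
   const VkEventCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
   };
   VkEvent event;
   VkResult result = vkCreateEvent(device, &info, NULL, &event);
   if (result != VK_SUCCESS)
      return result;

   vkSetEvent(device, event);
   assert(vkGetEventStatus(device, event) == VK_EVENT_SET);

   vkResetEvent(device, event);
   assert(vkGetEventStatus(device, event) == VK_EVENT_RESET);

   vkDestroyEvent(device, event, NULL);
   return VK_SUCCESS;
}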
// Buffer functions
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->size = pCreateInfo->size;
+ buffer->usage = pCreateInfo->usage;
buffer->bo = NULL;
buffer->offset = 0;
void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
- const struct anv_format *format,
+ enum isl_format format,
uint32_t offset, uint32_t range, uint32_t stride)
{
switch (device->info.gen) {