fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
} else if (device->info->gen == 7 && !device->info->is_baytrail) {
fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
- } else if (device->info->gen == 9) {
+ } else if (device->info->gen == 7 && device->info->is_baytrail) {
+ fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
+ } else if (device->info->gen == 9 && !device->info->is_broxton) {
fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
- } else if (device->info->gen == 8 && !device->info->is_cherryview) {
- /* Broadwell is as fully supported as anything */
+ } else if (device->info->gen == 9 && device->info->is_broxton) {
+ fprintf(stderr, "WARNING: Broxton Vulkan support is incomplete\n");
+ } else if (device->info->gen == 8) {
+ /* Broadwell/Cherryview is as fully supported as anything */
} else {
result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", device->name);
goto fail;
}
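+   /* On non-LLC ("Atom") parts we rely on the kernel's write-combining mmap
+    * (I915_PARAM_MMAP_VERSION >= 1) for coherent CPU access to GPU buffers,
+    * so refuse to load without it.
+    */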
- if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
+ if (!device->info->has_llc &&
+ anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
- "non-llc gpu");
+ "kernel missing wc mmap");
goto fail;
}
static const VkExtensionProperties global_extensions[] = {
{
- .extensionName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
- .specVersion = 17,
+ .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
+ .specVersion = 24,
},
+ {
+ .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
+ .specVersion = 5,
+ },
+#ifdef HAVE_WAYLAND_PLATFORM
+ {
+ .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
+ .specVersion = 4,
+ },
+#endif
};
static const VkExtensionProperties device_extensions[] = {
{
- .extensionName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
- .specVersion = 53,
+ .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+ .specVersion = 67,
},
};
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
- if (pCreateInfo->pApplicationInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
+ if (pCreateInfo->pApplicationInfo->apiVersion != VK_MAKE_VERSION(0, 210, 1))
return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
.geometryShader = true,
.tessellationShader = false,
.sampleRateShading = false,
- .dualSourceBlend = true,
+ .dualSrcBlend = true,
.logicOp = true,
.multiDrawIndirect = true,
- .depthClip = false,
+ .depthClamp = false,
.depthBiasClamp = false,
.fillModeNonSolid = true,
.depthBounds = false,
.wideLines = true,
.largePoints = true,
+ .alphaToOne = true,
+ .multiViewport = true,
+ .samplerAnisotropy = false, /* FINISHME */
.textureCompressionETC2 = true,
.textureCompressionASTC_LDR = true,
.textureCompressionBC = true,
- .occlusionQueryNonConservative = false, /* FINISHME */
+ .occlusionQueryPrecise = false, /* FINISHME */
.pipelineStatisticsQuery = true,
- .vertexSideEffects = false,
- .tessellationSideEffects = false,
- .geometrySideEffects = false,
- .fragmentSideEffects = false,
- .shaderTessellationPointSize = false,
- .shaderGeometryPointSize = true,
+ .vertexPipelineStoresAndAtomics = false,
+ .fragmentStoresAndAtomics = true,
+ .shaderTessellationAndGeometryPointSize = true,
.shaderImageGatherExtended = true,
.shaderStorageImageExtendedFormats = false,
.shaderStorageImageMultisample = false,
.shaderSampledImageArrayDynamicIndexing = false,
.shaderStorageBufferArrayDynamicIndexing = false,
.shaderStorageImageArrayDynamicIndexing = false,
+ .shaderStorageImageReadWithoutFormat = false,
+ .shaderStorageImageWriteWithoutFormat = true,
.shaderClipDistance = false,
.shaderCullDistance = false,
.shaderFloat64 = false,
.shaderInt64 = false,
.shaderInt16 = false,
-    .alphaToOne = true,
+ .variableMultisampleRate = false,
};
}
anv_finishme("Get correct values for VkPhysicalDeviceLimits");
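+   /* Gen7 and later hardware supports 1x, 2x, 4x and 8x multisampling. */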
+ VkSampleCountFlags sample_counts =
+ VK_SAMPLE_COUNT_1_BIT |
+ VK_SAMPLE_COUNT_2_BIT |
+ VK_SAMPLE_COUNT_4_BIT |
+ VK_SAMPLE_COUNT_8_BIT;
+
VkPhysicalDeviceLimits limits = {
.maxImageDimension1D = (1 << 14),
.maxImageDimension2D = (1 << 14),
.maxImageDimension3D = (1 << 10),
.maxImageDimensionCube = (1 << 14),
.maxImageArrayLayers = (1 << 10),
-
- /* Broadwell supports 1, 2, 4, and 8 samples. */
- .sampleCounts = 4,
-
- .maxTexelBufferSize = (1 << 14),
- .maxUniformBufferSize = UINT32_MAX,
- .maxStorageBufferSize = UINT32_MAX,
+ .maxTexelBufferElements = (1 << 14),
+ .maxUniformBufferRange = UINT32_MAX,
+ .maxStorageBufferRange = UINT32_MAX,
.maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
.maxMemoryAllocationCount = UINT32_MAX,
+ .maxSamplerAllocationCount = UINT32_MAX,
.bufferImageGranularity = 64, /* A cache line */
.sparseAddressSpaceSize = 0,
.maxBoundDescriptorSets = MAX_SETS,
- .maxDescriptorSets = UINT32_MAX,
.maxPerStageDescriptorSamplers = 64,
.maxPerStageDescriptorUniformBuffers = 64,
.maxPerStageDescriptorStorageBuffers = 64,
.maxPerStageDescriptorSampledImages = 64,
.maxPerStageDescriptorStorageImages = 64,
+ .maxPerStageDescriptorInputAttachments = 64,
+ .maxPerStageResources = 128,
.maxDescriptorSetSamplers = 256,
.maxDescriptorSetUniformBuffers = 256,
.maxDescriptorSetUniformBuffersDynamic = 256,
.maxDescriptorSetStorageBuffersDynamic = 256,
.maxDescriptorSetSampledImages = 256,
.maxDescriptorSetStorageImages = 256,
+ .maxDescriptorSetInputAttachments = 256,
.maxVertexInputAttributes = 32,
.maxVertexInputBindings = 32,
.maxVertexInputAttributeOffset = 256,
.maxVertexInputBindingStride = 256,
.maxVertexOutputComponents = 32,
- .maxTessGenLevel = 0,
- .maxTessPatchSize = 0,
- .maxTessControlPerVertexInputComponents = 0,
- .maxTessControlPerVertexOutputComponents = 0,
- .maxTessControlPerPatchOutputComponents = 0,
- .maxTessControlTotalOutputComponents = 0,
- .maxTessEvaluationInputComponents = 0,
- .maxTessEvaluationOutputComponents = 0,
+ .maxTessellationGenerationLevel = 0,
+ .maxTessellationPatchSize = 0,
+ .maxTessellationControlPerVertexInputComponents = 0,
+ .maxTessellationControlPerVertexOutputComponents = 0,
+ .maxTessellationControlPerPatchOutputComponents = 0,
+ .maxTessellationControlTotalOutputComponents = 0,
+ .maxTessellationEvaluationInputComponents = 0,
+ .maxTessellationEvaluationOutputComponents = 0,
.maxGeometryShaderInvocations = 6,
.maxGeometryInputComponents = 16,
.maxGeometryOutputComponents = 16,
.maxGeometryOutputVertices = 16,
.maxGeometryTotalOutputComponents = 16,
.maxFragmentInputComponents = 16,
- .maxFragmentOutputBuffers = 8,
- .maxFragmentDualSourceBuffers = 2,
+ .maxFragmentOutputAttachments = 8,
+ .maxFragmentDualSrcAttachments = 2,
.maxFragmentCombinedOutputResources = 8,
.maxComputeSharedMemorySize = 1024,
.maxComputeWorkGroupCount = {
.subTexelPrecisionBits = 4 /* FIXME */,
.mipmapPrecisionBits = 4 /* FIXME */,
.maxDrawIndexedIndexValue = UINT32_MAX,
- .maxDrawIndirectInstanceCount = UINT32_MAX,
- .primitiveRestartForPatches = UINT32_MAX,
+ .maxDrawIndirectCount = UINT32_MAX,
.maxSamplerLodBias = 16,
.maxSamplerAnisotropy = 16,
.maxViewports = MAX_VIEWPORTS,
.maxFramebufferWidth = (1 << 14),
.maxFramebufferHeight = (1 << 14),
.maxFramebufferLayers = (1 << 10),
- .maxFramebufferColorSamples = 8,
- .maxFramebufferDepthSamples = 8,
- .maxFramebufferStencilSamples = 8,
+ .framebufferColorSampleCounts = sample_counts,
+ .framebufferDepthSampleCounts = sample_counts,
+ .framebufferStencilSampleCounts = sample_counts,
+ .framebufferNoAttachmentsSampleCounts = sample_counts,
.maxColorAttachments = MAX_RTS,
- .maxSampledImageColorSamples = 8,
- .maxSampledImageDepthSamples = 8,
- .maxSampledImageIntegerSamples = 1,
- .maxStorageImageSamples = 1,
+ .sampledImageColorSampleCounts = sample_counts,
+ .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
+ .sampledImageDepthSampleCounts = sample_counts,
+ .sampledImageStencilSampleCounts = sample_counts,
+ .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
- .timestampFrequency = 1000 * 1000 * 1000 / 80,
+    .timestampPeriod = 80.0, /* nanoseconds per timestamp tick */
.maxClipDistances = 0 /* FIXME */,
.maxCullDistances = 0 /* FIXME */,
.maxCombinedClipAndCullDistances = 0 /* FIXME */,
+ .discreteQueuePriorities = 1,
.pointSizeRange = { 0.125, 255.875 },
.lineWidthRange = { 0.0, 7.9921875 },
.pointSizeGranularity = (1.0 / 8.0),
.lineWidthGranularity = (1.0 / 128.0),
+ .strictLines = false, /* FINISHME */
+ .standardSampleLocations = true, /* FINISHME */
+ .optimalBufferCopyOffsetAlignment = 128,
+ .optimalBufferCopyRowPitchAlignment = 128,
+ .nonCoherentAtomSize = 64,
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(0, 170, 2),
+ .apiVersion = VK_MAKE_VERSION(0, 210, 1),
.driverVersion = 1,
.vendorID = 0x8086,
.deviceID = pdevice->chipset_id,
VK_QUEUE_COMPUTE_BIT |
VK_QUEUE_TRANSFER_BIT,
.queueCount = 1,
- .supportsTimestamps = true,
+      .timestampValidBits = 36, /* The HW timestamp counter is 36 bits */
+ .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
};
}
*/
heap_size = 3 * physical_device->aperture_size / 4;
- /* The property flags below are valid only for llc platforms. */
- pMemoryProperties->memoryTypeCount = 1;
- pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 1,
- };
+ if (physical_device->info->has_llc) {
+ /* Big core GPUs share LLC with the CPU and thus one memory type can be
+ * both cached and coherent at the same time.
+ */
+ pMemoryProperties->memoryTypeCount = 1;
+ pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+ };
+ } else {
+      /* The spec requires a host-visible, host-coherent memory type, but
+       * Atom GPUs don't share an LLC with the CPU, so no single type can be
+       * both cached and coherent. We therefore expose two types: type 0 is
+       * coherent but uncached (mapped write-combined, so streaming writes
+       * stay fast), and type 1 is cached but needs explicit flushes.
+       */
+ pMemoryProperties->memoryTypeCount = 2;
+ pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+         .heapIndex = 0,
+ };
+ pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+         .heapIndex = 0,
+ };
+ }
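+   /* Applications that need HOST_COHERENT memory get type 0; those that
+    * prefer cached CPU access use type 1 together with the
+    * vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges()
+    * implementations below.
+    */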
pMemoryProperties->memoryHeapCount = 1;
pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
{
}
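+/* Allocate pool state, copy the given data into it and, on non-LLC
+ * platforms where the GPU does not snoop the CPU cache, flush it out to
+ * main memory.
+ */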
+static struct anv_state
+anv_state_pool_emit_data(struct anv_state_pool *pool,
+                         size_t size, size_t align, const void *p)
+{
+ struct anv_state state;
+
+ state = anv_state_pool_alloc(pool, size, align);
+ memcpy(state.map, p, size);
+
+ if (!pool->block_pool->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state;
+}
+
static void
anv_device_init_border_colors(struct anv_device *device)
{
[VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
};
- device->border_colors =
- anv_state_pool_alloc(&device->dynamic_state_pool,
- sizeof(border_colors), 32);
- memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
+ device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
+ sizeof(border_colors), 32, border_colors);
}
VkResult anv_CreateDevice(
if (device->context_id == -1)
goto fail_fd;
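+   /* Fill out device->info before initializing the pools below; the state
+    * pools consult info.has_llc to decide whether to clflush.
+    */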
+ device->info = *physical_device->info;
+ device->isl_dev = physical_device->isl_dev;
+
pthread_mutex_init(&device->mutex, NULL);
anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 4096);
+ anv_block_pool_init(&device->instruction_block_pool, device, 8192);
anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
anv_state_pool_init(&device->surface_state_pool,
anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
- device->info = *physical_device->info;
- device->isl_dev = physical_device->isl_dev;
-
anv_queue_init(device, &device->queue);
anv_device_init_meta(device);
VkResult anv_QueueSubmit(
VkQueue _queue,
- uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers,
+ uint32_t submitCount,
+ const VkSubmitInfo* pSubmits,
VkFence _fence)
{
ANV_FROM_HANDLE(anv_queue, queue, _queue);
struct anv_device *device = queue->device;
int ret;
- for (uint32_t i = 0; i < commandBufferCount; i++) {
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
-
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ for (uint32_t i = 0; i < submitCount; i++) {
+ for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
+ pSubmits[i].pCommandBuffers[j]);
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
- if (ret != 0) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "execbuf2 failed: %m");
- }
-
- if (fence) {
- ret = anv_gem_execbuffer(device, &fence->execbuf);
+ ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
if (ret != 0) {
/* We don't know the real error. */
return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
"execbuf2 failed: %m");
}
- }
- for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
- cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
+ if (fence) {
+ ret = anv_gem_execbuffer(device, &fence->execbuf);
+ if (ret != 0) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "execbuf2 failed: %m");
+ }
+ }
+
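+         /* The kernel may have moved our BOs; remember where they ended up
+          * so the next submit's presumed offsets are likely already correct.
+          */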
+ for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
+ cmd_buffer->execbuf2.bos[k]->offset = cmd_buffer->execbuf2.objects[k].offset;
+ }
}
return VK_SUCCESS;
anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
anv_batch_emit(&batch, GEN7_MI_NOOP);
+ if (!device->info.has_llc)
+ anv_state_clflush(state);
+
exec2_objects[0].handle = bo->gem_handle;
exec2_objects[0].relocation_count = 0;
exec2_objects[0].relocs_ptr = 0;
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
+ if (pAllocateInfo->allocationSize == 0) {
+      /* Apparently a zero-size allocation is allowed; hand back a null
+       * handle, which the other entry points treat as a no-op.
+       */
+ *pMem = VK_NULL_HANDLE;
+ return VK_SUCCESS;
+ }
+
-   /* We support exactly one memory heap. */
-   assert(pAllocateInfo->memoryTypeIndex == 0);
+   /* We support exactly one memory heap, with one memory type on LLC
+    * platforms and two on non-LLC platforms.
+    */
+   assert(pAllocateInfo->memoryTypeIndex == 0 ||
+          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
/* FINISHME: Fail if allocation request exceeds heap size. */
if (result != VK_SUCCESS)
goto fail;
+ mem->type_index = pAllocateInfo->memoryTypeIndex;
+
*pMem = anv_device_memory_to_handle(mem);
return VK_SUCCESS;
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
+ if (mem == NULL)
+ return;
+
if (mem->bo.map)
anv_gem_munmap(mem->bo.map, mem->bo.size);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+ if (mem == NULL) {
+ *ppData = NULL;
+ return VK_SUCCESS;
+ }
+
/* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
* takes a VkDeviceMemory pointer, it seems like only one map of the memory
* at a time is valid. We could just mmap up front and return an offset
* pointer here, but that may exhaust virtual memory on 32 bit
* userspace. */
- mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
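+   /* On non-LLC platforms, type 0 is the HOST_COHERENT type; map it
+    * write-combined so CPU writes bypass the (unsnooped) CPU cache.
+    */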
+ uint32_t gem_flags = 0;
+ if (!device->info.has_llc && mem->type_index == 0)
+ gem_flags |= I915_MMAP_WC;
+
+ mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size, gem_flags);
mem->map_size = size;
*ppData = mem->map;
{
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
+ if (mem == NULL)
+ return;
+
anv_gem_munmap(mem->map, mem->map_size);
}
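+/* Flush every cache line overlapping the given ranges; the start address
+ * is rounded down to a cache-line boundary since clflush operates on
+ * whole lines.
+ */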
+static void
+clflush_mapped_ranges(struct anv_device *device,
+ uint32_t count,
+ const VkMappedMemoryRange *ranges)
+{
+ for (uint32_t i = 0; i < count; i++) {
+ ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
+ void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
+ void *end = mem->map + ranges[i].offset + ranges[i].size;
+
+ while (p < end) {
+ __builtin_ia32_clflush(p);
+ p += CACHELINE_SIZE;
+ }
+ }
+}
+
VkResult anv_FlushMappedMemoryRanges(
- VkDevice device,
+ VkDevice _device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges)
{
- /* clflush here for !llc platforms */
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
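+   /* LLC platforms are cache-coherent with the GPU, so there is nothing
+    * to flush.
+    */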
+ if (device->info.has_llc)
+ return VK_SUCCESS;
+
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+
+ clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
return VK_SUCCESS;
}
VkResult anv_InvalidateMappedMemoryRanges(
- VkDevice device,
+ VkDevice _device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges)
{
- return anv_FlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
+ if (device->info.has_llc)
+ return VK_SUCCESS;
+
+ clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
+
+ /* Make sure no reads get moved up above the invalidate. */
+ __builtin_ia32_lfence();
+
+ return VK_SUCCESS;
}
void anv_GetBufferMemoryRequirements(
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
- buffer->bo = &mem->bo;
- buffer->offset = memoryOffset;
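+   /* mem == NULL corresponds to the zero-size allocation case in
+    * anv_AllocateMemory(); treat the bind as a no-op.
+    */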
+ if (mem) {
+ buffer->bo = &mem->bo;
+ buffer->offset = memoryOffset;
+ } else {
+ buffer->bo = NULL;
+ buffer->offset = 0;
+ }
return VK_SUCCESS;
}
ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
ANV_FROM_HANDLE(anv_image, image, _image);
- image->bo = &mem->bo;
- image->offset = memoryOffset;
+ if (mem) {
+ image->bo = &mem->bo;
+ image->offset = memoryOffset;
+ } else {
+ image->bo = NULL;
+ image->offset = 0;
+ }
return VK_SUCCESS;
}
goto fail;
fence->bo.map =
- anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
+ anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size, 0);
batch.next = batch.start = fence->bo.map;
batch.end = fence->bo.map + fence->bo.size;
anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
anv_batch_emit(&batch, GEN7_MI_NOOP);
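+   /* The fence BO was mapped cached (mmap flags 0), so on non-LLC
+    * platforms flush the freshly written batch before the GPU executes it.
+    */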
+ if (!device->info.has_llc) {
+ assert(((uintptr_t) fence->bo.map & CACHELINE_MASK) == 0);
+ assert(batch.next - fence->bo.map <= CACHELINE_SIZE);
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(fence->bo.map);
+ }
+
fence->exec2_objects[0].handle = fence->bo.gem_handle;
fence->exec2_objects[0].relocation_count = 0;
fence->exec2_objects[0].relocs_ptr = 0;
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore)
{
+   /* The DRM execbuffer ioctl always executes in order, even between
+    * different rings. As such, there's nothing to do for a userspace
+    * semaphore.
+ */
+
*pSemaphore = (VkSemaphore)1;
- stub_return(VK_SUCCESS);
+
+ return VK_SUCCESS;
}
void anv_DestroySemaphore(
VkSemaphore semaphore,
const VkAllocationCallbacks* pAllocator)
{
- stub();
}
// Event functions
VkResult anv_CreateEvent(
- VkDevice device,
+ VkDevice _device,
const VkEventCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkEvent* pEvent)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_state state;
+ struct anv_event *event;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
+
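+   /* The event lives in GPU-visible dynamic state so that the GPU will be
+    * able to update its status as well; the semaphore field simply holds
+    * VK_EVENT_SET or VK_EVENT_RESET.
+    */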
+ state = anv_state_pool_alloc(&device->dynamic_state_pool,
+ sizeof(*event), 4);
+ event = state.map;
+ event->state = state;
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ *pEvent = anv_event_to_handle(event);
+
+ return VK_SUCCESS;
}
void anv_DestroyEvent(
- VkDevice device,
- VkEvent event,
+ VkDevice _device,
+ VkEvent _event,
const VkAllocationCallbacks* pAllocator)
{
- stub();
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ anv_state_pool_free(&device->dynamic_state_pool, event->state);
}
VkResult anv_GetEventStatus(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ if (!device->info.has_llc) {
+      /* Invalidate our cached copy so we observe the latest status the
+       * GPU (or another thread) wrote.
+       */
+ __builtin_ia32_clflush(event);
+ __builtin_ia32_lfence();
+ }
+
+ return event->semaphore;
}
VkResult anv_SetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ event->semaphore = VK_EVENT_SET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
VkResult anv_ResetEvent(
- VkDevice device,
- VkEvent event)
+ VkDevice _device,
+ VkEvent _event)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_event, event, _event);
+
+ event->semaphore = VK_EVENT_RESET;
+
+ if (!device->info.has_llc) {
+ /* Make sure the writes we're flushing have landed. */
+ __builtin_ia32_sfence();
+ __builtin_ia32_clflush(event);
+ }
+
+ return VK_SUCCESS;
}
// Buffer functions
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->size = pCreateInfo->size;
+ buffer->usage = pCreateInfo->usage;
buffer->bo = NULL;
buffer->offset = 0;
void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
- const struct anv_format *format,
+ enum isl_format format,
uint32_t offset, uint32_t range, uint32_t stride)
{
switch (device->info.gen) {
}
}
-VkResult anv_CreateBufferView(
- VkDevice _device,
- const VkBufferViewCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkBufferView* pView)
-{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
-}
-
-void anv_DestroyBufferView(
- VkDevice _device,
- VkBufferView _bview,
- const VkAllocationCallbacks* pAllocator)
-{
- stub();
-}
-
void anv_DestroySampler(
VkDevice _device,
VkSampler _sampler,