va_start(args, fmt);
if (unlikely(INTEL_DEBUG & DEBUG_PERF))
- vfprintf(stderr, fmt, args);
+ intel_logd_v(fmt, args);
va_end(args);
}
static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
- const struct build_id_note *note = build_id_find_nhdr("libvulkan_intel.so");
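+ /* build_id_find_nhdr_for_addr() finds the loaded object that contains the
+ * given address and returns its GNU build-id note, so passing one of our
+ * own functions avoids hard-coding the installed library name.
+ */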
+ const struct build_id_note *note =
+ build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
if (!note) {
return vk_errorf(device->instance, device,
VK_ERROR_INITIALIZATION_FAILED,
}
if (device->info.is_haswell) {
- fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
+ intel_logw("Haswell Vulkan support is incomplete");
} else if (device->info.gen == 7 && !device->info.is_baytrail) {
- fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
+ intel_logw("Ivy Bridge Vulkan support is incomplete");
} else if (device->info.gen == 7 && device->info.is_baytrail) {
- fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
+ intel_logw("Bay Trail Vulkan support is incomplete");
} else if (device->info.gen >= 8 && device->info.gen <= 9) {
- /* Broadwell, Cherryview, Skylake, Broxton, Kabylake is as fully
- * supported as anything */
+ /* Broadwell, Cherryview, Skylake, Broxton, Kabylake, and Coffeelake are
+ * as fully supported as anything */
+ } else if (device->info.gen == 10) {
+ intel_logw("Cannonlake Vulkan support is alpha");
} else {
result = vk_errorf(device->instance, device,
VK_ERROR_INCOMPATIBLE_DRIVER,
* many platforms, but otherwise, things will just work.
*/
if (device->subslice_total < 1 || device->eu_total < 1) {
- fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
- " query GPU properties.\n");
+ intel_logw("Kernel 4.1 required to properly query GPU properties");
}
} else if (device->info.gen == 7) {
device->subslice_total = 1 << (device->info.gt - 1);
}
device->compiler->shader_debug_log = compiler_debug_log;
device->compiler->shader_perf_log = compiler_perf_log;
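+ /* anv has no path for uploading a legacy pull-constant buffer, so tell
+ * the compiler not to demote push constants to pull constants.
+ */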
+ device->compiler->supports_pull_constants = false;
isl_device_init(&device->isl_dev, &device->info, swizzled);
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR: {
+ VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *features =
+ (VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *) ext;
+ features->samplerYcbcrConversion = true;
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
+ VkPhysicalDevicePointClippingPropertiesKHR *properties =
+ (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
+ properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
+ anv_finishme("Implement pop-free point clipping");
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
result = anv_bo_cache_import(device, &device->bo_cache,
- fd_info->fd, pAllocateInfo->allocationSize,
- &mem->bo);
+ fd_info->fd, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
+
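+ /* BO sizes are page-granular, so compare against the requested size
+ * rounded up to a 4 KiB page.
+ */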
+ VkDeviceSize aligned_alloc_size =
+ align_u64(pAllocateInfo->allocationSize, 4096);
+
+ /* For security purposes, we reject importing the bo if it's smaller
+ * than the requested allocation size. This prevents a malicious client
+ * from passing a buffer to a trusted client, lying about the size, and
+ * telling the trusted client to try to texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ */
+ if (mem->bo->size < aligned_alloc_size) {
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ "aligned allocationSize too large for "
+ "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: "
+ "%"PRIu64"B > %"PRIu64"B",
+ aligned_alloc_size, mem->bo->size);
+ anv_bo_cache_release(device, &device->bo_cache, mem->bo);
+ goto fail;
+ }
+
+ /* From the Vulkan spec:
+ *
+ * "Importing memory from a file descriptor transfers ownership of
+ * the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd_info->fd);
} else {
result = anv_bo_cache_alloc(device, &device->bo_cache,
pAllocateInfo->allocationSize,
anv_GetImageMemoryRequirements(_device, pInfo->image,
&pMemoryRequirements->memoryRequirements);
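+ /* When the client chains VkImagePlaneMemoryRequirementsInfoKHR, report the
+ * requirements of just that plane rather than of the image as a whole.
+ */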
+ vk_foreach_struct_const(ext, pInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR: {
+ ANV_FROM_HANDLE(anv_image, image, pInfo->image);
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ const VkImagePlaneMemoryRequirementsInfoKHR *plane_reqs =
+ (const VkImagePlaneMemoryRequirementsInfoKHR *) ext;
+ uint32_t plane = anv_image_aspect_to_plane(image->aspects,
+ plane_reqs->planeAspect);
+
+ assert(image->planes[plane].offset == 0);
+
+ /* The Vulkan spec (git aaed022) says:
+ *
+ * memoryTypeBits is a bitfield and contains one bit set for every
+ * supported memory type for the resource. The bit `1<<i` is set
+ * if and only if the memory type `i` in the
+ * VkPhysicalDeviceMemoryProperties structure for the physical
+ * device is supported.
+ *
+ * All types are currently supported for images.
+ */
+ pMemoryRequirements->memoryRequirements.memoryTypeBits =
+ (1ull << pdevice->memory.type_count) - 1;
+
+ pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
+ pMemoryRequirements->memoryRequirements.alignment =
+ image->planes[plane].alignment;
+ break;
+ }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
+
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
*pCommittedMemoryInBytes = 0;
}
-VkResult anv_BindBufferMemory(
- VkDevice device,
- VkBuffer _buffer,
- VkDeviceMemory _memory,
- VkDeviceSize memoryOffset)
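+ /* Shared implementation for vkBindBufferMemory and vkBindBufferMemory2KHR;
+ * both entry points funnel through the KHR bind-info struct.
+ */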
+static void
+anv_bind_buffer_memory(const VkBindBufferMemoryInfoKHR *pBindInfo)
{
- ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
- ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
+ ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
+
+ assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR);
if (mem) {
assert((buffer->usage & mem->type->valid_buffer_usage) == buffer->usage);
buffer->bo = mem->bo;
- buffer->offset = memoryOffset;
+ buffer->offset = pBindInfo->memoryOffset;
} else {
buffer->bo = NULL;
buffer->offset = 0;
}
+}
+
+VkResult anv_BindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ anv_bind_buffer_memory(
+ &(VkBindBufferMemoryInfoKHR) {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ .buffer = buffer,
+ .memory = memory,
+ .memoryOffset = memoryOffset,
+ });
+
+ return VK_SUCCESS;
+}
+
+VkResult anv_BindBufferMemory2KHR(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR* pBindInfos)
+{
+ for (uint32_t i = 0; i < bindInfoCount; i++)
+ anv_bind_buffer_memory(&pBindInfos[i]);
return VK_SUCCESS;
}