#include <unistd.h>
#include <fcntl.h>
#include <xf86drm.h>
+#include <drm_fourcc.h>
#include "anv_private.h"
#include "util/strtod.h"
#include "util/debug.h"
#include "util/build_id.h"
#include "util/mesa-sha1.h"
-#include "util/vk_util.h"
+#include "vk_util.h"
+#include "common/gen_defines.h"
#include "genxml/gen7_pack.h"
va_start(args, fmt);
if (unlikely(INTEL_DEBUG & DEBUG_PERF))
- vfprintf(stderr, fmt, args);
+ intel_logd_v(fmt, args);
va_end(args);
}
/* If, for whatever reason, we can't actually get the GTT size from the
* kernel (too old?) fall back to the aperture size.
*/
- anv_perf_warn("Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
+ anv_perf_warn(NULL, NULL,
+ "Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
   if (anv_gem_get_aperture(fd, &gtt_size) == -1) {
- return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ return vk_errorf(NULL, NULL, VK_ERROR_INITIALIZATION_FAILED,
"failed to get aperture size: %m");
}
}
return VK_SUCCESS;
}
+static VkResult
+anv_physical_device_init_heaps(struct anv_physical_device *device, int fd)
+{
+ /* The kernel query only tells us whether or not the kernel supports the
+ * EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and not whether or not the
+ * hardware has actual 48bit address support.
+ */
+ device->supports_48bit_addresses =
+ (device->info.gen >= 8) && anv_gem_supports_48b_addresses(fd);
+
+ uint64_t heap_size;
+ VkResult result = anv_compute_heap_size(fd, &heap_size);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (heap_size > (2ull << 30) && !device->supports_48bit_addresses) {
+ /* When running with an overridden PCI ID, we may get a GTT size from
+ * the kernel that is greater than 2 GiB but the execbuf check for 48bit
+ * address support can still fail. Just clamp the address space size to
+ * 2 GiB if we don't have 48-bit support.
+ */
+ intel_logw("%s:%d: The kernel reported a GTT size larger than 2 GiB but "
+               "no support for 48-bit addresses",
+ __FILE__, __LINE__);
+ heap_size = 2ull << 30;
+ }
+
+ if (heap_size <= 3ull * (1ull << 30)) {
+ /* In this case, everything fits nicely into the 32-bit address space,
+ * so there's no need for supporting 48bit addresses on client-allocated
+ * memory objects.
+ */
+ device->memory.heap_count = 1;
+ device->memory.heaps[0] = (struct anv_memory_heap) {
+ .size = heap_size,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .supports_48bit_addresses = false,
+ };
+ } else {
+ /* Not everything will fit nicely into a 32-bit address space. In this
+ * case we need a 64-bit heap. Advertise a small 32-bit heap and a
+ * larger 48-bit heap. If we're in this case, then we have a total heap
+ * size larger than 3GiB which most likely means they have 8 GiB of
+ * video memory and so carving off 1 GiB for the 32-bit heap should be
+ * reasonable.
+ */
+ const uint64_t heap_size_32bit = 1ull << 30;
+ const uint64_t heap_size_48bit = heap_size - heap_size_32bit;
+
+ assert(device->supports_48bit_addresses);
+
+ device->memory.heap_count = 2;
+ device->memory.heaps[0] = (struct anv_memory_heap) {
+ .size = heap_size_48bit,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .supports_48bit_addresses = true,
+ };
+ device->memory.heaps[1] = (struct anv_memory_heap) {
+ .size = heap_size_32bit,
+ .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
+ .supports_48bit_addresses = false,
+ };
+ }
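+   /* Illustrative numbers for the split above (ours, not driver logic):
+    * with an 8 GiB GTT, heap 0 gets 8 GiB - 1 GiB = 7 GiB of
+    * 48-bit-capable memory and heap 1 gets the remaining 1 GiB as the
+    * 32-bit heap.
+    */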
+
+ uint32_t type_count = 0;
+ for (uint32_t heap = 0; heap < device->memory.heap_count; heap++) {
+ uint32_t valid_buffer_usage = ~0;
+
+ /* There appears to be a hardware issue in the VF cache where it only
+ * considers the bottom 32 bits of memory addresses. If you happen to
+ * have two vertex buffers which get placed exactly 4 GiB apart and use
+ * them in back-to-back draw calls, you can get collisions. In order to
+ * solve this problem, we require vertex and index buffers be bound to
+ * memory allocated out of the 32-bit heap.
+ */
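+      /* Illustration (ours, not from the hardware docs): GPU addresses
+       * 0x1_0000_0000 and 0x2_0000_0000 are exactly 4 GiB apart and share
+       * their low 32 bits, so a cache keyed on the bottom 32 bits cannot
+       * tell the two buffers apart.
+       */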
+ if (device->memory.heaps[heap].supports_48bit_addresses) {
+ valid_buffer_usage &= ~(VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
+ }
+
+ if (device->info.has_llc) {
+ /* Big core GPUs share LLC with the CPU and thus one memory type can be
+ * both cached and coherent at the same time.
+ */
+ device->memory.types[type_count++] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = heap,
+ .valid_buffer_usage = valid_buffer_usage,
+ };
+ } else {
+ /* The spec requires that we expose a host-visible, coherent memory
+ * type, but Atom GPUs don't share LLC. Thus we offer two memory types
+       * to give the application a choice between cached but not coherent,
+       * and coherent but uncached (write-combined).
+ */
+ device->memory.types[type_count++] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+ .heapIndex = heap,
+ .valid_buffer_usage = valid_buffer_usage,
+ };
+ device->memory.types[type_count++] = (struct anv_memory_type) {
+ .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
+ .heapIndex = heap,
+ .valid_buffer_usage = valid_buffer_usage,
+ };
+ }
+ }
+ device->memory.type_count = type_count;
+
+ return VK_SUCCESS;
+}
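+
+/* A sketch of what an application then sees from
+ * vkGetPhysicalDeviceMemoryProperties (illustrative, assuming the 8 GiB
+ * non-LLC case above): two heaps (7 GiB 48-bit, 1 GiB 32-bit) and, per
+ * heap, a coherent/uncached type plus a cached/incoherent type. LLC parts
+ * instead get a single cached+coherent type per heap.
+ */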
+
static VkResult
anv_physical_device_init_uuids(struct anv_physical_device *device)
{
- const struct build_id_note *note = build_id_find_nhdr("libvulkan_intel.so");
+ const struct build_id_note *note =
+ build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
if (!note) {
- return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ return vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"Failed to find build-id");
}
unsigned build_id_len = build_id_length(note);
if (build_id_len < 20) {
- return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ return vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"build-id too short. It needs to be a SHA");
}
VkResult result;
int fd;
+ brw_process_intel_debug_variable();
+
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0)
return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
assert(strlen(path) < ARRAY_SIZE(device->path));
strncpy(device->path, path, ARRAY_SIZE(device->path));
- device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
- if (!device->chipset_id) {
- result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
- goto fail;
+ device->no_hw = getenv("INTEL_NO_HW") != NULL;
+
+ const int pci_id_override = gen_get_pci_device_id_override();
+ if (pci_id_override < 0) {
+ device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
+ if (!device->chipset_id) {
+ result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ goto fail;
+ }
+ } else {
+ device->chipset_id = pci_id_override;
+ device->no_hw = true;
}
device->name = gen_get_device_name(device->chipset_id);
}
if (device->info.is_haswell) {
- fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
+ intel_logw("Haswell Vulkan support is incomplete");
} else if (device->info.gen == 7 && !device->info.is_baytrail) {
- fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
+ intel_logw("Ivy Bridge Vulkan support is incomplete");
} else if (device->info.gen == 7 && device->info.is_baytrail) {
- fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
- } else if (device->info.gen >= 8) {
- /* Broadwell, Cherryview, Skylake, Broxton, Kabylake is as fully
- * supported as anything */
+ intel_logw("Bay Trail Vulkan support is incomplete");
+ } else if (device->info.gen >= 8 && device->info.gen <= 10) {
+ /* Gen8-10 fully supported */
} else {
- result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", device->name);
goto fail;
}
device->cmd_parser_version =
anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
if (device->cmd_parser_version == -1) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"failed to get command parser version");
goto fail;
}
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"kernel missing gem wait");
goto fail;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"kernel missing execbuf2");
goto fail;
}
if (!device->info.has_llc &&
anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INITIALIZATION_FAILED,
"kernel missing wc mmap");
goto fail;
}
- device->supports_48bit_addresses = anv_gem_supports_48b_addresses(fd);
-
- result = anv_compute_heap_size(fd, &device->heap_size);
+ result = anv_physical_device_init_heaps(device, fd);
if (result != VK_SUCCESS)
goto fail;
device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
+ device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
+ device->has_exec_fence = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE);
+ device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY);
+ device->has_syncobj_wait = device->has_syncobj &&
+ anv_gem_supports_syncobj_wait(fd);
+ device->has_context_priority = anv_gem_has_context_priority(fd);
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
+ /* Starting with Gen10, the timestamp frequency of the command streamer may
+ * vary from one part to another. We can query the value from the kernel.
+ */
+ if (device->info.gen >= 10) {
+ int timestamp_frequency =
+ anv_gem_get_param(fd, I915_PARAM_CS_TIMESTAMP_FREQUENCY);
+
+ if (timestamp_frequency < 0)
+ intel_logw("Kernel 4.16-rc1+ required to properly query CS timestamp frequency");
+ else
+ device->info.timestamp_frequency = timestamp_frequency;
+ }
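+   /* This value feeds VkPhysicalDeviceLimits::timestampPeriod, computed
+    * elsewhere in this file as 1e9 / timestamp_frequency; e.g. a
+    * (hypothetical) 12 MHz timestamp counter would give
+    * 1000000000.0 / 12000000 ~= 83.33 ns per tick.
+    */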
+
/* GENs prior to 8 do not support EU/Subslice info */
if (device->info.gen >= 8) {
device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
* many platforms, but otherwise, things will just work.
*/
if (device->subslice_total < 1 || device->eu_total < 1) {
- fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
- " query GPU properties.\n");
+ intel_logw("Kernel 4.1 required to properly query GPU properties");
}
} else if (device->info.gen == 7) {
device->subslice_total = 1 << (device->info.gt - 1);
if (device->info.is_cherryview &&
device->subslice_total > 0 && device->eu_total > 0) {
- /* Logical CS threads = EUs per subslice * 7 threads per EU */
- uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
+ /* Logical CS threads = EUs per subslice * num threads per EU */
+ uint32_t max_cs_threads =
+ device->eu_total / device->subslice_total * device->info.num_thread_per_eu;
/* Fuse configurations may give more threads than expected, never less. */
if (max_cs_threads > device->info.max_cs_threads)
device->info.max_cs_threads = max_cs_threads;
}
- brw_process_intel_debug_variable();
-
device->compiler = brw_compiler_create(NULL, &device->info);
if (device->compiler == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
device->compiler->shader_debug_log = compiler_debug_log;
device->compiler->shader_perf_log = compiler_perf_log;
+ device->compiler->supports_pull_constants = false;
+ device->compiler->constant_buffer_0_is_relative = true;
isl_device_init(&device->isl_dev, &device->info, swizzled);
goto fail;
}
+ anv_physical_device_get_supported_extensions(device,
+ &device->supported_extensions);
+
device->local_fd = fd;
return VK_SUCCESS;
close(device->local_fd);
}
-static const VkExtensionProperties global_extensions[] = {
- {
- .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
- .specVersion = 25,
- },
-#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- {
- .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
- .specVersion = 5,
- },
-#endif
-#ifdef VK_USE_PLATFORM_XCB_KHR
- {
- .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
- .specVersion = 6,
- },
-#endif
-#ifdef VK_USE_PLATFORM_XLIB_KHR
- {
- .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
- .specVersion = 6,
- },
-#endif
- {
- .extensionName = VK_KHX_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
-static const VkExtensionProperties device_extensions[] = {
- {
- .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
- .specVersion = 68,
- },
- {
- .extensionName = VK_KHX_EXTERNAL_MEMORY_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
- .specVersion = 1,
- },
- {
- .extensionName = VK_KHX_MULTIVIEW_EXTENSION_NAME,
- .specVersion = 1,
- },
-};
-
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
VkSystemAllocationScope allocationScope)
.pfnFree = default_free_func,
};
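+
+/* The enumeration below relies on the vk_outarray helpers to implement
+ * Vulkan's standard two-call idiom. An application typically calls it
+ * twice (illustrative caller-side sketch, not driver code):
+ *
+ *    uint32_t count;
+ *    vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
+ *    VkExtensionProperties *props = calloc(count, sizeof(*props));
+ *    vkEnumerateInstanceExtensionProperties(NULL, &count, props);
+ *
+ * vk_outarray_append only counts when pProperties is NULL, and
+ * vk_outarray_status() returns VK_INCOMPLETE if the caller's array turned
+ * out to be too small.
+ */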
+VkResult anv_EnumerateInstanceExtensionProperties(
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties)
+{
+ VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+ for (int i = 0; i < ANV_INSTANCE_EXTENSION_COUNT; i++) {
+ if (anv_instance_extensions_supported.extensions[i]) {
+ vk_outarray_append(&out, prop) {
+ *prop = anv_instance_extensions[i];
+ }
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
VkResult anv_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkInstance* pInstance)
{
struct anv_instance *instance;
+ VkResult result;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
+ /* Check if user passed a debug report callback to be used during
+ * Create/Destroy of instance.
+ */
+ const VkDebugReportCallbackCreateInfoEXT *ctor_cb =
+ vk_find_struct_const(pCreateInfo->pNext,
+ DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT);
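+   /* Caller-side sketch (hypothetical application code) of how this struct
+    * gets chained in:
+    *
+    *    VkDebugReportCallbackCreateInfoEXT cb = {
+    *       .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+    *       .flags = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+    *       .pfnCallback = my_callback, /* hypothetical app function */
+    *    };
+    *    create_info.pNext = &cb;
+    */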
+
uint32_t client_version;
if (pCreateInfo->pApplicationInfo &&
pCreateInfo->pApplicationInfo->apiVersion != 0) {
if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
- return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+
+ if (ctor_cb && ctor_cb->flags & VK_DEBUG_REPORT_ERROR_BIT_EXT)
+ ctor_cb->pfnCallback(VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
+ VK_NULL_HANDLE, /* No handle available yet. */
+ __LINE__,
+ 0,
+ "anv",
+ "incompatible driver version",
+ ctor_cb->pUserData);
+
+ return vk_errorf(NULL, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
"Client requested version %d.%d.%d",
VK_VERSION_MAJOR(client_version),
VK_VERSION_MINOR(client_version),
VK_VERSION_PATCH(client_version));
}
+ struct anv_instance_extension_table enabled_extensions = {};
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- bool found = false;
- for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
+ int idx;
+ for (idx = 0; idx < ANV_INSTANCE_EXTENSION_COUNT; idx++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- global_extensions[j].extensionName) == 0) {
- found = true;
+ anv_instance_extensions[idx].extensionName) == 0)
break;
- }
}
- if (!found)
+
+ if (idx >= ANV_INSTANCE_EXTENSION_COUNT)
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+
+ if (!anv_instance_extensions_supported.extensions[idx])
+ return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+
+ enabled_extensions.extensions[idx] = true;
}
instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
instance->alloc = default_alloc;
instance->apiVersion = client_version;
+ instance->enabled_extensions = enabled_extensions;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(instance->dispatch.entrypoints); i++) {
+ /* Vulkan requires that entrypoints for extensions which have not been
+ * enabled must not be advertised.
+ */
+ if (!anv_entrypoint_is_enabled(i, instance->apiVersion,
+ &instance->enabled_extensions, NULL)) {
+ instance->dispatch.entrypoints[i] = NULL;
+ } else if (anv_dispatch_table.entrypoints[i] != NULL) {
+ instance->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
+ } else {
+ instance->dispatch.entrypoints[i] =
+ anv_tramp_dispatch_table.entrypoints[i];
+ }
+ }
+
instance->physicalDeviceCount = -1;
+ result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
+ if (result != VK_SUCCESS) {
+ vk_free2(&default_alloc, pAllocator, instance);
+ return vk_error(result);
+ }
+
_mesa_locale_init();
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
VG(VALGRIND_DESTROY_MEMPOOL(instance));
+ vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
+
_mesa_locale_fini();
vk_free(&instance->alloc, instance);
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
+ VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
+ features->variablePointersStorageBuffer = true;
+ features->variablePointers = true;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR: {
+ VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *features =
+ (VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR *) ext;
+ features->samplerYcbcrConversion = true;
+ break;
+ }
+
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR: {
+ VkPhysicalDevice16BitStorageFeaturesKHR *features =
+ (VkPhysicalDevice16BitStorageFeaturesKHR *)ext;
+ ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
+
+ features->storageBuffer16BitAccess = pdevice->info.gen >= 8;
+ features->uniformAndStorageBuffer16BitAccess = pdevice->info.gen >= 8;
+ features->storagePushConstant16 = pdevice->info.gen >= 8;
+ features->storageInputOutput16 = false;
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
(1ul << 30) : (1ul << 27);
+ const uint32_t max_samplers = (devinfo->gen >= 8 || devinfo->is_haswell) ?
+ 128 : 16;
+
VkSampleCountFlags sample_counts =
isl_device_get_sample_counts(&pdevice->isl_dev);
.bufferImageGranularity = 64, /* A cache line */
.sparseAddressSpaceSize = 0,
.maxBoundDescriptorSets = MAX_SETS,
- .maxPerStageDescriptorSamplers = 64,
+ .maxPerStageDescriptorSamplers = max_samplers,
.maxPerStageDescriptorUniformBuffers = 64,
.maxPerStageDescriptorStorageBuffers = 64,
- .maxPerStageDescriptorSampledImages = 64,
+ .maxPerStageDescriptorSampledImages = max_samplers,
.maxPerStageDescriptorStorageImages = 64,
.maxPerStageDescriptorInputAttachments = 64,
- .maxPerStageResources = 128,
- .maxDescriptorSetSamplers = 256,
- .maxDescriptorSetUniformBuffers = 256,
+ .maxPerStageResources = 250,
+ .maxDescriptorSetSamplers = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
+ .maxDescriptorSetUniformBuffers = 6 * 64, /* number of stages * maxPerStageDescriptorUniformBuffers */
.maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
- .maxDescriptorSetStorageBuffers = 256,
+ .maxDescriptorSetStorageBuffers = 6 * 64, /* number of stages * maxPerStageDescriptorStorageBuffers */
.maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
- .maxDescriptorSetSampledImages = 256,
- .maxDescriptorSetStorageImages = 256,
+ .maxDescriptorSetSampledImages = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSampledImages */
+ .maxDescriptorSetStorageImages = 6 * 64, /* number of stages * maxPerStageDescriptorStorageImages */
.maxDescriptorSetInputAttachments = 256,
.maxVertexInputAttributes = MAX_VBS,
.maxVertexInputBindings = MAX_VBS,
.viewportSubPixelBits = 13, /* We take a float? */
.minMemoryMapAlignment = 4096, /* A page */
.minTexelBufferOffsetAlignment = 1,
- .minUniformBufferOffsetAlignment = 16,
+ /* We need 16 for UBO block reads to work and 32 for push UBOs */
+ .minUniformBufferOffsetAlignment = 32,
.minStorageBufferOffsetAlignment = 4,
.minTexelOffset = -8,
.maxTexelOffset = 7,
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
.timestampComputeAndGraphics = false,
- .timestampPeriod = devinfo->timebase_scale,
+ .timestampPeriod = 1000000000.0 / devinfo->timestamp_frequency,
.maxClipDistances = 8,
.maxCullDistances = 8,
.maxCombinedClipAndCullDistances = 8,
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(1, 0, 42),
- .driverVersion = 1,
+ .apiVersion = anv_physical_device_api_version(pdevice),
+ .driverVersion = vk_get_driver_version(),
.vendorID = 0x8086,
.deviceID = pdevice->chipset_id,
.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
.sparseProperties = {0}, /* Broadwell doesn't do sparse. */
};
- strcpy(pProperties->deviceName, pdevice->name);
+ snprintf(pProperties->deviceName, sizeof(pProperties->deviceName),
+ "%s", pdevice->name);
memcpy(pProperties->pipelineCacheUUID,
pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
}
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHX: {
- VkPhysicalDeviceIDPropertiesKHX *id_props =
- (VkPhysicalDeviceIDPropertiesKHX *)ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
+ VkPhysicalDeviceIDPropertiesKHR *id_props =
+ (VkPhysicalDeviceIDPropertiesKHR *)ext;
memcpy(id_props->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
memcpy(id_props->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
/* The LUID is for Windows. */
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
+ VkPhysicalDevicePointClippingPropertiesKHR *properties =
+ (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
+ properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
+ anv_finishme("Implement pop-free point clipping");
+ break;
+ }
+
default:
anv_debug_ignored_stype(ext->sType);
break;
{
ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
- if (physical_device->info.has_llc) {
- /* Big core GPUs share LLC with the CPU and thus one memory type can be
- * both cached and coherent at the same time.
- */
- pMemoryProperties->memoryTypeCount = 1;
- pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 0,
- };
- } else {
- /* The spec requires that we expose a host-visible, coherent memory
- * type, but Atom GPUs don't share LLC. Thus we offer two memory types
- * to give the application a choice between cached, but not coherent and
- * coherent but uncached (WC though).
- */
- pMemoryProperties->memoryTypeCount = 2;
- pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
- .heapIndex = 0,
- };
- pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
- .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
- .heapIndex = 0,
+ pMemoryProperties->memoryTypeCount = physical_device->memory.type_count;
+ for (uint32_t i = 0; i < physical_device->memory.type_count; i++) {
+ pMemoryProperties->memoryTypes[i] = (VkMemoryType) {
+ .propertyFlags = physical_device->memory.types[i].propertyFlags,
+ .heapIndex = physical_device->memory.types[i].heapIndex,
};
}
- pMemoryProperties->memoryHeapCount = 1;
- pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
- .size = physical_device->heap_size,
- .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
- };
+ pMemoryProperties->memoryHeapCount = physical_device->memory.heap_count;
+ for (uint32_t i = 0; i < physical_device->memory.heap_count; i++) {
+ pMemoryProperties->memoryHeaps[i] = (VkMemoryHeap) {
+ .size = physical_device->memory.heaps[i].size,
+ .flags = physical_device->memory.heaps[i].flags,
+ };
+ }
}
void anv_GetPhysicalDeviceMemoryProperties2KHR(
}
PFN_vkVoidFunction anv_GetInstanceProcAddr(
- VkInstance instance,
+ VkInstance _instance,
const char* pName)
{
- return anv_lookup_entrypoint(NULL, pName);
+ ANV_FROM_HANDLE(anv_instance, instance, _instance);
+
+   /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying
+    * exactly when we have to return valid function pointers, NULL, or when
+    * the behavior is left undefined. See the table for exact details.
+ */
+ if (pName == NULL)
+ return NULL;
+
+#define LOOKUP_ANV_ENTRYPOINT(entrypoint) \
+ if (strcmp(pName, "vk" #entrypoint) == 0) \
+ return (PFN_vkVoidFunction)anv_##entrypoint
+
+ LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
+ LOOKUP_ANV_ENTRYPOINT(EnumerateInstanceLayerProperties);
+ LOOKUP_ANV_ENTRYPOINT(CreateInstance);
+
+#undef LOOKUP_ANV_ENTRYPOINT
+
+ if (instance == NULL)
+ return NULL;
+
+ int idx = anv_get_entrypoint_index(pName);
+ if (idx < 0)
+ return NULL;
+
+ return instance->dispatch.entrypoints[idx];
}
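+
+/* Illustrative lookup through this entrypoint:
+ *
+ *    PFN_vkCreateDevice f = (PFN_vkCreateDevice)
+ *       vkGetInstanceProcAddr(instance, "vkCreateDevice");
+ *
+ * Per the spec table referenced above, only the three pre-instance
+ * entrypoints special-cased here must resolve with a NULL instance; all
+ * other names require a valid instance handle.
+ */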
/* With version 1+ of the loader interface the ICD should expose
const char* pName)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- return anv_lookup_entrypoint(&device->info, pName);
+
+ if (!device || !pName)
+ return NULL;
+
+ int idx = anv_get_entrypoint_index(pName);
+ if (idx < 0)
+ return NULL;
+
+ return device->dispatch.entrypoints[idx];
+}
+
+VkResult
+anv_CreateDebugReportCallbackEXT(VkInstance _instance,
+ const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDebugReportCallbackEXT* pCallback)
+{
+ ANV_FROM_HANDLE(anv_instance, instance, _instance);
+ return vk_create_debug_report_callback(&instance->debug_report_callbacks,
+ pCreateInfo, pAllocator, &instance->alloc,
+ pCallback);
+}
+
+void
+anv_DestroyDebugReportCallbackEXT(VkInstance _instance,
+ VkDebugReportCallbackEXT _callback,
+ const VkAllocationCallbacks* pAllocator)
+{
+ ANV_FROM_HANDLE(anv_instance, instance, _instance);
+ vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
+ _callback, pAllocator, &instance->alloc);
+}
+
+void
+anv_DebugReportMessageEXT(VkInstance _instance,
+ VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objectType,
+ uint64_t object,
+ size_t location,
+ int32_t messageCode,
+ const char* pLayerPrefix,
+ const char* pMessage)
+{
+ ANV_FROM_HANDLE(anv_instance, instance, _instance);
+ vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
+ object, location, messageCode, pLayerPrefix, pMessage);
}
static void
border_colors);
}
+static void
+anv_device_init_trivial_batch(struct anv_device *device)
+{
+ anv_bo_init_new(&device->trivial_batch_bo, device, 4096);
+
+ if (device->instance->physicalDevice.has_exec_async)
+ device->trivial_batch_bo.flags |= EXEC_OBJECT_ASYNC;
+
+ void *map = anv_gem_mmap(device, device->trivial_batch_bo.gem_handle,
+ 0, 4096, 0);
+
+ struct anv_batch batch = {
+ .start = map,
+ .next = map,
+ .end = map + 4096,
+ };
+
+ anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
+ anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
+
+ if (!device->info.has_llc)
+ gen_clflush_range(map, batch.next - map);
+
+ anv_gem_munmap(map, device->trivial_batch_bo.size);
+}
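+
+/* Note: the batch built above is just MI_BATCH_BUFFER_END padded with an
+ * MI_NOOP; presumably it exists so that submissions carrying no command
+ * buffers (e.g. fence-only submits) still hand the kernel something valid
+ * to execute.
+ */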
+
+VkResult anv_EnumerateDeviceExtensionProperties(
+ VkPhysicalDevice physicalDevice,
+ const char* pLayerName,
+ uint32_t* pPropertyCount,
+ VkExtensionProperties* pProperties)
+{
+ ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
+ VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
+
+ for (int i = 0; i < ANV_DEVICE_EXTENSION_COUNT; i++) {
+ if (device->supported_extensions.extensions[i]) {
+ vk_outarray_append(&out, prop) {
+ *prop = anv_device_extensions[i];
+ }
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
+static void
+anv_device_init_dispatch(struct anv_device *device)
+{
+ const struct anv_dispatch_table *genX_table;
+ switch (device->info.gen) {
+ case 10:
+ genX_table = &gen10_dispatch_table;
+ break;
+ case 9:
+ genX_table = &gen9_dispatch_table;
+ break;
+ case 8:
+ genX_table = &gen8_dispatch_table;
+ break;
+ case 7:
+ if (device->info.is_haswell)
+ genX_table = &gen75_dispatch_table;
+ else
+ genX_table = &gen7_dispatch_table;
+ break;
+ default:
+ unreachable("unsupported gen\n");
+ }
+
+ for (unsigned i = 0; i < ARRAY_SIZE(device->dispatch.entrypoints); i++) {
+ /* Vulkan requires that entrypoints for extensions which have not been
+ * enabled must not be advertised.
+ */
+ if (!anv_entrypoint_is_enabled(i, device->instance->apiVersion,
+ &device->instance->enabled_extensions,
+ &device->enabled_extensions)) {
+ device->dispatch.entrypoints[i] = NULL;
+ } else if (genX_table->entrypoints[i]) {
+ device->dispatch.entrypoints[i] = genX_table->entrypoints[i];
+ } else {
+ device->dispatch.entrypoints[i] = anv_dispatch_table.entrypoints[i];
+ }
+ }
+}
+
+static int
+vk_priority_to_gen(int priority)
+{
+ switch (priority) {
+ case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
+ return GEN_CONTEXT_LOW_PRIORITY;
+ case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
+ return GEN_CONTEXT_MEDIUM_PRIORITY;
+ case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
+ return GEN_CONTEXT_HIGH_PRIORITY;
+ case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
+ return GEN_CONTEXT_REALTIME_PRIORITY;
+ default:
+ unreachable("Invalid priority");
+ }
+}
+
VkResult anv_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
+ struct anv_device_extension_table enabled_extensions = { };
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
- bool found = false;
- for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
+ int idx;
+ for (idx = 0; idx < ANV_DEVICE_EXTENSION_COUNT; idx++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
- device_extensions[j].extensionName) == 0) {
- found = true;
+ anv_device_extensions[idx].extensionName) == 0)
break;
- }
}
- if (!found)
+
+ if (idx >= ANV_DEVICE_EXTENSION_COUNT)
+ return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+
+ if (!physical_device->supported_extensions.extensions[idx])
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+
+ enabled_extensions.extensions[idx] = true;
}
+ /* Check enabled features */
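+   /* VkPhysicalDeviceFeatures is defined by the spec as a sequence of
+    * VkBool32 members, which is why the code below can walk it as a flat
+    * array of VkBool32 (assuming the usual no-padding layout).
+    */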
+ if (pCreateInfo->pEnabledFeatures) {
+ VkPhysicalDeviceFeatures supported_features;
+ anv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
+ VkBool32 *supported_feature = (VkBool32 *)&supported_features;
+ VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
+ unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
+ for (uint32_t i = 0; i < num_features; i++) {
+ if (enabled_feature[i] && !supported_feature[i])
+ return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ }
+ }
+
+ /* Check if client specified queue priority. */
+ const VkDeviceQueueGlobalPriorityCreateInfoEXT *queue_priority =
+ vk_find_struct_const(pCreateInfo->pQueueCreateInfos[0].pNext,
+ DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
+
+ VkQueueGlobalPriorityEXT priority =
+ queue_priority ? queue_priority->globalPriority :
+ VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
+
device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
device->chipset_id = physical_device->chipset_id;
+ device->no_hw = physical_device->no_hw;
device->lost = false;
if (pAllocator)
goto fail_fd;
}
+ /* As per spec, the driver implementation may deny requests to acquire
+ * a priority above the default priority (MEDIUM) if the caller does not
+ * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_EXT
+ * is returned.
+ */
+ if (physical_device->has_context_priority) {
+ int err = anv_gem_set_context_param(device->fd, device->context_id,
+ I915_CONTEXT_PARAM_PRIORITY,
+ vk_priority_to_gen(priority));
+ if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
+ result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
+ goto fail_fd;
+ }
+ }
+
device->info = physical_device->info;
device->isl_dev = physical_device->isl_dev;
device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
pCreateInfo->pEnabledFeatures->robustBufferAccess;
+ device->enabled_extensions = enabled_extensions;
+
+ anv_device_init_dispatch(device);
if (pthread_mutex_init(&device->mutex, NULL) != 0) {
result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
}
pthread_condattr_destroy(&condattr);
- anv_bo_pool_init(&device->batch_bo_pool, device);
+ uint64_t bo_flags =
+ (physical_device->supports_48bit_addresses ? EXEC_OBJECT_SUPPORTS_48B_ADDRESS : 0) |
+ (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
+ (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
+
+ anv_bo_pool_init(&device->batch_bo_pool, device, bo_flags);
result = anv_bo_cache_init(&device->bo_cache);
if (result != VK_SUCCESS)
goto fail_batch_bo_pool;
- result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384);
+ /* For the state pools we explicitly disable 48bit. */
+ bo_flags = (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
+ (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
+
+ result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384,
+ bo_flags);
if (result != VK_SUCCESS)
goto fail_bo_cache;
- result = anv_state_pool_init(&device->instruction_state_pool, device, 16384);
+ result = anv_state_pool_init(&device->instruction_state_pool, device, 16384,
+ bo_flags);
if (result != VK_SUCCESS)
goto fail_dynamic_state_pool;
- result = anv_state_pool_init(&device->surface_state_pool, device, 4096);
+ result = anv_state_pool_init(&device->surface_state_pool, device, 4096,
+ bo_flags);
if (result != VK_SUCCESS)
goto fail_instruction_state_pool;
if (result != VK_SUCCESS)
goto fail_surface_state_pool;
+ anv_device_init_trivial_batch(device);
+
anv_scratch_pool_init(device, &device->scratch_pool);
anv_queue_init(device, &device->queue);
case 9:
result = gen9_init_device_state(device);
break;
+ case 10:
+ result = gen10_init_device_state(device);
+ break;
+ case 11:
+ result = gen11_init_device_state(device);
+ break;
default:
/* Shouldn't get here as we don't create physical devices for any other
* gens. */
anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
anv_gem_close(device, device->workaround_bo.gem_handle);
+ anv_gem_close(device, device->trivial_batch_bo.gem_handle);
+
anv_state_pool_finish(&device->surface_state_pool);
anv_state_pool_finish(&device->instruction_state_pool);
anv_state_pool_finish(&device->dynamic_state_pool);
vk_free(&device->alloc, device);
}
-VkResult anv_EnumerateInstanceExtensionProperties(
- const char* pLayerName,
- uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties)
-{
- if (pProperties == NULL) {
- *pPropertyCount = ARRAY_SIZE(global_extensions);
- return VK_SUCCESS;
- }
-
- *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(global_extensions));
- typed_memcpy(pProperties, global_extensions, *pPropertyCount);
-
- if (*pPropertyCount < ARRAY_SIZE(global_extensions))
- return VK_INCOMPLETE;
-
- return VK_SUCCESS;
-}
-
-VkResult anv_EnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties)
-{
- if (pProperties == NULL) {
- *pPropertyCount = ARRAY_SIZE(device_extensions);
- return VK_SUCCESS;
- }
-
- *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(device_extensions));
- typed_memcpy(pProperties, device_extensions, *pPropertyCount);
-
- if (*pPropertyCount < ARRAY_SIZE(device_extensions))
- return VK_INCOMPLETE;
-
- return VK_SUCCESS;
-}
-
VkResult anv_EnumerateInstanceLayerProperties(
uint32_t* pPropertyCount,
VkLayerProperties* pProperties)
if (ret == -1) {
/* We don't know the real error. */
device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST, "get_reset_stats failed: %m");
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
+ "get_reset_stats failed: %m");
}
if (active) {
device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST,
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
"GPU hung on one of our command buffers");
} else if (pending) {
device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST,
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
"GPU hung with commands in-flight");
}
} else if (ret == -1) {
/* We don't know the real error. */
device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
+ "gem wait failed: %m");
}
/* Query for device status after the busy call. If the BO we're checking
} else if (ret == -1) {
/* We don't know the real error. */
device->lost = true;
- return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
+ "gem wait failed: %m");
}
/* Query for device status after the wait. If the BO we're waiting on got
anv_bo_init(bo, gem_handle, size);
- if (device->instance->physicalDevice.supports_48bit_addresses)
- bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
-
- if (device->instance->physicalDevice.has_exec_async)
- bo->flags |= EXEC_OBJECT_ASYNC;
-
return VK_SUCCESS;
}
VkDeviceMemory* pMem)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_device_memory *mem;
VkResult result = VK_SUCCESS;
/* The Vulkan 1.0.33 spec says "allocationSize must be greater than 0". */
assert(pAllocateInfo->allocationSize > 0);
- /* We support exactly one memory heap. */
- assert(pAllocateInfo->memoryTypeIndex == 0 ||
- (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
-
/* The kernel relocation API has a limitation of a 32-bit delta value
* applied to the address before it is written which, in spite of it being
    * unsigned, is treated as signed. Because of the way that this maps to
if (mem == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- mem->type_index = pAllocateInfo->memoryTypeIndex;
+ assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
+ mem->type = &pdevice->memory.types[pAllocateInfo->memoryTypeIndex];
mem->map = NULL;
mem->map_size = 0;
- const VkImportMemoryFdInfoKHX *fd_info =
- vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHX);
+ const VkImportMemoryFdInfoKHR *fd_info =
+ vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
/* The Vulkan spec permits handleType to be 0, in which case the struct is
* ignored.
*/
if (fd_info && fd_info->handleType) {
- /* At the moment, we only support the OPAQUE_FD memory type which is
- * just a GEM buffer.
- */
+ /* At the moment, we support only the below handle types. */
assert(fd_info->handleType ==
- VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHX);
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
+ fd_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
result = anv_bo_cache_import(device, &device->bo_cache,
- fd_info->fd, pAllocateInfo->allocationSize,
- &mem->bo);
+ fd_info->fd, &mem->bo);
if (result != VK_SUCCESS)
goto fail;
+
+ VkDeviceSize aligned_alloc_size =
+ align_u64(pAllocateInfo->allocationSize, 4096);
+
+ /* For security purposes, we reject importing the bo if it's smaller
+ * than the requested allocation size. This prevents a malicious client
+ * from passing a buffer to a trusted client, lying about the size, and
+ * telling the trusted client to try and texture from an image that goes
+ * out-of-bounds. This sort of thing could lead to GPU hangs or worse
+ * in the trusted client. The trusted client can protect itself against
+ * this sort of attack but only if it can trust the buffer size.
+ */
+ if (mem->bo->size < aligned_alloc_size) {
+ result = vk_errorf(device->instance, device,
+ VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
+ "aligned allocationSize too large for "
+ "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR: "
+ "%"PRIu64"B > %"PRIu64"B",
+ aligned_alloc_size, mem->bo->size);
+ anv_bo_cache_release(device, &device->bo_cache, mem->bo);
+ goto fail;
+ }
+
+ /* From the Vulkan spec:
+ *
+ * "Importing memory from a file descriptor transfers ownership of
+ * the file descriptor from the application to the Vulkan
+ * implementation. The application must not perform any operations on
+ * the file descriptor after a successful import."
+ *
+ * If the import fails, we leave the file descriptor open.
+ */
+ close(fd_info->fd);
} else {
result = anv_bo_cache_alloc(device, &device->bo_cache,
pAllocateInfo->allocationSize,
&mem->bo);
if (result != VK_SUCCESS)
goto fail;
+
+ const VkMemoryDedicatedAllocateInfoKHR *dedicated_info =
+ vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
+ if (dedicated_info && dedicated_info->image != VK_NULL_HANDLE) {
+ ANV_FROM_HANDLE(anv_image, image, dedicated_info->image);
+
+ /* Some legacy (non-modifiers) consumers need the tiling to be set on
+ * the BO. In this case, we have a dedicated allocation.
+ */
+ if (image->needs_set_tiling) {
+ const uint32_t i915_tiling =
+ isl_tiling_to_i915_tiling(image->planes[0].surface.isl.tiling);
+ int ret = anv_gem_set_tiling(device, mem->bo->gem_handle,
+ image->planes[0].surface.isl.row_pitch,
+ i915_tiling);
+ if (ret) {
+ anv_bo_cache_release(device, &device->bo_cache, mem->bo);
+ return vk_errorf(device->instance, NULL,
+ VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "failed to set BO tiling: %m");
+ }
+ }
+ }
+ }
+
+ assert(mem->type->heapIndex < pdevice->memory.heap_count);
+ if (pdevice->memory.heaps[mem->type->heapIndex].supports_48bit_addresses)
+ mem->bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+ const struct wsi_memory_allocate_info *wsi_info =
+ vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
+ if (wsi_info && wsi_info->implicit_sync) {
+ /* We need to set the WRITE flag on window system buffers so that GEM
+ * will know we're writing to them and synchronize uses on other rings
+ * (eg if the display server uses the blitter ring).
+ */
+ mem->bo->flags |= EXEC_OBJECT_WRITE;
+ } else if (pdevice->has_exec_async) {
+ mem->bo->flags |= EXEC_OBJECT_ASYNC;
}
*pMem = anv_device_memory_to_handle(mem);
return result;
}
-VkResult anv_GetMemoryFdKHX(
+VkResult anv_GetMemoryFdKHR(
VkDevice device_h,
- VkDeviceMemory memory_h,
- VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+ const VkMemoryGetFdInfoKHR* pGetFdInfo,
int* pFd)
{
ANV_FROM_HANDLE(anv_device, dev, device_h);
- ANV_FROM_HANDLE(anv_device_memory, mem, memory_h);
+ ANV_FROM_HANDLE(anv_device_memory, mem, pGetFdInfo->memory);
- /* We support only one handle type. */
- assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHX);
+ assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
+
+ assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
+ pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
return anv_bo_cache_export(dev, &dev->bo_cache, mem->bo, pFd);
}
-VkResult anv_GetMemoryFdPropertiesKHX(
- VkDevice device_h,
- VkExternalMemoryHandleTypeFlagBitsKHX handleType,
+VkResult anv_GetMemoryFdPropertiesKHR(
+ VkDevice _device,
+ VkExternalMemoryHandleTypeFlagBitsKHR handleType,
int fd,
- VkMemoryFdPropertiesKHX* pMemoryFdProperties)
+ VkMemoryFdPropertiesKHR* pMemoryFdProperties)
{
- /* The valid usage section for this function says:
- *
- * "handleType must not be one of the handle types defined as opaque."
- *
- * Since we only handle opaque handles for now, there are no FD properties.
- */
- return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHX;
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+
+ switch (handleType) {
+ case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
+ /* dma-buf can be imported as any memory type */
+ pMemoryFdProperties->memoryTypeBits =
+ (1 << pdevice->memory.type_count) - 1;
+ return VK_SUCCESS;
+
+ default:
+ /* The valid usage section for this function says:
+ *
+ * "handleType must not be one of the handle types defined as
+ * opaque."
+ *
+ * So opaque handle types fall into the default "unsupported" case.
+ */
+ return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ }
}
void anv_FreeMemory(
* userspace. */
uint32_t gem_flags = 0;
- if (!device->info.has_llc && mem->type_index == 0)
+
+ if (!device->info.has_llc &&
+ (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
gem_flags |= I915_MMAP_WC;
/* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
if (ranges[i].offset >= mem->map_size)
continue;
- anv_clflush_range(mem->map + ranges[i].offset,
+ gen_clflush_range(mem->map + ranges[i].offset,
MIN2(ranges[i].size, mem->map_size - ranges[i].offset));
}
}
{
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
/* The Vulkan spec (git aaed022) says:
*
* supported memory type for the resource. The bit `1<<i` is set if and
* only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
* structure for the physical device is supported.
- *
- * We support exactly one memory type on LLC, two on non-LLC.
*/
- pMemoryRequirements->memoryTypeBits = device->info.has_llc ? 1 : 3;
+ uint32_t memory_types = 0;
+ for (uint32_t i = 0; i < pdevice->memory.type_count; i++) {
+ uint32_t valid_usage = pdevice->memory.types[i].valid_buffer_usage;
+ if ((valid_usage & buffer->usage) == buffer->usage)
+ memory_types |= (1u << i);
+ }
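+   /* Example of the filtering above: a buffer created with
+    * VK_BUFFER_USAGE_VERTEX_BUFFER_BIT can only match memory types whose
+    * valid_buffer_usage still contains that bit, i.e. types backed by the
+    * 32-bit heap, per the VF-cache workaround in
+    * anv_physical_device_init_heaps().
+    */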
+
+ /* Base alignment requirement of a cache line */
+ uint32_t alignment = 16;
+
+ /* We need an alignment of 32 for pushing UBOs */
+ if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
+ alignment = MAX2(alignment, 32);
pMemoryRequirements->size = buffer->size;
- pMemoryRequirements->alignment = 16;
+ pMemoryRequirements->alignment = alignment;
+
+   /* Storage and uniform buffers should have their size aligned to
+    * 32 bits to avoid boundary checks when the last DWord is not complete.
+    * This ensures that no internal padding is needed for 16-bit types.
+ */
+ if (device->robust_buffer_access &&
+ (buffer->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
+ buffer->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
+ pMemoryRequirements->size = align_u64(buffer->size, 4);
+
+ pMemoryRequirements->memoryTypeBits = memory_types;
+}
+
+void anv_GetBufferMemoryRequirements2KHR(
+ VkDevice _device,
+ const VkBufferMemoryRequirementsInfo2KHR* pInfo,
+ VkMemoryRequirements2KHR* pMemoryRequirements)
+{
+ anv_GetBufferMemoryRequirements(_device, pInfo->buffer,
+ &pMemoryRequirements->memoryRequirements);
+
+ vk_foreach_struct(ext, pMemoryRequirements->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
+ VkMemoryDedicatedRequirementsKHR *requirements = (void *)ext;
+ requirements->prefersDedicatedAllocation = VK_FALSE;
+ requirements->requiresDedicatedAllocation = VK_FALSE;
+ break;
+ }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
}
void anv_GetImageMemoryRequirements(
{
ANV_FROM_HANDLE(anv_image, image, _image);
ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
/* The Vulkan spec (git aaed022) says:
*
* only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
* structure for the physical device is supported.
*
- * We support exactly one memory type on LLC, two on non-LLC.
+ * All types are currently supported for images.
*/
- pMemoryRequirements->memoryTypeBits = device->info.has_llc ? 1 : 3;
+ uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
pMemoryRequirements->size = image->size;
pMemoryRequirements->alignment = image->alignment;
+ pMemoryRequirements->memoryTypeBits = memory_types;
+}
+
+void anv_GetImageMemoryRequirements2KHR(
+ VkDevice _device,
+ const VkImageMemoryRequirementsInfo2KHR* pInfo,
+ VkMemoryRequirements2KHR* pMemoryRequirements)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_image, image, pInfo->image);
+
+ anv_GetImageMemoryRequirements(_device, pInfo->image,
+ &pMemoryRequirements->memoryRequirements);
+
+ vk_foreach_struct_const(ext, pInfo->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR: {
+ struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+ const VkImagePlaneMemoryRequirementsInfoKHR *plane_reqs =
+ (const VkImagePlaneMemoryRequirementsInfoKHR *) ext;
+ uint32_t plane = anv_image_aspect_to_plane(image->aspects,
+ plane_reqs->planeAspect);
+
+ assert(image->planes[plane].offset == 0);
+
+ /* The Vulkan spec (git aaed022) says:
+ *
+ * memoryTypeBits is a bitfield and contains one bit set for every
+ * supported memory type for the resource. The bit `1<<i` is set
+ * if and only if the memory type `i` in the
+ * VkPhysicalDeviceMemoryProperties structure for the physical
+ * device is supported.
+ *
+ * All types are currently supported for images.
+ */
+ pMemoryRequirements->memoryRequirements.memoryTypeBits =
+ (1ull << pdevice->memory.type_count) - 1;
+
+ pMemoryRequirements->memoryRequirements.size = image->planes[plane].size;
+ pMemoryRequirements->memoryRequirements.alignment =
+ image->planes[plane].alignment;
+ break;
+ }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
+
+ vk_foreach_struct(ext, pMemoryRequirements->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
+ VkMemoryDedicatedRequirementsKHR *requirements = (void *)ext;
+ if (image->needs_set_tiling) {
+ /* If we need to set the tiling for external consumers, we need a
+ * dedicated allocation.
+ *
+ * See also anv_AllocateMemory.
+ */
+ requirements->prefersDedicatedAllocation = VK_TRUE;
+ requirements->requiresDedicatedAllocation = VK_TRUE;
+ } else {
+ requirements->prefersDedicatedAllocation = VK_FALSE;
+ requirements->requiresDedicatedAllocation = VK_FALSE;
+ }
+ break;
+ }
+
+ default:
+ anv_debug_ignored_stype(ext->sType);
+ break;
+ }
+ }
}
void anv_GetImageSparseMemoryRequirements(
*pSparseMemoryRequirementCount = 0;
}
+void anv_GetImageSparseMemoryRequirements2KHR(
+ VkDevice device,
+ const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
+{
+ *pSparseMemoryRequirementCount = 0;
+}
+
void anv_GetDeviceMemoryCommitment(
VkDevice device,
VkDeviceMemory memory,
*pCommittedMemoryInBytes = 0;
}
-VkResult anv_BindBufferMemory(
- VkDevice device,
- VkBuffer _buffer,
- VkDeviceMemory _memory,
- VkDeviceSize memoryOffset)
+static void
+anv_bind_buffer_memory(const VkBindBufferMemoryInfoKHR *pBindInfo)
{
- ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
- ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
+ ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
+
+ assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR);
if (mem) {
+ assert((buffer->usage & mem->type->valid_buffer_usage) == buffer->usage);
buffer->bo = mem->bo;
- buffer->offset = memoryOffset;
+ buffer->offset = pBindInfo->memoryOffset;
} else {
buffer->bo = NULL;
buffer->offset = 0;
}
+}
+
+VkResult anv_BindBufferMemory(
+ VkDevice device,
+ VkBuffer buffer,
+ VkDeviceMemory memory,
+ VkDeviceSize memoryOffset)
+{
+ anv_bind_buffer_memory(
+ &(VkBindBufferMemoryInfoKHR) {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ .buffer = buffer,
+ .memory = memory,
+ .memoryOffset = memoryOffset,
+ });
+
+ return VK_SUCCESS;
+}
+
+VkResult anv_BindBufferMemory2KHR(
+ VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR* pBindInfos)
+{
+ for (uint32_t i = 0; i < bindInfoCount; i++)
+ anv_bind_buffer_memory(&pBindInfos[i]);
return VK_SUCCESS;
}