#include "mesa/main/git_sha1.h"
#include "util/strtod.h"
+#include "gen7_pack.h"
+
struct anv_dispatch_table dtable;
+static void
+compiler_debug_log(void *data, const char *fmt, ...)
+{ }
+
+static void
+compiler_perf_log(void *data, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+
+ if (unlikely(INTEL_DEBUG & DEBUG_PERF))
+ vfprintf(stderr, fmt, args);
+
+ va_end(args);
+}
+
static VkResult
anv_physical_device_init(struct anv_physical_device *device,
struct anv_instance *instance,
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0)
- return vk_errorf(VK_ERROR_UNAVAILABLE, "failed to open %s: %m", path);
+ return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to open %s: %m", path);
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = instance;
device->path = path;
-
+
device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
if (!device->chipset_id) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get chipset id: %m");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get chipset id: %m");
goto fail;
}
device->name = brw_get_device_name(device->chipset_id);
- device->info = brw_get_device_info(device->chipset_id, -1);
+ device->info = brw_get_device_info(device->chipset_id);
if (!device->info) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get device info");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get device info");
+ goto fail;
+ }
+
+ if (device->info->is_haswell) {
+ fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
+ } else if (device->info->gen == 7 && !device->info->is_baytrail) {
+ fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
+ } else if (device->info->gen == 9) {
+ fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n");
+ } else if (device->info->gen == 8 && !device->info->is_cherryview) {
+ /* Broadwell is as fully supported as anything */
+ } else {
+ result = vk_errorf(VK_UNSUPPORTED,
+ "Vulkan not yet supported on %s", device->name);
goto fail;
}
-
+
if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get aperture size: %m");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "failed to get aperture size: %m");
goto fail;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing gem wait");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing gem wait");
goto fail;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing execbuf2");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "kernel missing execbuf2");
goto fail;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
- result = vk_errorf(VK_ERROR_UNAVAILABLE, "non-llc gpu");
+ result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ "non-llc gpu");
goto fail;
}
-
+
close(fd);
+ brw_process_intel_debug_variable();
+
+ device->compiler = brw_compiler_create(NULL, device->info);
+ if (device->compiler == NULL) {
+ result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ goto fail;
+ }
+ device->compiler->shader_debug_log = compiler_debug_log;
+ device->compiler->shader_perf_log = compiler_perf_log;
+
+ isl_device_init(&device->isl_dev, device->info);
+
return VK_SUCCESS;
-
+
fail:
close(fd);
return result;
}
+static void
+anv_physical_device_finish(struct anv_physical_device *device)
+{
+ ralloc_free(device->compiler);
+}
+
static void *default_alloc(
void* pUserData,
size_t size,
static const VkExtensionProperties global_extensions[] = {
{
- .extName = "VK_WSI_swapchain",
- .specVersion = 12
+ .extName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
+ .specVersion = 17,
},
};
static const VkExtensionProperties device_extensions[] = {
{
- .extName = "VK_WSI_device_swapchain",
- .specVersion = 12
+ .extName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
+ .specVersion = 53,
},
};
-
VkResult anv_CreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
VkInstance* pInstance)
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
+ if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
+ return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+
for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
bool found = false;
for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
}
}
if (!found)
- return vk_error(VK_ERROR_INVALID_EXTENSION);
+ return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
if (pCreateInfo->pAllocCb) {
instance->pfnAlloc = alloc_callbacks->pfnAlloc;
instance->pfnFree = alloc_callbacks->pfnFree;
instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
- instance->physicalDeviceCount = 0;
+ instance->physicalDeviceCount = -1;
_mesa_locale_init();
{
ANV_FROM_HANDLE(anv_instance, instance, _instance);
+ if (instance->physicalDeviceCount > 0) {
+ /* We support at most one physical device. */
+ assert(instance->physicalDeviceCount == 1);
+ anv_physical_device_finish(&instance->physicalDevice);
+ }
+
anv_finish_wsi(instance);
VG(VALGRIND_DESTROY_MEMPOOL(instance));
ANV_FROM_HANDLE(anv_instance, instance, _instance);
VkResult result;
- if (instance->physicalDeviceCount == 0) {
+ if (instance->physicalDeviceCount < 0) {
result = anv_physical_device_init(&instance->physicalDevice,
instance, "/dev/dri/renderD128");
- if (result != VK_SUCCESS)
+ if (result == VK_UNSUPPORTED) {
+ instance->physicalDeviceCount = 0;
+ } else if (result == VK_SUCCESS) {
+ instance->physicalDeviceCount = 1;
+ } else {
return result;
-
- instance->physicalDeviceCount = 1;
+ }
}
/* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
.sampleRateShading = false,
.dualSourceBlend = true,
.logicOp = true,
- .instancedDrawIndirect = true,
+ .multiDrawIndirect = true,
.depthClip = false,
.depthBiasClamp = false,
.fillModeNonSolid = true,
.textureCompressionETC2 = true,
.textureCompressionASTC_LDR = true,
.textureCompressionBC = true,
+ .occlusionQueryNonConservative = false, /* FINISHME */
.pipelineStatisticsQuery = true,
.vertexSideEffects = false,
.tessellationSideEffects = false,
.fragmentSideEffects = false,
.shaderTessellationPointSize = false,
.shaderGeometryPointSize = true,
- .shaderTextureGatherExtended = true,
+ .shaderImageGatherExtended = true,
.shaderStorageImageExtendedFormats = false,
.shaderStorageImageMultisample = false,
- .shaderStorageBufferArrayConstantIndexing = false,
- .shaderStorageImageArrayConstantIndexing = false,
.shaderUniformBufferArrayDynamicIndexing = true,
.shaderSampledImageArrayDynamicIndexing = false,
.shaderStorageBufferArrayDynamicIndexing = false,
.shaderCullDistance = false,
.shaderFloat64 = false,
.shaderInt64 = false,
- .shaderFloat16 = false,
.shaderInt16 = false,
+ .alphaToOne = true,
};
return VK_SUCCESS;
}
-VkResult anv_GetPhysicalDeviceLimits(
+VkResult anv_GetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceLimits* pLimits)
+ VkPhysicalDeviceProperties* pProperties)
{
- ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
- const struct brw_device_info *devinfo = physical_device->info;
+ ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
+ const struct brw_device_info *devinfo = pdevice->info;
- anv_finishme("Get correct values for PhysicalDeviceLimits");
+ anv_finishme("Get correct values for VkPhysicalDeviceLimits");
- *pLimits = (VkPhysicalDeviceLimits) {
+ VkPhysicalDeviceLimits limits = {
.maxImageDimension1D = (1 << 14),
.maxImageDimension2D = (1 << 14),
.maxImageDimension3D = (1 << 10),
.maxImageDimensionCube = (1 << 14),
.maxImageArrayLayers = (1 << 10),
+
+ /* Broadwell supports 1, 2, 4, and 8 samples. */
+ .sampleCounts = 4,
+
.maxTexelBufferSize = (1 << 14),
.maxUniformBufferSize = UINT32_MAX,
.maxStorageBufferSize = UINT32_MAX,
.maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
.maxMemoryAllocationCount = UINT32_MAX,
.bufferImageGranularity = 64, /* A cache line */
+ .sparseAddressSpaceSize = 0,
.maxBoundDescriptorSets = MAX_SETS,
.maxDescriptorSets = UINT32_MAX,
.maxPerStageDescriptorSamplers = 64,
.maxPerStageDescriptorStorageImages = 64,
.maxDescriptorSetSamplers = 256,
.maxDescriptorSetUniformBuffers = 256,
+ .maxDescriptorSetUniformBuffersDynamic = 256,
.maxDescriptorSetStorageBuffers = 256,
+ .maxDescriptorSetStorageBuffersDynamic = 256,
.maxDescriptorSetSampledImages = 256,
.maxDescriptorSetStorageImages = 256,
.maxVertexInputAttributes = 32,
+ .maxVertexInputBindings = 32,
.maxVertexInputAttributeOffset = 256,
.maxVertexInputBindingStride = 256,
.maxVertexOutputComponents = 32,
.maxSamplerLodBias = 16,
.maxSamplerAnisotropy = 16,
.maxViewports = MAX_VIEWPORTS,
- .maxDynamicViewportStates = UINT32_MAX,
.maxViewportDimensions = { (1 << 14), (1 << 14) },
.viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
.viewportSubPixelBits = 13, /* We take a float? */
.lineWidthGranularity = (1.0 / 128.0),
};
- return VK_SUCCESS;
-}
-
-VkResult anv_GetPhysicalDeviceProperties(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties* pProperties)
-{
- ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
-
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(0, 138, 1),
+ .apiVersion = VK_MAKE_VERSION(0, 170, 2),
.driverVersion = 1,
.vendorId = 0x8086,
.deviceId = pdevice->chipset_id,
.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
+ .limits = limits,
+ .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
};
strcpy(pProperties->deviceName, pdevice->name);
{
if (pQueueFamilyProperties == NULL) {
*pCount = 1;
+ return VK_SUCCESS;
}
assert(*pCount >= 1);
queue->device = device;
queue->pool = &device->surface_state_pool;
- queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
- if (queue->completed_serial.map == NULL)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
-
- *(uint32_t *)queue->completed_serial.map = 0;
- queue->next_serial = 1;
-
return VK_SUCCESS;
}
static void
anv_queue_finish(struct anv_queue *queue)
{
-#ifdef HAVE_VALGRIND
- /* This gets torn down with the device so we only need to do this if
- * valgrind is present.
- */
- anv_state_pool_free(queue->pool, queue->completed_serial);
-#endif
}
static void
}
}
if (!found)
- return vk_error(VK_ERROR_INVALID_EXTENSION);
+ return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
- anv_set_dispatch_gen(physical_device->info->gen);
+ anv_set_dispatch_devinfo(physical_device->info);
device = anv_instance_alloc(instance, sizeof(*device), 8,
VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
if (device->fd == -1)
goto fail_device;
-
+
device->context_id = anv_gem_create_context(device);
if (device->context_id == -1)
goto fail_fd;
anv_state_pool_init(&device->dynamic_state_pool,
&device->dynamic_state_block_pool);
- anv_block_pool_init(&device->instruction_block_pool, device, 2048);
+ anv_block_pool_init(&device->instruction_block_pool, device, 4096);
anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
anv_state_pool_init(&device->surface_state_pool,
&device->surface_state_block_pool);
+ anv_bo_init_new(&device->workaround_bo, device, 1024);
+
anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);
device->info = *physical_device->info;
-
- device->compiler = anv_compiler_create(device);
+ device->isl_dev = physical_device->isl_dev;
anv_queue_init(device, &device->queue);
fail_device:
anv_device_free(device, device);
- return vk_error(VK_ERROR_UNAVAILABLE);
+ return vk_error(VK_ERROR_INITIALIZATION_FAILED);
}
void anv_DestroyDevice(
{
ANV_FROM_HANDLE(anv_device, device, _device);
- anv_compiler_destroy(device->compiler);
-
anv_queue_finish(&device->queue);
anv_device_finish_meta(device);
anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif
+ anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
+ anv_gem_close(device, device->workaround_bo.gem_handle);
+
anv_bo_pool_finish(&device->batch_bo_pool);
anv_state_pool_finish(&device->dynamic_state_pool);
anv_block_pool_finish(&device->dynamic_state_block_pool);
}
/* None supported at this time */
- return vk_error(VK_ERROR_INVALID_LAYER);
+ return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}
VkResult anv_EnumerateDeviceLayerProperties(
}
/* None supported at this time */
- return vk_error(VK_ERROR_INVALID_LAYER);
+ return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}
VkResult anv_GetDeviceQueue(
assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
- if (ret != 0)
- return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
+ if (ret != 0) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "execbuf2 failed: %m");
+ }
if (fence) {
ret = anv_gem_execbuffer(device, &fence->execbuf);
- if (ret != 0)
- return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
+ if (ret != 0) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "execbuf2 failed: %m");
+ }
}
for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
ret = anv_gem_execbuffer(device, &execbuf);
if (ret != 0) {
- result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
+ /* We don't know the real error. */
+ result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
goto fail;
}
timeout = INT64_MAX;
ret = anv_gem_wait(device, bo->gem_handle, &timeout);
if (ret != 0) {
- result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
+ /* We don't know the real error. */
+ result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
goto fail;
}
assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
- if (pAllocInfo->memoryTypeIndex != 0) {
- /* We support exactly one memory heap. */
- return vk_error(VK_ERROR_INVALID_VALUE);
- }
+ /* We support exactly one memory heap. */
+ assert(pAllocInfo->memoryTypeIndex == 0);
/* FINISHME: Fail if allocation request exceeds heap size. */
mem->map_size = size;
*ppData = mem->map;
-
+
return VK_SUCCESS;
}
uint64_t timeout)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+
+ /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
+ * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
+ * for a couple of kernel releases. Since there's no way to know
+ * whether or not the kernel we're using is one of the broken ones, the
+ * best we can do is to clamp the timeout to INT64_MAX. This limits the
+ * maximum timeout from 584 years to 292 years - likely not a big deal.
+ */
+ if (timeout > INT64_MAX)
+ timeout = INT64_MAX;
+
int64_t t = timeout;
- int ret;
/* FIXME: handle !waitAll */
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
- ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
- if (ret == -1 && errno == ETIME)
+ int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
+ if (ret == -1 && errno == ETIME) {
return VK_TIMEOUT;
- else if (ret == -1)
- return vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ "gem wait failed: %m");
+ }
}
return VK_SUCCESS;
const VkSemaphoreCreateInfo* pCreateInfo,
VkSemaphore* pSemaphore)
{
- stub_return(VK_UNSUPPORTED);
+ *pSemaphore = (VkSemaphore)1;
+ stub_return(VK_SUCCESS);
}
void anv_DestroySemaphore(
void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
const struct anv_format *format,
- uint32_t offset, uint32_t range)
+ uint32_t offset, uint32_t range, uint32_t stride)
{
switch (device->info.gen) {
case 7:
- gen7_fill_buffer_surface_state(state, format, offset, range);
+ if (device->info.is_haswell)
+ gen75_fill_buffer_surface_state(state, format, offset, range, stride);
+ else
+ gen7_fill_buffer_surface_state(state, format, offset, range, stride);
break;
case 8:
- gen8_fill_buffer_surface_state(state, format, offset, range);
+ gen8_fill_buffer_surface_state(state, format, offset, range, stride);
+ break;
+ case 9:
+ gen9_fill_buffer_surface_state(state, format, offset, range, stride);
break;
default:
unreachable("unsupported gen\n");
}
}
-VkResult
-anv_buffer_view_create(
- struct anv_device * device,
- const VkBufferViewCreateInfo* pCreateInfo,
- struct anv_buffer_view ** bview_out)
+VkResult anv_CreateBufferView(
+ VkDevice _device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ VkBufferView* pView)
{
- ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
- struct anv_buffer_view *bview;
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
-
- bview = anv_device_alloc(device, sizeof(*bview), 8,
- VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
- if (bview == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- *bview = (struct anv_buffer_view) {
- .bo = buffer->bo,
- .offset = buffer->offset + pCreateInfo->offset,
- .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
- .format = anv_format_for_vk_format(pCreateInfo->format),
- .range = pCreateInfo->range,
- };
-
- *bview_out = bview;
-
- return VK_SUCCESS;
+ stub_return(VK_UNSUPPORTED);
}
void anv_DestroyBufferView(
VkDevice _device,
VkBufferView _bview)
{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);
-
- anv_state_pool_free(&device->surface_state_pool, bview->surface_state);
- anv_device_free(device, bview);
+ stub();
}
void anv_DestroySampler(
anv_device_free(device, sampler);
}
-// Descriptor set functions
-
-VkResult anv_CreateDescriptorSetLayout(
- VkDevice _device,
- const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- VkDescriptorSetLayout* pSetLayout)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_descriptor_set_layout *set_layout;
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
-
- uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
- uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
- uint32_t num_dynamic_buffers = 0;
- uint32_t count = 0;
- VkShaderStageFlags stages = 0;
- uint32_t s;
-
- for (uint32_t i = 0; i < pCreateInfo->count; i++) {
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
- sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
- break;
- default:
- break;
- }
-
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
- surface_count[s] += pCreateInfo->pBinding[i].arraySize;
- break;
- default:
- break;
- }
-
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
- break;
- default:
- break;
- }
-
- stages |= pCreateInfo->pBinding[i].stageFlags;
- count += pCreateInfo->pBinding[i].arraySize;
- }
-
- uint32_t sampler_total = 0;
- uint32_t surface_total = 0;
- for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
- sampler_total += sampler_count[s];
- surface_total += surface_count[s];
- }
-
- size_t size = sizeof(*set_layout) +
- (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
- set_layout = anv_device_alloc(device, size, 8,
- VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
- if (!set_layout)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- set_layout->num_dynamic_buffers = num_dynamic_buffers;
- set_layout->count = count;
- set_layout->shader_stages = stages;
-
- struct anv_descriptor_slot *p = set_layout->entries;
- struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
- struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
- for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
- set_layout->stage[s].surface_count = surface_count[s];
- set_layout->stage[s].surface_start = surface[s] = p;
- p += surface_count[s];
- set_layout->stage[s].sampler_count = sampler_count[s];
- set_layout->stage[s].sampler_start = sampler[s] = p;
- p += sampler_count[s];
- }
-
- uint32_t descriptor = 0;
- int8_t dynamic_slot = 0;
- bool is_dynamic;
- for (uint32_t i = 0; i < pCreateInfo->count; i++) {
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
- for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
- sampler[s]->index = descriptor + j;
- sampler[s]->dynamic_slot = -1;
- sampler[s]++;
- }
- break;
- default:
- break;
- }
-
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- is_dynamic = true;
- break;
- default:
- is_dynamic = false;
- break;
- }
-
- switch (pCreateInfo->pBinding[i].descriptorType) {
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
- for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
- surface[s]->index = descriptor + j;
- if (is_dynamic)
- surface[s]->dynamic_slot = dynamic_slot + j;
- else
- surface[s]->dynamic_slot = -1;
- surface[s]++;
- }
- break;
- default:
- break;
- }
-
- if (is_dynamic)
- dynamic_slot += pCreateInfo->pBinding[i].arraySize;
-
- descriptor += pCreateInfo->pBinding[i].arraySize;
- }
-
- *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
-
- return VK_SUCCESS;
-}
-
-void anv_DestroyDescriptorSetLayout(
- VkDevice _device,
- VkDescriptorSetLayout _set_layout)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
-
- anv_device_free(device, set_layout);
-}
-
-VkResult anv_CreateDescriptorPool(
- VkDevice device,
- VkDescriptorPoolUsage poolUsage,
- uint32_t maxSets,
- const VkDescriptorPoolCreateInfo* pCreateInfo,
- VkDescriptorPool* pDescriptorPool)
-{
- anv_finishme("VkDescriptorPool is a stub");
- pDescriptorPool->handle = 1;
- return VK_SUCCESS;
-}
-
-void anv_DestroyDescriptorPool(
- VkDevice _device,
- VkDescriptorPool _pool)
-{
- anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
-}
-
-VkResult anv_ResetDescriptorPool(
- VkDevice device,
- VkDescriptorPool descriptorPool)
-{
- anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
- return VK_SUCCESS;
-}
-
-VkResult
-anv_descriptor_set_create(struct anv_device *device,
- const struct anv_descriptor_set_layout *layout,
- struct anv_descriptor_set **out_set)
-{
- struct anv_descriptor_set *set;
- size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
-
- set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
- if (!set)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- /* A descriptor set may not be 100% filled. Clear the set so we can can
- * later detect holes in it.
- */
- memset(set, 0, size);
-
- *out_set = set;
-
- return VK_SUCCESS;
-}
-
-void
-anv_descriptor_set_destroy(struct anv_device *device,
- struct anv_descriptor_set *set)
-{
- anv_device_free(device, set);
-}
-
-VkResult anv_AllocDescriptorSets(
- VkDevice _device,
- VkDescriptorPool descriptorPool,
- VkDescriptorSetUsage setUsage,
- uint32_t count,
- const VkDescriptorSetLayout* pSetLayouts,
- VkDescriptorSet* pDescriptorSets)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
-
- VkResult result = VK_SUCCESS;
- struct anv_descriptor_set *set;
- uint32_t i;
-
- for (i = 0; i < count; i++) {
- ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);
-
- result = anv_descriptor_set_create(device, layout, &set);
- if (result != VK_SUCCESS)
- break;
-
- pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
- }
-
- if (result != VK_SUCCESS)
- anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);
-
- return result;
-}
-
-VkResult anv_FreeDescriptorSets(
- VkDevice _device,
- VkDescriptorPool descriptorPool,
- uint32_t count,
- const VkDescriptorSet* pDescriptorSets)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
-
- for (uint32_t i = 0; i < count; i++) {
- ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
-
- anv_descriptor_set_destroy(device, set);
- }
-
- return VK_SUCCESS;
-}
-
-void anv_UpdateDescriptorSets(
- VkDevice device,
- uint32_t writeCount,
- const VkWriteDescriptorSet* pDescriptorWrites,
- uint32_t copyCount,
- const VkCopyDescriptorSet* pDescriptorCopies)
-{
- for (uint32_t i = 0; i < writeCount; i++) {
- const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
- ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
-
- switch (write->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- for (uint32_t j = 0; j < write->count; j++) {
- ANV_FROM_HANDLE(anv_sampler, sampler,
- write->pDescriptors[j].sampler);
-
- set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
- .type = ANV_DESCRIPTOR_TYPE_SAMPLER,
- .sampler = sampler,
- };
- }
-
- if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
- break;
-
- /* fallthrough */
-
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- for (uint32_t j = 0; j < write->count; j++) {
- ANV_FROM_HANDLE(anv_image_view, iview,
- write->pDescriptors[j].imageView);
-
- set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
- .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
- .image_view = iview,
- };
- }
- break;
-
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- anv_finishme("texel buffers not implemented");
- break;
-
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- anv_finishme("input attachments not implemented");
- break;
-
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- for (uint32_t j = 0; j < write->count; j++) {
- ANV_FROM_HANDLE(anv_buffer_view, bview,
- write->pDescriptors[j].bufferView);
-
- set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
- .type = ANV_DESCRIPTOR_TYPE_BUFFER_VIEW,
- .buffer_view = bview,
- };
- }
-
- default:
- break;
- }
- }
-
- for (uint32_t i = 0; i < copyCount; i++) {
- const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
- ANV_FROM_HANDLE(anv_descriptor_set, src, copy->destSet);
- ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
- for (uint32_t j = 0; j < copy->count; j++) {
- dest->descriptors[copy->destBinding + j] =
- src->descriptors[copy->srcBinding + j];
- }
- }
-}
-
VkResult anv_CreateFramebuffer(
VkDevice _device,
const VkFramebufferCreateInfo* pCreateInfo,
anv_device_free(device, fb);
}
-VkResult anv_CreateRenderPass(
- VkDevice _device,
- const VkRenderPassCreateInfo* pCreateInfo,
- VkRenderPass* pRenderPass)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_render_pass *pass;
- size_t size;
- size_t attachments_offset;
-
- assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
-
- size = sizeof(*pass);
- size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
- attachments_offset = size;
- size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
-
- pass = anv_device_alloc(device, size, 8,
- VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
- if (pass == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
- /* Clear the subpasses along with the parent pass. This required because
- * each array member of anv_subpass must be a valid pointer if not NULL.
- */
- memset(pass, 0, size);
- pass->attachment_count = pCreateInfo->attachmentCount;
- pass->subpass_count = pCreateInfo->subpassCount;
- pass->attachments = (void *) pass + attachments_offset;
-
- for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
- struct anv_render_pass_attachment *att = &pass->attachments[i];
-
- att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
- att->samples = pCreateInfo->pAttachments[i].samples;
- att->load_op = pCreateInfo->pAttachments[i].loadOp;
- att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
- // att->store_op = pCreateInfo->pAttachments[i].storeOp;
- // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
-
- if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- if (anv_format_is_color(att->format)) {
- ++pass->num_color_clear_attachments;
- } else if (att->format->depth_format) {
- pass->has_depth_clear_attachment = true;
- }
- } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- assert(att->format->has_stencil);
- pass->has_stencil_clear_attachment = true;
- }
- }
-
- for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
- const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
- struct anv_subpass *subpass = &pass->subpasses[i];
-
- subpass->input_count = desc->inputCount;
- subpass->color_count = desc->colorCount;
-
- if (desc->inputCount > 0) {
- subpass->input_attachments =
- anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
- 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-
- for (uint32_t j = 0; j < desc->inputCount; j++) {
- subpass->input_attachments[j]
- = desc->pInputAttachments[j].attachment;
- }
- }
-
- if (desc->colorCount > 0) {
- subpass->color_attachments =
- anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
- 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-
- for (uint32_t j = 0; j < desc->colorCount; j++) {
- subpass->color_attachments[j]
- = desc->pColorAttachments[j].attachment;
- }
- }
-
- if (desc->pResolveAttachments) {
- subpass->resolve_attachments =
- anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
- 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-
- for (uint32_t j = 0; j < desc->colorCount; j++) {
- subpass->resolve_attachments[j]
- = desc->pResolveAttachments[j].attachment;
- }
- }
-
- subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
- }
-
- *pRenderPass = anv_render_pass_to_handle(pass);
-
- return VK_SUCCESS;
-}
-
-void anv_DestroyRenderPass(
- VkDevice _device,
- VkRenderPass _pass)
-{
- ANV_FROM_HANDLE(anv_device, device, _device);
- ANV_FROM_HANDLE(anv_render_pass, pass, _pass);
-
- for (uint32_t i = 0; i < pass->subpass_count; i++) {
- /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
- * Don't free the null arrays.
- */
- struct anv_subpass *subpass = &pass->subpasses[i];
-
- anv_device_free(device, subpass->input_attachments);
- anv_device_free(device, subpass->color_attachments);
- anv_device_free(device, subpass->resolve_attachments);
- }
-
- anv_device_free(device, pass);
-}
-
-VkResult anv_GetRenderAreaGranularity(
- VkDevice device,
- VkRenderPass renderPass,
- VkExtent2D* pGranularity)
-{
- *pGranularity = (VkExtent2D) { 1, 1 };
-
- return VK_SUCCESS;
-}
-
void vkCmdDbgMarkerBegin(
VkCmdBuffer cmdBuffer,
const char* pMarker)