X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fvulkan%2Fanv_device.c;h=aca082ac1b51b26e90896afdcdd91242c065c052;hb=3db43e8f3e60f8dc746eb4ab2e86f6b1b32d248a;hp=05e723fe60d6ee0b382c7c1865c71efe5abf8fa3;hpb=c284c39b135821a9417b95319fa6726e5892bef9;p=mesa.git diff --git a/src/vulkan/anv_device.c b/src/vulkan/anv_device.c index 05e723fe60d..aca082ac1b5 100644 --- a/src/vulkan/anv_device.c +++ b/src/vulkan/anv_device.c @@ -31,6 +31,8 @@ #include "mesa/main/git_sha1.h" #include "util/strtod.h" +#include "gen7_pack.h" + struct anv_dispatch_table dtable; static void @@ -74,13 +76,27 @@ anv_physical_device_init(struct anv_physical_device *device, } device->name = brw_get_device_name(device->chipset_id); - device->info = brw_get_device_info(device->chipset_id, -1); + device->info = brw_get_device_info(device->chipset_id); if (!device->info) { result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED, "failed to get device info"); goto fail; } - + + if (device->info->is_haswell) { + fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n"); + } else if (device->info->gen == 7 && !device->info->is_baytrail) { + fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n"); + } else if (device->info->gen == 9) { + fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n"); + } else if (device->info->gen == 8 && !device->info->is_cherryview) { + /* Broadwell is as fully supported as anything */ + } else { + result = vk_errorf(VK_UNSUPPORTED, + "Vulkan not yet supported on %s", device->name); + goto fail; + } + if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) { result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED, "failed to get aperture size: %m"); @@ -104,7 +120,7 @@ anv_physical_device_init(struct anv_physical_device *device, "non-llc gpu"); goto fail; } - + close(fd); brw_process_intel_debug_variable(); @@ -117,8 +133,10 @@ anv_physical_device_init(struct anv_physical_device *device, device->compiler->shader_debug_log = compiler_debug_log; device->compiler->shader_perf_log = compiler_perf_log; + isl_device_init(&device->isl_dev, device->info); + return VK_SUCCESS; - + fail: close(fd); return result; @@ -206,7 +224,7 @@ VkResult anv_CreateInstance( instance->pfnAlloc = alloc_callbacks->pfnAlloc; instance->pfnFree = alloc_callbacks->pfnFree; instance->apiVersion = pCreateInfo->pAppInfo->apiVersion; - instance->physicalDeviceCount = 0; + instance->physicalDeviceCount = -1; _mesa_locale_init(); @@ -271,13 +289,16 @@ VkResult anv_EnumeratePhysicalDevices( ANV_FROM_HANDLE(anv_instance, instance, _instance); VkResult result; - if (instance->physicalDeviceCount == 0) { + if (instance->physicalDeviceCount < 0) { result = anv_physical_device_init(&instance->physicalDevice, instance, "/dev/dri/renderD128"); - if (result != VK_SUCCESS) + if (result == VK_UNSUPPORTED) { + instance->physicalDeviceCount = 0; + } else if (result == VK_SUCCESS) { + instance->physicalDeviceCount = 1; + } else { return result; - - instance->physicalDeviceCount = 1; + } } /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL; @@ -619,7 +640,7 @@ VkResult anv_CreateDevice( return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT); } - anv_set_dispatch_gen(physical_device->info->gen); + anv_set_dispatch_devinfo(physical_device->info); device = anv_instance_alloc(instance, sizeof(*device), 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT); @@ -633,7 +654,7 @@ VkResult anv_CreateDevice( device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC); if (device->fd == -1) goto fail_device; - + 
device->context_id = anv_gem_create_context(device); if (device->context_id == -1) goto fail_fd; @@ -647,15 +668,18 @@ VkResult anv_CreateDevice( anv_state_pool_init(&device->dynamic_state_pool, &device->dynamic_state_block_pool); - anv_block_pool_init(&device->instruction_block_pool, device, 2048); + anv_block_pool_init(&device->instruction_block_pool, device, 4096); anv_block_pool_init(&device->surface_state_block_pool, device, 4096); anv_state_pool_init(&device->surface_state_pool, &device->surface_state_block_pool); + anv_bo_init_new(&device->workaround_bo, device, 1024); + anv_block_pool_init(&device->scratch_block_pool, device, 0x10000); device->info = *physical_device->info; + device->isl_dev = physical_device->isl_dev; anv_queue_init(device, &device->queue); @@ -691,6 +715,9 @@ void anv_DestroyDevice( anv_state_pool_free(&device->dynamic_state_pool, device->border_colors); #endif + anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size); + anv_gem_close(device, device->workaround_bo.gem_handle); + anv_bo_pool_finish(&device->batch_bo_pool); anv_state_pool_finish(&device->dynamic_state_pool); anv_block_pool_finish(&device->dynamic_state_block_pool); @@ -1001,7 +1028,7 @@ VkResult anv_MapMemory( mem->map_size = size; *ppData = mem->map; - + return VK_SUCCESS; } @@ -1272,14 +1299,24 @@ VkResult anv_WaitForFences( uint64_t timeout) { ANV_FROM_HANDLE(anv_device, device, _device); + + /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed + * to block indefinitely timeouts <= 0. Unfortunately, this was broken + * for a couple of kernel releases. Since there's no way to know + * whether or not the kernel we're using is one of the broken ones, the + * best we can do is to clamp the timeout to INT64_MAX. This limits the + * maximum timeout from 584 years to 292 years - likely not a big deal. 
+ */ + if (timeout > INT64_MAX) + timeout = INT64_MAX; + int64_t t = timeout; - int ret; /* FIXME: handle !waitAll */ for (uint32_t i = 0; i < fenceCount; i++) { ANV_FROM_HANDLE(anv_fence, fence, pFences[i]); - ret = anv_gem_wait(device, fence->bo.gem_handle, &t); + int ret = anv_gem_wait(device, fence->bo.gem_handle, &t); if (ret == -1 && errno == ETIME) { return VK_TIMEOUT; } else if (ret == -1) { @@ -1299,7 +1336,7 @@ VkResult anv_CreateSemaphore( const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore) { - pSemaphore->handle = 1; + *pSemaphore = (VkSemaphore)1; stub_return(VK_SUCCESS); } @@ -1401,58 +1438,39 @@ void anv_DestroyBuffer( void anv_fill_buffer_surface_state(struct anv_device *device, void *state, const struct anv_format *format, - uint32_t offset, uint32_t range) + uint32_t offset, uint32_t range, uint32_t stride) { switch (device->info.gen) { case 7: - gen7_fill_buffer_surface_state(state, format, offset, range); + if (device->info.is_haswell) + gen75_fill_buffer_surface_state(state, format, offset, range, stride); + else + gen7_fill_buffer_surface_state(state, format, offset, range, stride); break; case 8: - gen8_fill_buffer_surface_state(state, format, offset, range); + gen8_fill_buffer_surface_state(state, format, offset, range, stride); + break; + case 9: + gen9_fill_buffer_surface_state(state, format, offset, range, stride); break; default: unreachable("unsupported gen\n"); } } -VkResult -anv_buffer_view_create( - struct anv_device * device, - const VkBufferViewCreateInfo* pCreateInfo, - struct anv_buffer_view ** bview_out) +VkResult anv_CreateBufferView( + VkDevice _device, + const VkBufferViewCreateInfo* pCreateInfo, + VkBufferView* pView) { - ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer); - struct anv_buffer_view *bview; - - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO); - - bview = anv_device_alloc(device, sizeof(*bview), 8, - VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - if (bview == NULL) - return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - - *bview = (struct anv_buffer_view) { - .bo = buffer->bo, - .offset = buffer->offset + pCreateInfo->offset, - .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64), - .format = anv_format_for_vk_format(pCreateInfo->format), - .range = pCreateInfo->range, - }; - - *bview_out = bview; - - return VK_SUCCESS; + stub_return(VK_UNSUPPORTED); } void anv_DestroyBufferView( VkDevice _device, VkBufferView _bview) { - ANV_FROM_HANDLE(anv_device, device, _device); - ANV_FROM_HANDLE(anv_buffer_view, bview, _bview); - - anv_state_pool_free(&device->surface_state_pool, bview->surface_state); - anv_device_free(device, bview); + stub(); } void anv_DestroySampler( @@ -1465,353 +1483,6 @@ void anv_DestroySampler( anv_device_free(device, sampler); } -// Descriptor set functions - -VkResult anv_CreateDescriptorSetLayout( - VkDevice _device, - const VkDescriptorSetLayoutCreateInfo* pCreateInfo, - VkDescriptorSetLayout* pSetLayout) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - struct anv_descriptor_set_layout *set_layout; - uint32_t s; - - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO); - - uint32_t immutable_sampler_count = 0; - for (uint32_t b = 0; b < pCreateInfo->count; b++) { - if (pCreateInfo->pBinding[b].pImmutableSamplers) - immutable_sampler_count += pCreateInfo->pBinding[b].arraySize; - } - - size_t size = sizeof(struct anv_descriptor_set_layout) + - pCreateInfo->count * sizeof(set_layout->binding[0]) + - immutable_sampler_count * 
sizeof(struct anv_sampler *); - - set_layout = anv_device_alloc(device, size, 8, - VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - if (!set_layout) - return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - - /* We just allocate all the samplers at the end of the struct */ - struct anv_sampler **samplers = - (struct anv_sampler **)&set_layout->binding[pCreateInfo->count]; - - set_layout->binding_count = pCreateInfo->count; - set_layout->shader_stages = 0; - set_layout->size = 0; - - /* Initialize all binding_layout entries to -1 */ - memset(set_layout->binding, -1, - pCreateInfo->count * sizeof(set_layout->binding[0])); - - /* Initialize all samplers to 0 */ - memset(samplers, 0, immutable_sampler_count * sizeof(*samplers)); - - uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, }; - uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, }; - uint32_t dynamic_offset_count = 0; - - for (uint32_t b = 0; b < pCreateInfo->count; b++) { - uint32_t array_size = MAX2(1, pCreateInfo->pBinding[b].arraySize); - set_layout->binding[b].array_size = array_size; - set_layout->size += array_size; - - switch (pCreateInfo->pBinding[b].descriptorType) { - case VK_DESCRIPTOR_TYPE_SAMPLER: - case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: - for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) { - set_layout->binding[b].stage[s].sampler_index = sampler_count[s]; - sampler_count[s] += array_size; - } - break; - default: - break; - } - - switch (pCreateInfo->pBinding[b].descriptorType) { - case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: - case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: - case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: - for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) { - set_layout->binding[b].stage[s].surface_index = surface_count[s]; - surface_count[s] += array_size; - } - break; - default: - break; - } - - switch (pCreateInfo->pBinding[b].descriptorType) { - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: - set_layout->binding[b].dynamic_offset_index = dynamic_offset_count; - dynamic_offset_count += array_size; - break; - default: - break; - } - - if (pCreateInfo->pBinding[b].pImmutableSamplers) { - set_layout->binding[b].immutable_samplers = samplers; - samplers += array_size; - - for (uint32_t i = 0; i < array_size; i++) - set_layout->binding[b].immutable_samplers[i] = - anv_sampler_from_handle(pCreateInfo->pBinding[b].pImmutableSamplers[i]); - } else { - set_layout->binding[b].immutable_samplers = NULL; - } - - set_layout->shader_stages |= pCreateInfo->pBinding[b].stageFlags; - } - - set_layout->dynamic_offset_count = dynamic_offset_count; - - *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout); - - return VK_SUCCESS; -} - -void anv_DestroyDescriptorSetLayout( - VkDevice _device, - VkDescriptorSetLayout _set_layout) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout); - - anv_device_free(device, set_layout); -} - -VkResult anv_CreateDescriptorPool( - VkDevice device, - const VkDescriptorPoolCreateInfo* pCreateInfo, - VkDescriptorPool* pDescriptorPool) -{ - anv_finishme("VkDescriptorPool is a stub"); - pDescriptorPool->handle = 1; - return VK_SUCCESS; -} - -void 
anv_DestroyDescriptorPool( - VkDevice _device, - VkDescriptorPool _pool) -{ - anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets"); -} - -VkResult anv_ResetDescriptorPool( - VkDevice device, - VkDescriptorPool descriptorPool) -{ - anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets"); - return VK_SUCCESS; -} - -VkResult -anv_descriptor_set_create(struct anv_device *device, - const struct anv_descriptor_set_layout *layout, - struct anv_descriptor_set **out_set) -{ - struct anv_descriptor_set *set; - size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]); - - set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - if (!set) - return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - - /* A descriptor set may not be 100% filled. Clear the set so we can can - * later detect holes in it. - */ - memset(set, 0, size); - - /* Go through and fill out immutable samplers if we have any */ - struct anv_descriptor *desc = set->descriptors; - for (uint32_t b = 0; b < layout->binding_count; b++) { - if (layout->binding[b].immutable_samplers) { - for (uint32_t i = 0; i < layout->binding[b].array_size; i++) - desc[i].sampler = layout->binding[b].immutable_samplers[i]; - } - desc += layout->binding[b].array_size; - } - - *out_set = set; - - return VK_SUCCESS; -} - -void -anv_descriptor_set_destroy(struct anv_device *device, - struct anv_descriptor_set *set) -{ - anv_device_free(device, set); -} - -VkResult anv_AllocDescriptorSets( - VkDevice _device, - VkDescriptorPool descriptorPool, - VkDescriptorSetUsage setUsage, - uint32_t count, - const VkDescriptorSetLayout* pSetLayouts, - VkDescriptorSet* pDescriptorSets) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - - VkResult result = VK_SUCCESS; - struct anv_descriptor_set *set; - uint32_t i; - - for (i = 0; i < count; i++) { - ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]); - - result = anv_descriptor_set_create(device, layout, &set); - if (result != VK_SUCCESS) - break; - - pDescriptorSets[i] = anv_descriptor_set_to_handle(set); - } - - if (result != VK_SUCCESS) - anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets); - - return result; -} - -VkResult anv_FreeDescriptorSets( - VkDevice _device, - VkDescriptorPool descriptorPool, - uint32_t count, - const VkDescriptorSet* pDescriptorSets) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - - for (uint32_t i = 0; i < count; i++) { - ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]); - - anv_descriptor_set_destroy(device, set); - } - - return VK_SUCCESS; -} - -void anv_UpdateDescriptorSets( - VkDevice device, - uint32_t writeCount, - const VkWriteDescriptorSet* pDescriptorWrites, - uint32_t copyCount, - const VkCopyDescriptorSet* pDescriptorCopies) -{ - for (uint32_t i = 0; i < writeCount; i++) { - const VkWriteDescriptorSet *write = &pDescriptorWrites[i]; - ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet); - - switch (write->descriptorType) { - case VK_DESCRIPTOR_TYPE_SAMPLER: - for (uint32_t j = 0; j < write->count; j++) { - ANV_FROM_HANDLE(anv_sampler, sampler, - write->pDescriptors[j].sampler); - - set->descriptors[write->destBinding + j] = (struct anv_descriptor) { - .type = ANV_DESCRIPTOR_TYPE_SAMPLER, - .sampler = sampler, - }; - } - break; - - case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: - for (uint32_t j = 0; j < write->count; j++) { - struct anv_descriptor *desc = - &set->descriptors[write->destBinding + j]; - ANV_FROM_HANDLE(anv_image_view, iview, - 
write->pDescriptors[j].imageView); - ANV_FROM_HANDLE(anv_sampler, sampler, - write->pDescriptors[j].sampler); - - desc->type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER; - desc->image_view = iview; - - /* If this descriptor has an immutable sampler, we don't want - * to stomp on it. - */ - if (sampler) - desc->sampler = sampler; - } - break; - - case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: - case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - for (uint32_t j = 0; j < write->count; j++) { - ANV_FROM_HANDLE(anv_image_view, iview, - write->pDescriptors[j].imageView); - - set->descriptors[write->destBinding + j] = (struct anv_descriptor) { - .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW, - .image_view = iview, - }; - } - break; - - case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: - anv_finishme("texel buffers not implemented"); - break; - - case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: - anv_finishme("input attachments not implemented"); - break; - - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: - case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: - case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: - for (uint32_t j = 0; j < write->count; j++) { - if (write->pDescriptors[j].bufferView.handle) { - ANV_FROM_HANDLE(anv_buffer_view, bview, - write->pDescriptors[j].bufferView); - - set->descriptors[write->destBinding + j] = - (struct anv_descriptor) { - .type = ANV_DESCRIPTOR_TYPE_BUFFER_VIEW, - .buffer_view = bview, - }; - } else { - ANV_FROM_HANDLE(anv_buffer, buffer, - write->pDescriptors[j].bufferInfo.buffer); - assert(buffer); - - set->descriptors[write->destBinding + j] = - (struct anv_descriptor) { - .type = ANV_DESCRIPTOR_TYPE_BUFFER_AND_OFFSET, - .buffer = buffer, - .offset = write->pDescriptors[j].bufferInfo.offset, - .range = write->pDescriptors[j].bufferInfo.range, - }; - } - } - - default: - break; - } - } - - for (uint32_t i = 0; i < copyCount; i++) { - const VkCopyDescriptorSet *copy = &pDescriptorCopies[i]; - ANV_FROM_HANDLE(anv_descriptor_set, src, copy->destSet); - ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet); - for (uint32_t j = 0; j < copy->count; j++) { - dest->descriptors[copy->destBinding + j] = - src->descriptors[copy->srcBinding + j]; - } - } -} - VkResult anv_CreateFramebuffer( VkDevice _device, const VkFramebufferCreateInfo* pCreateInfo, @@ -1854,143 +1525,6 @@ void anv_DestroyFramebuffer( anv_device_free(device, fb); } -VkResult anv_CreateRenderPass( - VkDevice _device, - const VkRenderPassCreateInfo* pCreateInfo, - VkRenderPass* pRenderPass) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - struct anv_render_pass *pass; - size_t size; - size_t attachments_offset; - - assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO); - - size = sizeof(*pass); - size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]); - attachments_offset = size; - size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]); - - pass = anv_device_alloc(device, size, 8, - VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - if (pass == NULL) - return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - - /* Clear the subpasses along with the parent pass. This required because - * each array member of anv_subpass must be a valid pointer if not NULL. 
- */ - memset(pass, 0, size); - pass->attachment_count = pCreateInfo->attachmentCount; - pass->subpass_count = pCreateInfo->subpassCount; - pass->attachments = (void *) pass + attachments_offset; - - for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) { - struct anv_render_pass_attachment *att = &pass->attachments[i]; - - att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format); - att->samples = pCreateInfo->pAttachments[i].samples; - att->load_op = pCreateInfo->pAttachments[i].loadOp; - att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp; - // att->store_op = pCreateInfo->pAttachments[i].storeOp; - // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp; - - if (anv_format_is_color(att->format)) { - if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { - ++pass->num_color_clear_attachments; - } - } else { - if (att->format->depth_format && - att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { - pass->has_depth_clear_attachment = true; - } - - if (att->format->has_stencil && - att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { - assert(att->format->has_stencil); - pass->has_stencil_clear_attachment = true; - } - } - } - - for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) { - const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i]; - struct anv_subpass *subpass = &pass->subpasses[i]; - - subpass->input_count = desc->inputCount; - subpass->color_count = desc->colorCount; - - if (desc->inputCount > 0) { - subpass->input_attachments = - anv_device_alloc(device, desc->inputCount * sizeof(uint32_t), - 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - - for (uint32_t j = 0; j < desc->inputCount; j++) { - subpass->input_attachments[j] - = desc->pInputAttachments[j].attachment; - } - } - - if (desc->colorCount > 0) { - subpass->color_attachments = - anv_device_alloc(device, desc->colorCount * sizeof(uint32_t), - 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - - for (uint32_t j = 0; j < desc->colorCount; j++) { - subpass->color_attachments[j] - = desc->pColorAttachments[j].attachment; - } - } - - if (desc->pResolveAttachments) { - subpass->resolve_attachments = - anv_device_alloc(device, desc->colorCount * sizeof(uint32_t), - 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT); - - for (uint32_t j = 0; j < desc->colorCount; j++) { - subpass->resolve_attachments[j] - = desc->pResolveAttachments[j].attachment; - } - } - - subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment; - } - - *pRenderPass = anv_render_pass_to_handle(pass); - - return VK_SUCCESS; -} - -void anv_DestroyRenderPass( - VkDevice _device, - VkRenderPass _pass) -{ - ANV_FROM_HANDLE(anv_device, device, _device); - ANV_FROM_HANDLE(anv_render_pass, pass, _pass); - - for (uint32_t i = 0; i < pass->subpass_count; i++) { - /* In VkSubpassCreateInfo, each of the attachment arrays may be null. - * Don't free the null arrays. - */ - struct anv_subpass *subpass = &pass->subpasses[i]; - - anv_device_free(device, subpass->input_attachments); - anv_device_free(device, subpass->color_attachments); - anv_device_free(device, subpass->resolve_attachments); - } - - anv_device_free(device, pass); -} - -VkResult anv_GetRenderAreaGranularity( - VkDevice device, - VkRenderPass renderPass, - VkExtent2D* pGranularity) -{ - *pGranularity = (VkExtent2D) { 1, 1 }; - - return VK_SUCCESS; -} - void vkCmdDbgMarkerBegin( VkCmdBuffer cmdBuffer, const char* pMarker)
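The anv_WaitForFences hunk above clamps the Vulkan timeout to INT64_MAX before handing it to DRM_IOCTL_I915_GEM_WAIT, which takes a signed 64-bit nanosecond count. A minimal standalone sketch of that clamp follows; the helper name clamp_wait_timeout_ns is invented for illustration and is not part of anv_device.c.

#include <stdint.h>

/* Illustrative only: convert a Vulkan-style unsigned 64-bit timeout in
 * nanoseconds into the signed value the GEM wait ioctl expects, exactly as
 * the hunk above does inline.  UINT64_MAX ns is roughly 584 years and
 * INT64_MAX ns (2^63 ns, about 9.2e9 seconds) is roughly 292 years, so the
 * clamp only shortens absurdly long waits.
 */
static int64_t
clamp_wait_timeout_ns(uint64_t timeout)
{
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;
   return (int64_t)timeout;
}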
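The anv_EnumeratePhysicalDevices hunk changes the "not yet probed" sentinel from 0 to -1, so that a VK_UNSUPPORTED probe result can be cached as a genuine count of 0 instead of being retried on every call. A self-contained sketch of that three-state caching is below; the names are invented stand-ins for the instance state and for anv_physical_device_init(), not the driver's own identifiers.

#include <stdio.h>

enum probe_result { PROBE_OK, PROBE_UNSUPPORTED, PROBE_ERROR };

static int cached_count = -1;   /* stands in for instance->physicalDeviceCount */

static enum probe_result
probe_device(void)
{
   return PROBE_UNSUPPORTED;    /* stand-in for anv_physical_device_init() */
}

static int
enumerate_devices(void)
{
   if (cached_count < 0) {      /* -1: first call, probe the hardware once */
      switch (probe_device()) {
      case PROBE_UNSUPPORTED: cached_count = 0; break;  /* cache "none found" */
      case PROBE_OK:          cached_count = 1; break;  /* cache "one device" */
      default:                return -1;                /* real error: leave at -1 */
      }
   }
   return cached_count;
}

int
main(void)
{
   printf("%d\n", enumerate_devices());   /* probes the device */
   printf("%d\n", enumerate_devices());   /* served from the cached count */
   return 0;
}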