From fd817291c7f87985d9ef9015cc086d1b5fd86825 Mon Sep 17 00:00:00 2001
From: Jason Ekstrand
Date: Thu, 6 Feb 2020 21:18:59 -0600
Subject: [PATCH] anv: Handle NULL descriptors

Reviewed-by: Caio Marcelo de Oliveira Filho
Reviewed-by: Lionel Landwerlin
Part-of:
---
 src/intel/vulkan/anv_cmd_buffer.c     |   2 -
 src/intel/vulkan/anv_descriptor_set.c |  32 +++--
 src/intel/vulkan/anv_device.c         |  12 ++
 src/intel/vulkan/anv_private.h        |  10 ++
 src/intel/vulkan/genX_cmd_buffer.c    | 167 ++++++++++++++++----------
 5 files changed, 150 insertions(+), 73 deletions(-)

diff --git a/src/intel/vulkan/anv_cmd_buffer.c b/src/intel/vulkan/anv_cmd_buffer.c
index 8f94715c0d0..f4921ba551e 100644
--- a/src/intel/vulkan/anv_cmd_buffer.c
+++ b/src/intel/vulkan/anv_cmd_buffer.c
@@ -1112,9 +1112,7 @@ void anv_CmdPushDescriptorSetKHR(
       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
          for (uint32_t j = 0; j < write->descriptorCount; j++) {
-            assert(write->pBufferInfo[j].buffer);
             ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
-            assert(buffer);

             anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                             &cmd_buffer->surface_state_stream,
diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index b9d7eea86eb..2e101fd1fdc 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -1166,6 +1166,7 @@ anv_descriptor_set_write_image_view(struct anv_device *device,

    void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
                     element * anv_descriptor_size(bind_layout);
+   memset(desc_map, 0, anv_descriptor_size(bind_layout));

    if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
       struct anv_sampled_image_descriptor desc_data[3];
@@ -1194,6 +1195,9 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
              MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
    }

+   if (image_view == NULL)
+      return;
+
    if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
       assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
       assert(image_view->n_planes == 1);
@@ -1215,7 +1219,7 @@ anv_descriptor_set_write_image_view(struct anv_device *device,
       anv_descriptor_set_write_image_param(desc_map, image_param);
    }

-   if (image_view && (bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE)) {
+   if (bind_layout->data & ANV_DESCRIPTOR_TEXTURE_SWIZZLE) {
       assert(!(bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE));
       assert(image_view);
       struct anv_texture_swizzle_descriptor desc_data[3];
@@ -1251,14 +1255,20 @@ anv_descriptor_set_write_buffer_view(struct anv_device *device,

    assert(type == bind_layout->type);

+   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
+                    element * anv_descriptor_size(bind_layout);
+
+   if (buffer_view == NULL) {
+      *desc = (struct anv_descriptor) { .type = type, };
+      memset(desc_map, 0, anv_descriptor_size(bind_layout));
+      return;
+   }
+
    *desc = (struct anv_descriptor) {
       .type = type,
       .buffer_view = buffer_view,
    };

-   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
-                    element * anv_descriptor_size(bind_layout);
-
    if (bind_layout->data & ANV_DESCRIPTOR_SAMPLED_IMAGE) {
       struct anv_sampled_image_descriptor desc_data = {
          .image = anv_surface_state_to_handle(buffer_view->surface_state),
@@ -1301,6 +1311,15 @@ anv_descriptor_set_write_buffer(struct anv_device *device,

    assert(type == bind_layout->type);

+   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
+                    element * anv_descriptor_size(bind_layout);
+
+   if (buffer == NULL) {
+      *desc = (struct anv_descriptor) { .type = type, };
+      memset(desc_map, 0, anv_descriptor_size(bind_layout));
+      return;
+   }
+
    struct anv_address bind_addr = anv_address_add(buffer->address, offset);
    uint64_t bind_range = anv_buffer_get_range(buffer, offset, range);

@@ -1344,9 +1363,6 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
       };
    }

-   void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
-                    element * anv_descriptor_size(bind_layout);
-
    if (bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
       struct anv_address_range_descriptor desc_data = {
          .address = anv_address_physical(bind_addr),
@@ -1421,9 +1437,7 @@ void anv_UpdateDescriptorSets(
       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
          for (uint32_t j = 0; j < write->descriptorCount; j++) {
-            assert(write->pBufferInfo[j].buffer);
             ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
-            assert(buffer);

             anv_descriptor_set_write_buffer(device, set,
                                             NULL,
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index c5c56fbc20f..82b73bc0595 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -2890,6 +2890,18 @@ VkResult anv_CreateDevice(
    if (result != VK_SUCCESS)
       goto fail_workaround_bo;

+   /* Allocate a null surface state at surface state offset 0.  This makes
+    * NULL descriptor handling trivial because we can just memset structures
+    * to zero and they have a valid descriptor.
+    */
+   device->null_surface_state =
+      anv_state_pool_alloc(&device->surface_state_pool,
+                           device->isl_dev.ss.size,
+                           device->isl_dev.ss.align);
+   isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
+                       isl_extent3d(1, 1, 1) /* This shouldn't matter */);
+   assert(device->null_surface_state.offset == 0);
+
    if (device->info.gen >= 10) {
       result = anv_device_init_hiz_clear_value_bo(device);
       if (result != VK_SUCCESS)
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 352d4b8e249..ed851f5aacf 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -1292,9 +1292,19 @@ struct anv_device {
    struct anv_state_pool                       binding_table_pool;
    struct anv_state_pool                       surface_state_pool;

+   /** BO used for various workarounds
+    *
+    * There are a number of workarounds on our hardware which require writing
+    * data somewhere and it doesn't really matter where.  For that, we use
+    * this BO and just write to the first dword or so.
+    *
+    * We also need to be able to handle NULL buffers bound as pushed UBOs.
+    * For that, we use the high bytes (>= 1024) of the workaround BO.
+    */
    struct anv_bo *                             workaround_bo;
    struct anv_bo *                             trivial_batch_bo;
    struct anv_bo *                             hiz_clear_bo;
+   struct anv_state                            null_surface_state;

    struct anv_pipeline_cache                   default_pipeline_cache;
    struct blorp_context                        blorp;
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index b2b292c1ff3..bc7ca7fd5db 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -2575,18 +2575,23 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
-         struct anv_surface_state sstate =
-            (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
-            desc->image_view->planes[binding->plane].general_sampler_surface_state :
-            desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
-         surface_state = sstate.state;
-         assert(surface_state.alloc_size);
-         if (need_client_mem_relocs)
-            add_surface_state_relocs(cmd_buffer, sstate);
+         if (desc->image_view) {
+            struct anv_surface_state sstate =
+               (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
+               desc->image_view->planes[binding->plane].general_sampler_surface_state :
+               desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
+            surface_state = sstate.state;
+            assert(surface_state.alloc_size);
+            if (need_client_mem_relocs)
+               add_surface_state_relocs(cmd_buffer, sstate);
+         } else {
+            surface_state = cmd_buffer->device->null_surface_state;
+         }
         break;
      }

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         assert(shader->stage == MESA_SHADER_FRAGMENT);
+         assert(desc->image_view != NULL);
         if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) {
            /* For depth and stencil input attachments, we treat it like any
             * old texture that a user may have bound.
@@ -2613,68 +2618,84 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
-         struct anv_surface_state sstate = (binding->write_only)
-            ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
-            : desc->image_view->planes[binding->plane].storage_surface_state;
-         surface_state = sstate.state;
-         assert(surface_state.alloc_size);
-         if (need_client_mem_relocs)
-            add_surface_state_relocs(cmd_buffer, sstate);
+         if (desc->image_view) {
+            struct anv_surface_state sstate = (binding->write_only)
+               ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
+               : desc->image_view->planes[binding->plane].storage_surface_state;
+            surface_state = sstate.state;
+            assert(surface_state.alloc_size);
+            if (need_client_mem_relocs)
+               add_surface_state_relocs(cmd_buffer, sstate);
+         } else {
+            surface_state = cmd_buffer->device->null_surface_state;
+         }
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-         surface_state = desc->buffer_view->surface_state;
-         assert(surface_state.alloc_size);
-         if (need_client_mem_relocs) {
-            add_surface_reloc(cmd_buffer, surface_state,
-                              desc->buffer_view->address);
+         if (desc->buffer_view) {
+            surface_state = desc->buffer_view->surface_state;
+            assert(surface_state.alloc_size);
+            if (need_client_mem_relocs) {
+               add_surface_reloc(cmd_buffer, surface_state,
+                                 desc->buffer_view->address);
+            }
+         } else {
+            surface_state = cmd_buffer->device->null_surface_state;
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
-         /* Compute the offset within the buffer */
-         struct anv_push_constants *push =
-            &cmd_buffer->state.push_constants[shader->stage];
-
-         uint32_t dynamic_offset =
-            push->dynamic_offsets[binding->dynamic_offset_index];
-         uint64_t offset = desc->offset + dynamic_offset;
-         /* Clamp to the buffer size */
-         offset = MIN2(offset, desc->buffer->size);
-         /* Clamp the range to the buffer size */
-         uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
-
-         /* Align the range for consistency */
-         if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
-            range = align_u32(range, ANV_UBO_BOUNDS_CHECK_ALIGNMENT);
-
-         struct anv_address address =
-            anv_address_add(desc->buffer->address, offset);
-
-         surface_state =
-            anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
-         enum isl_format format =
-            anv_isl_format_for_descriptor_type(desc->type);
-
-         anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
-                                       format, address, range, 1);
-         if (need_client_mem_relocs)
-            add_surface_reloc(cmd_buffer, surface_state, address);
+         if (desc->buffer) {
+            /* Compute the offset within the buffer */
+            struct anv_push_constants *push =
+               &cmd_buffer->state.push_constants[shader->stage];
+
+            uint32_t dynamic_offset =
+               push->dynamic_offsets[binding->dynamic_offset_index];
+            uint64_t offset = desc->offset + dynamic_offset;
+            /* Clamp to the buffer size */
+            offset = MIN2(offset, desc->buffer->size);
+            /* Clamp the range to the buffer size */
+            uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
+
+            /* Align the range for consistency */
+            if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
+               range = align_u32(range, ANV_UBO_BOUNDS_CHECK_ALIGNMENT);
+
+            struct anv_address address =
+               anv_address_add(desc->buffer->address, offset);
+
+            surface_state =
+               anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+            enum isl_format format =
+               anv_isl_format_for_descriptor_type(desc->type);
+
+            anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
+                                          format, address, range, 1);
+            if (need_client_mem_relocs)
+               add_surface_reloc(cmd_buffer, surface_state, address);
+         } else {
+            surface_state = cmd_buffer->device->null_surface_state;
+         }
         break;
      }

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
-         surface_state = (binding->write_only)
-            ? desc->buffer_view->writeonly_storage_surface_state
-            : desc->buffer_view->storage_surface_state;
-         assert(surface_state.alloc_size);
-         if (need_client_mem_relocs) {
-            add_surface_reloc(cmd_buffer, surface_state,
-                              desc->buffer_view->address);
+         if (desc->buffer_view) {
+            surface_state = (binding->write_only)
+               ? desc->buffer_view->writeonly_storage_surface_state
+               : desc->buffer_view->storage_surface_state;
+            assert(surface_state.alloc_size);
+            if (need_client_mem_relocs) {
+               add_surface_reloc(cmd_buffer, surface_state,
+                                 desc->buffer_view->address);
+            }
+         } else {
+            surface_state = cmd_buffer->device->null_surface_state;
         }
         break;

@@ -2886,16 +2907,29 @@ get_push_range_address(struct anv_cmd_buffer *cmd_buffer,
         &set->descriptors[range->index];

      if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
-         return desc->buffer_view->address;
+         if (desc->buffer_view)
+            return desc->buffer_view->address;
      } else {
         assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
-         struct anv_push_constants *push =
-            &cmd_buffer->state.push_constants[stage];
-         uint32_t dynamic_offset =
-            push->dynamic_offsets[range->dynamic_offset_index];
-         return anv_address_add(desc->buffer->address,
-                                desc->offset + dynamic_offset);
+         if (desc->buffer) {
+            struct anv_push_constants *push =
+               &cmd_buffer->state.push_constants[stage];
+            uint32_t dynamic_offset =
+               push->dynamic_offsets[range->dynamic_offset_index];
+            return anv_address_add(desc->buffer->address,
+                                   desc->offset + dynamic_offset);
+         }
      }
+
+      /* For NULL UBOs, we just return an address in the workaround BO.  We do
+       * writes to it for workarounds but always at the bottom.  The higher
+       * bytes should be all zeros.
+       */
+      assert(range->length * 32 <= 2048);
+      return (struct anv_address) {
+         .bo = cmd_buffer->device->workaround_bo,
+         .offset = 1024,
+      };
   }
   }
 }
@@ -2935,8 +2969,17 @@ get_push_range_bound_size(struct anv_cmd_buffer *cmd_buffer,
         &set->descriptors[range->index];

      if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+         if (!desc->buffer_view)
+            return 0;
+
+         if (range->start * 32 > desc->buffer_view->range)
+            return 0;
+
         return desc->buffer_view->range;
      } else {
+         if (!desc->buffer)
+            return 0;
+
         assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
         /* Compute the offset within the buffer */
         struct anv_push_constants *push =
-- 
2.30.2
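
Below is a minimal application-side sketch of the case this patch handles, added here for context only; it is not part of the change. It assumes the implementation exposes a feature that allows VK_NULL_HANDLE in descriptor writes (e.g. nullDescriptor from VK_EXT_robustness2) and that "device" and "set" are valid handles whose set layout has a storage buffer at binding 0; the helper name is hypothetical. With this patch, such a write reaches the new buffer == NULL path in anv_descriptor_set_write_buffer(), which zeroes the descriptor memory, and emit_binding_table() later binds device->null_surface_state instead of dereferencing a missing buffer view.

#include <vulkan/vulkan.h>

/* Hypothetical helper: write a NULL storage-buffer descriptor. */
static void
write_null_storage_buffer(VkDevice device, VkDescriptorSet set)
{
   const VkDescriptorBufferInfo buffer_info = {
      .buffer = VK_NULL_HANDLE,   /* NULL descriptor: no buffer bound */
      .offset = 0,
      .range  = VK_WHOLE_SIZE,
   };
   const VkWriteDescriptorSet write = {
      .sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
      .dstSet          = set,
      .dstBinding      = 0,
      .dstArrayElement = 0,
      .descriptorCount = 1,
      .descriptorType  = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
      .pBufferInfo     = &buffer_info,
   };

   /* This used to hit the now-removed assert(buffer) in
    * anv_UpdateDescriptorSets(); with the patch the driver writes a zeroed
    * descriptor instead.
    */
   vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
}

Allocating the null surface state at offset 0 of the surface state pool is what makes the memset-to-zero approach sufficient: a zeroed descriptor's surface handle already refers to a valid null surface, and NULL UBOs pushed as constants read zeros from the high bytes (>= 1024) of the workaround BO.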