--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_meta.h"
+#include "anv_meta_clear.h"
+#include "anv_private.h"
- #include "anv_nir_builder.h"
++#include "glsl/nir/nir_builder.h"
+
+struct anv_render_pass anv_meta_dummy_renderpass = {0};
+
+static nir_shader *
+build_nir_vertex_shader(bool attr_flat)
+{
+ nir_builder b;
+
+ const struct glsl_type *vertex_type = glsl_vec4_type();
+
- nir_builder_init_simple_shader(&b, MESA_SHADER_VERTEX);
++ nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
+
+ nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
+ vertex_type, "a_pos");
+ pos_in->data.location = VERT_ATTRIB_GENERIC0;
+ nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
+ vertex_type, "gl_Position");
+ pos_out->data.location = VARYING_SLOT_POS;
+ nir_copy_var(&b, pos_out, pos_in);
+
+ /* Add one more pass-through attribute. For clear shaders, this is used
+ * to store the color and for blit shaders it's the texture coordinate.
+ */
+ const struct glsl_type *attr_type = glsl_vec4_type();
+ nir_variable *attr_in = nir_variable_create(b.shader, nir_var_shader_in,
+ attr_type, "a_attr");
+ attr_in->data.location = VERT_ATTRIB_GENERIC1;
+ nir_variable *attr_out = nir_variable_create(b.shader, nir_var_shader_out,
+ attr_type, "v_attr");
+ attr_out->data.location = VARYING_SLOT_VAR0;
+ attr_out->data.interpolation = attr_flat ? INTERP_QUALIFIER_FLAT :
+ INTERP_QUALIFIER_SMOOTH;
+ nir_copy_var(&b, attr_out, attr_in);
+
+ return b.shader;
+}
+
+static nir_shader *
+build_nir_copy_fragment_shader(enum glsl_sampler_dim tex_dim)
+{
+ nir_builder b;
+
- nir_builder_init_simple_shader(&b, MESA_SHADER_FRAGMENT);
++ nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
+
+ const struct glsl_type *color_type = glsl_vec4_type();
+
+ nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
+ glsl_vec4_type(), "v_attr");
+ tex_pos_in->data.location = VARYING_SLOT_VAR0;
+
+ const struct glsl_type *sampler_type =
+ glsl_sampler_type(tex_dim, false, false, glsl_get_base_type(color_type));
+ nir_variable *sampler = nir_variable_create(b.shader, nir_var_uniform,
+ sampler_type, "s_tex");
+ sampler->data.descriptor_set = 0;
+ sampler->data.binding = 0;
+
+ nir_tex_instr *tex = nir_tex_instr_create(b.shader, 1);
+ tex->sampler_dim = tex_dim;
+ tex->op = nir_texop_tex;
+ tex->src[0].src_type = nir_tex_src_coord;
+ tex->src[0].src = nir_src_for_ssa(nir_load_var(&b, tex_pos_in));
+ tex->dest_type = nir_type_float; /* TODO */
+
+ if (tex_dim == GLSL_SAMPLER_DIM_2D)
+ tex->is_array = true;
+ tex->coord_components = 3;
+
+ tex->sampler = nir_deref_var_create(tex, sampler);
+
+ nir_ssa_dest_init(&tex->instr, &tex->dest, 4, "tex");
+ nir_builder_instr_insert(&b, &tex->instr);
+
+ nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
+ color_type, "f_color");
+ color_out->data.location = FRAG_RESULT_DATA0;
+ nir_store_var(&b, color_out, &tex->dest.ssa, 4);
+
+ return b.shader;
+}
+
+void
+anv_meta_save(struct anv_meta_saved_state *state,
+ const struct anv_cmd_buffer *cmd_buffer,
+ uint32_t dynamic_mask)
+{
+ state->old_pipeline = cmd_buffer->state.pipeline;
+ state->old_descriptor_set0 = cmd_buffer->state.descriptors[0];
+ memcpy(state->old_vertex_bindings, cmd_buffer->state.vertex_bindings,
+ sizeof(state->old_vertex_bindings));
+
+ state->dynamic_mask = dynamic_mask;
+ anv_dynamic_state_copy(&state->dynamic, &cmd_buffer->state.dynamic,
+ dynamic_mask);
+}
+
+void
+anv_meta_restore(const struct anv_meta_saved_state *state,
+ struct anv_cmd_buffer *cmd_buffer)
+{
+ cmd_buffer->state.pipeline = state->old_pipeline;
+ cmd_buffer->state.descriptors[0] = state->old_descriptor_set0;
+ memcpy(cmd_buffer->state.vertex_bindings, state->old_vertex_bindings,
+ sizeof(state->old_vertex_bindings));
+
+ cmd_buffer->state.vb_dirty |= (1 << ANV_META_VERTEX_BINDING_COUNT) - 1;
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_VERTEX_BIT;
+
+ anv_dynamic_state_copy(&cmd_buffer->state.dynamic, &state->dynamic,
+ state->dynamic_mask);
+ cmd_buffer->state.dirty |= state->dynamic_mask;
+}
+
+VkImageViewType
+anv_meta_get_view_type(const struct anv_image *image)
+{
+ switch (image->type) {
+ case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
+ case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
+ case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+ default:
+      unreachable("bad VkImageType");
+ }
+}
+
+static uint32_t
+meta_blit_get_dest_view_base_array_slice(const struct anv_image *dest_image,
+ const VkImageSubresourceLayers *dest_subresource,
+ const VkOffset3D *dest_offset)
+{
+ switch (dest_image->type) {
+ case VK_IMAGE_TYPE_1D:
+ case VK_IMAGE_TYPE_2D:
+ return dest_subresource->baseArrayLayer;
+ case VK_IMAGE_TYPE_3D:
+ /* HACK: Vulkan does not allow attaching a 3D image to a framebuffer,
+ * but meta does it anyway. When doing so, we translate the
+ * destination's z offset into an array offset.
+ */
+ return dest_offset->z;
+ default:
+ assert(!"bad VkImageType");
+ return 0;
+ }
+}
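+
+/* A rough worked example of the hack above: for a 3D destination with
+ * dstOffset.z == 5, the returned base array slice is 5, and the per-slice
+ * loops in the copy entrypoints below then render each copied depth slice
+ * into successive "layers" 5, 6, 7, ... of the destination view.
+ */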
+
+static void
+anv_device_init_meta_blit_state(struct anv_device *device)
+{
+ anv_CreateRenderPass(anv_device_to_handle(device),
+ &(VkRenderPassCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = 1,
+ .pAttachments = &(VkAttachmentDescription) {
+ .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
+ .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .subpassCount = 1,
+ .pSubpasses = &(VkSubpassDescription) {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .inputAttachmentCount = 0,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &(VkAttachmentReference) {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .pResolveAttachments = NULL,
+ .pDepthStencilAttachment = &(VkAttachmentReference) {
+ .attachment = VK_ATTACHMENT_UNUSED,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .preserveAttachmentCount = 1,
+ .pPreserveAttachments = &(VkAttachmentReference) {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ },
+ .dependencyCount = 0,
+ }, NULL, &device->meta_state.blit.render_pass);
+
+   /* We don't use a vertex shader for blitting, but instead build and pass
+    * the VUEs directly to the rasterization backend. However, we do need
+    * to provide a vertex shader so that the compiler does not dead-code our
+    * inputs.
+    */
+ struct anv_shader_module vs = {
+ .nir = build_nir_vertex_shader(false),
+ };
+
+ struct anv_shader_module fs_2d = {
+ .nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_2D),
+ };
+
+ struct anv_shader_module fs_3d = {
+ .nir = build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_3D),
+ };
+
+ VkPipelineVertexInputStateCreateInfo vi_create_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 2,
+ .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+ {
+ .binding = 0,
+ .stride = 0,
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+ },
+ {
+ .binding = 1,
+ .stride = 5 * sizeof(float),
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+ },
+ },
+ .vertexAttributeDescriptionCount = 3,
+ .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+ {
+ /* VUE Header */
+ .location = 0,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32B32A32_UINT,
+ .offset = 0
+ },
+ {
+ /* Position */
+ .location = 1,
+ .binding = 1,
+ .format = VK_FORMAT_R32G32_SFLOAT,
+ .offset = 0
+ },
+ {
+ /* Texture Coordinate */
+ .location = 2,
+ .binding = 1,
+ .format = VK_FORMAT_R32G32B32_SFLOAT,
+ .offset = 8
+ }
+ }
+ };
+
+ VkDescriptorSetLayoutCreateInfo ds_layout_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .bindingCount = 1,
+ .pBinding = (VkDescriptorSetLayoutBinding[]) {
+ {
+ .binding = 0,
+ .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ .descriptorCount = 1,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .pImmutableSamplers = NULL
+ },
+ }
+ };
+ anv_CreateDescriptorSetLayout(anv_device_to_handle(device), &ds_layout_info,
+ NULL, &device->meta_state.blit.ds_layout);
+
+ anv_CreatePipelineLayout(anv_device_to_handle(device),
+ &(VkPipelineLayoutCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = 1,
+ .pSetLayouts = &device->meta_state.blit.ds_layout,
+ },
+ NULL, &device->meta_state.blit.pipeline_layout);
+
+ VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
+ {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_VERTEX_BIT,
+ .module = anv_shader_module_to_handle(&vs),
+ .pName = "main",
+ .pSpecializationInfo = NULL
+ }, {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .module = VK_NULL_HANDLE, /* TEMPLATE VALUE! FILL ME IN! */
+ .pName = "main",
+ .pSpecializationInfo = NULL
+ },
+ };
+
+ const VkGraphicsPipelineCreateInfo vk_pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = ARRAY_SIZE(pipeline_shader_stages),
+ .pStages = pipeline_shader_stages,
+ .pVertexInputState = &vi_create_info,
+ .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ .primitiveRestartEnable = false,
+ },
+ .pViewportState = &(VkPipelineViewportStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1,
+ },
+ .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .rasterizerDiscardEnable = false,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .cullMode = VK_CULL_MODE_NONE,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE
+ },
+ .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .rasterizationSamples = 1,
+ .sampleShadingEnable = false,
+ .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
+ },
+ .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .attachmentCount = 1,
+ .pAttachments = (VkPipelineColorBlendAttachmentState []) {
+ { .colorWriteMask =
+ VK_COLOR_COMPONENT_A_BIT |
+ VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT },
+ }
+ },
+ .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = 9,
+ .pDynamicStates = (VkDynamicState[]) {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_LINE_WIDTH,
+ VK_DYNAMIC_STATE_DEPTH_BIAS,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ },
+ },
+ .flags = 0,
+ .layout = device->meta_state.blit.pipeline_layout,
+ .renderPass = device->meta_state.blit.render_pass,
+ .subpass = 0,
+ };
+
+ const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
+ .use_repclear = false,
+ .disable_viewport = true,
+ .disable_scissor = true,
+ .disable_vs = true,
+ .use_rectlist = true
+ };
+
+ pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_2d);
+ anv_graphics_pipeline_create(anv_device_to_handle(device),
+ &vk_pipeline_info, &anv_pipeline_info,
+ NULL, &device->meta_state.blit.pipeline_2d_src);
+
+ pipeline_shader_stages[1].module = anv_shader_module_to_handle(&fs_3d);
+ anv_graphics_pipeline_create(anv_device_to_handle(device),
+ &vk_pipeline_info, &anv_pipeline_info,
+ NULL, &device->meta_state.blit.pipeline_3d_src);
+
+ ralloc_free(vs.nir);
+ ralloc_free(fs_2d.nir);
+ ralloc_free(fs_3d.nir);
+}
+
+static void
+meta_prepare_blit(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_meta_saved_state *saved_state)
+{
+ anv_meta_save(saved_state, cmd_buffer,
+ (1 << VK_DYNAMIC_STATE_VIEWPORT));
+}
+
+struct blit_region {
+ VkOffset3D src_offset;
+ VkExtent3D src_extent;
+ VkOffset3D dest_offset;
+ VkExtent3D dest_extent;
+};
+
+static void
+meta_emit_blit(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_image *src_image,
+ struct anv_image_view *src_iview,
+ VkOffset3D src_offset,
+ VkExtent3D src_extent,
+ struct anv_image *dest_image,
+ struct anv_image_view *dest_iview,
+ VkOffset3D dest_offset,
+ VkExtent3D dest_extent,
+ VkFilter blit_filter)
+{
+ struct anv_device *device = cmd_buffer->device;
+ VkDescriptorPool dummy_desc_pool = (VkDescriptorPool)1;
+
+ struct blit_vb_data {
+ float pos[2];
+ float tex_coord[3];
+ } *vb_data;
+
+ unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);
+
+ struct anv_state vb_state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, vb_size, 16);
+ memset(vb_state.map, 0, sizeof(struct anv_vue_header));
+ vb_data = vb_state.map + sizeof(struct anv_vue_header);
+
+ vb_data[0] = (struct blit_vb_data) {
+ .pos = {
+ dest_offset.x + dest_extent.width,
+ dest_offset.y + dest_extent.height,
+ },
+ .tex_coord = {
+ (float)(src_offset.x + src_extent.width) / (float)src_iview->extent.width,
+ (float)(src_offset.y + src_extent.height) / (float)src_iview->extent.height,
+ (float)src_offset.z / (float)src_iview->extent.depth,
+ },
+ };
+
+ vb_data[1] = (struct blit_vb_data) {
+ .pos = {
+ dest_offset.x,
+ dest_offset.y + dest_extent.height,
+ },
+ .tex_coord = {
+ (float)src_offset.x / (float)src_iview->extent.width,
+ (float)(src_offset.y + src_extent.height) / (float)src_iview->extent.height,
+ (float)src_offset.z / (float)src_iview->extent.depth,
+ },
+ };
+
+ vb_data[2] = (struct blit_vb_data) {
+ .pos = {
+ dest_offset.x,
+ dest_offset.y,
+ },
+ .tex_coord = {
+ (float)src_offset.x / (float)src_iview->extent.width,
+ (float)src_offset.y / (float)src_iview->extent.height,
+ (float)src_offset.z / (float)src_iview->extent.depth,
+ },
+ };
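+
+   /* For illustration: blitting a 16x16 region at src_offset (0, 0) of a
+    * 64x64 source level to dest_offset (8, 8) produces the three RECTLIST
+    * corners (24, 24), (8, 24) and (8, 8) above, with texture coordinates
+    * (0.25, 0.25), (0.0, 0.25) and (0.0, 0.0); the hardware derives the
+    * fourth corner of the rectangle itself.
+    */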
+
+ anv_state_clflush(vb_state);
+
+ struct anv_buffer vertex_buffer = {
+ .device = device,
+ .size = vb_size,
+ .bo = &device->dynamic_state_block_pool.bo,
+ .offset = vb_state.offset,
+ };
+
+ anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer), 0, 2,
+ (VkBuffer[]) {
+ anv_buffer_to_handle(&vertex_buffer),
+ anv_buffer_to_handle(&vertex_buffer)
+ },
+ (VkDeviceSize[]) {
+ 0,
+ sizeof(struct anv_vue_header),
+ });
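+
+   /* A sketch of the layout (assuming the VUE header is 16 bytes): both
+    * bindings alias the same allocation.  Binding 0 starts at offset 0 and,
+    * with its zero stride, feeds every vertex the same zeroed VUE header;
+    * binding 1 starts 16 bytes in and advances by sizeof(struct blit_vb_data)
+    * == 20 bytes per vertex, which is why the pipeline's second vertex
+    * binding uses a 5-float stride and its texcoord attribute sits at byte
+    * offset 8.
+    */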
+
+ VkSampler sampler;
+ ANV_CALL(CreateSampler)(anv_device_to_handle(device),
+ &(VkSamplerCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+ .magFilter = blit_filter,
+ .minFilter = blit_filter,
+ }, &cmd_buffer->pool->alloc, &sampler);
+
+ VkDescriptorSet set;
+ anv_AllocateDescriptorSets(anv_device_to_handle(device),
+ &(VkDescriptorSetAllocateInfo) {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ .descriptorPool = dummy_desc_pool,
+ .setLayoutCount = 1,
+ .pSetLayouts = &device->meta_state.blit.ds_layout
+ }, &set);
+ anv_UpdateDescriptorSets(anv_device_to_handle(device),
+ 1, /* writeCount */
+ (VkWriteDescriptorSet[]) {
+ {
+ .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ .dstSet = set,
+ .dstBinding = 0,
+ .dstArrayElement = 0,
+ .descriptorCount = 1,
+ .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ .pImageInfo = (VkDescriptorImageInfo[]) {
+ {
+ .sampler = sampler,
+ .imageView = anv_image_view_to_handle(src_iview),
+ .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ }
+ }
+ }, 0, NULL);
+
+ VkFramebuffer fb;
+ anv_CreateFramebuffer(anv_device_to_handle(device),
+ &(VkFramebufferCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .attachmentCount = 1,
+ .pAttachments = (VkImageView[]) {
+ anv_image_view_to_handle(dest_iview),
+ },
+ .width = dest_iview->extent.width,
+ .height = dest_iview->extent.height,
+ .layers = 1
+ }, &cmd_buffer->pool->alloc, &fb);
+
+ ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
+ &(VkRenderPassBeginInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderPass = device->meta_state.blit.render_pass,
+ .framebuffer = fb,
+ .renderArea = {
+ .offset = { dest_offset.x, dest_offset.y },
+ .extent = { dest_extent.width, dest_extent.height },
+ },
+ .clearValueCount = 0,
+ .pClearValues = NULL,
+ }, VK_SUBPASS_CONTENTS_INLINE);
+
+ VkPipeline pipeline;
+
+ switch (src_image->type) {
+ case VK_IMAGE_TYPE_1D:
+ anv_finishme("VK_IMAGE_TYPE_1D");
+ pipeline = device->meta_state.blit.pipeline_2d_src;
+ break;
+ case VK_IMAGE_TYPE_2D:
+ pipeline = device->meta_state.blit.pipeline_2d_src;
+ break;
+ case VK_IMAGE_TYPE_3D:
+ pipeline = device->meta_state.blit.pipeline_3d_src;
+ break;
+ default:
+      unreachable("bad VkImageType");
+ }
+
+ if (cmd_buffer->state.pipeline != anv_pipeline_from_handle(pipeline)) {
+ anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer),
+ VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ }
+
+ anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 1,
+ &(VkViewport) {
+ .x = 0.0f,
+ .y = 0.0f,
+ .width = dest_iview->extent.width,
+ .height = dest_iview->extent.height,
+ .minDepth = 0.0f,
+ .maxDepth = 1.0f,
+ });
+
+ anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ device->meta_state.blit.pipeline_layout, 0, 1,
+ &set, 0, NULL);
+
+ ANV_CALL(CmdDraw)(anv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);
+
+ ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));
+
+ /* At the point where we emit the draw call, all data from the
+ * descriptor sets, etc. has been used. We are free to delete it.
+ */
+ anv_descriptor_set_destroy(device, anv_descriptor_set_from_handle(set));
+ anv_DestroySampler(anv_device_to_handle(device), sampler,
+ &cmd_buffer->pool->alloc);
+ anv_DestroyFramebuffer(anv_device_to_handle(device), fb,
+ &cmd_buffer->pool->alloc);
+}
+
+static void
+meta_finish_blit(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_meta_saved_state *saved_state)
+{
+ anv_meta_restore(saved_state, cmd_buffer);
+}
+
+static VkFormat
+vk_format_for_size(int bs)
+{
+ switch (bs) {
+ case 1: return VK_FORMAT_R8_UINT;
+ case 2: return VK_FORMAT_R8G8_UINT;
+ case 3: return VK_FORMAT_R8G8B8_UINT;
+ case 4: return VK_FORMAT_R8G8B8A8_UINT;
+ case 6: return VK_FORMAT_R16G16B16_UINT;
+ case 8: return VK_FORMAT_R16G16B16A16_UINT;
+ case 12: return VK_FORMAT_R32G32B32_UINT;
+ case 16: return VK_FORMAT_R32G32B32A32_UINT;
+ default:
+ unreachable("Invalid format block size");
+ }
+}
+
+static void
+do_buffer_copy(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_bo *src, uint64_t src_offset,
+ struct anv_bo *dest, uint64_t dest_offset,
+ int width, int height, VkFormat copy_format)
+{
+ VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
+
+ VkImageCreateInfo image_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .format = copy_format,
+ .extent = {
+ .width = width,
+ .height = height,
+ .depth = 1,
+ },
+ .mipLevels = 1,
+ .arrayLayers = 1,
+ .samples = 1,
+ .tiling = VK_IMAGE_TILING_LINEAR,
+ .usage = 0,
+ .flags = 0,
+ };
+
+ VkImage src_image;
+ image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+ anv_CreateImage(vk_device, &image_info,
+ &cmd_buffer->pool->alloc, &src_image);
+
+ VkImage dest_image;
+ image_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ anv_CreateImage(vk_device, &image_info,
+ &cmd_buffer->pool->alloc, &dest_image);
+
+ /* We could use a vk call to bind memory, but that would require
+ * creating a dummy memory object etc. so there's really no point.
+ */
+ anv_image_from_handle(src_image)->bo = src;
+ anv_image_from_handle(src_image)->offset = src_offset;
+ anv_image_from_handle(dest_image)->bo = dest;
+ anv_image_from_handle(dest_image)->offset = dest_offset;
+
+ struct anv_image_view src_iview;
+ anv_image_view_init(&src_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = src_image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = copy_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ struct anv_image_view dest_iview;
+ anv_image_view_init(&dest_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = dest_image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = copy_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ },
+ cmd_buffer);
+
+ meta_emit_blit(cmd_buffer,
+ anv_image_from_handle(src_image),
+ &src_iview,
+ (VkOffset3D) { 0, 0, 0 },
+ (VkExtent3D) { width, height, 1 },
+ anv_image_from_handle(dest_image),
+ &dest_iview,
+ (VkOffset3D) { 0, 0, 0 },
+ (VkExtent3D) { width, height, 1 },
+ VK_FILTER_NEAREST);
+
+ anv_DestroyImage(vk_device, src_image, &cmd_buffer->pool->alloc);
+ anv_DestroyImage(vk_device, dest_image, &cmd_buffer->pool->alloc);
+}
+
+void anv_CmdCopyBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
+ ANV_FROM_HANDLE(anv_buffer, dest_buffer, destBuffer);
+
+ struct anv_meta_saved_state saved_state;
+
+ meta_prepare_blit(cmd_buffer, &saved_state);
+
+ for (unsigned r = 0; r < regionCount; r++) {
+ uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
+ uint64_t dest_offset = dest_buffer->offset + pRegions[r].dstOffset;
+ uint64_t copy_size = pRegions[r].size;
+
+ /* First, we compute the biggest format that can be used with the
+ * given offsets and size.
+ */
+ int bs = 16;
+
+ int fs = ffs(src_offset) - 1;
+ if (fs != -1)
+ bs = MIN2(bs, 1 << fs);
+ assert(src_offset % bs == 0);
+
+ fs = ffs(dest_offset) - 1;
+ if (fs != -1)
+ bs = MIN2(bs, 1 << fs);
+ assert(dest_offset % bs == 0);
+
+ fs = ffs(pRegions[r].size) - 1;
+ if (fs != -1)
+ bs = MIN2(bs, 1 << fs);
+ assert(pRegions[r].size % bs == 0);
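+
+      /* Worked example: srcOffset 36, dstOffset 132 and size 1004 are all
+       * multiples of 4 but not of 8, so the ffs() checks above trim bs from
+       * 16 down to 4 and the copy proceeds in 4-byte texels
+       * (VK_FORMAT_R8G8B8A8_UINT below).
+       */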
+
+ VkFormat copy_format = vk_format_for_size(bs);
+
+      /* This is the maximum possible width/height our HW can handle */
+ uint64_t max_surface_dim = 1 << 14;
+
+ /* First, we make a bunch of max-sized copies */
+ uint64_t max_copy_size = max_surface_dim * max_surface_dim * bs;
+ while (copy_size > max_copy_size) {
+ do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+ dest_buffer->bo, dest_offset,
+ max_surface_dim, max_surface_dim, copy_format);
+ copy_size -= max_copy_size;
+ src_offset += max_copy_size;
+ dest_offset += max_copy_size;
+ }
+
+ uint64_t height = copy_size / (max_surface_dim * bs);
+ assert(height < max_surface_dim);
+ if (height != 0) {
+ uint64_t rect_copy_size = height * max_surface_dim * bs;
+ do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+ dest_buffer->bo, dest_offset,
+ max_surface_dim, height, copy_format);
+ copy_size -= rect_copy_size;
+ src_offset += rect_copy_size;
+ dest_offset += rect_copy_size;
+ }
+
+ if (copy_size != 0) {
+ do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
+ dest_buffer->bo, dest_offset,
+ copy_size / bs, 1, copy_format);
+ }
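+
+      /* Rough sketch of the split: with bs == 16, a 1 MiB region needs no
+       * max-sized (16384 x 16384) copies, is covered by a single 16384 x 4
+       * rectangle (exactly 1 MiB at 16 bytes per texel), and leaves nothing
+       * for the final single-row copy; a 16-byte-aligned 70000-byte region
+       * skips the first two phases and is written as one 4375 x 1 copy.
+       */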
+ }
+
+ meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+void anv_CmdCopyImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+ ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+
+ struct anv_meta_saved_state saved_state;
+
+ meta_prepare_blit(cmd_buffer, &saved_state);
+
+ for (unsigned r = 0; r < regionCount; r++) {
+ struct anv_image_view src_iview;
+ anv_image_view_init(&src_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = srcImage,
+ .viewType = anv_meta_get_view_type(src_image),
+ .format = src_image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = pRegions[r].srcSubresource.aspectMask,
+ .baseMipLevel = pRegions[r].srcSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = pRegions[r].srcSubresource.baseArrayLayer,
+ .layerCount = pRegions[r].dstSubresource.layerCount,
+ },
+ },
+ cmd_buffer);
+
+ const VkOffset3D dest_offset = {
+ .x = pRegions[r].dstOffset.x,
+ .y = pRegions[r].dstOffset.y,
+ .z = 0,
+ };
+
+ unsigned num_slices;
+ if (src_image->type == VK_IMAGE_TYPE_3D) {
+ assert(pRegions[r].srcSubresource.layerCount == 1 &&
+ pRegions[r].dstSubresource.layerCount == 1);
+ num_slices = pRegions[r].extent.depth;
+ } else {
+ assert(pRegions[r].srcSubresource.layerCount ==
+ pRegions[r].dstSubresource.layerCount);
+ assert(pRegions[r].extent.depth == 1);
+ num_slices = pRegions[r].dstSubresource.layerCount;
+ }
+
+ const uint32_t dest_base_array_slice =
+ meta_blit_get_dest_view_base_array_slice(dest_image,
+ &pRegions[r].dstSubresource,
+ &pRegions[r].dstOffset);
+
+ for (unsigned slice = 0; slice < num_slices; slice++) {
+ VkOffset3D src_offset = pRegions[r].srcOffset;
+ src_offset.z += slice;
+
+ struct anv_image_view dest_iview;
+ anv_image_view_init(&dest_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = destImage,
+ .viewType = anv_meta_get_view_type(dest_image),
+ .format = dest_image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = pRegions[r].dstSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = dest_base_array_slice + slice,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ meta_emit_blit(cmd_buffer,
+ src_image, &src_iview,
+ src_offset,
+ pRegions[r].extent,
+ dest_image, &dest_iview,
+ dest_offset,
+ pRegions[r].extent,
+ VK_FILTER_NEAREST);
+ }
+ }
+
+ meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+void anv_CmdBlitImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkFilter filter)
+
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+ ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+
+ struct anv_meta_saved_state saved_state;
+
+ anv_finishme("respect VkFilter");
+
+ meta_prepare_blit(cmd_buffer, &saved_state);
+
+ for (unsigned r = 0; r < regionCount; r++) {
+ struct anv_image_view src_iview;
+ anv_image_view_init(&src_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = srcImage,
+ .viewType = anv_meta_get_view_type(src_image),
+ .format = src_image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = pRegions[r].srcSubresource.aspectMask,
+ .baseMipLevel = pRegions[r].srcSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = pRegions[r].srcSubresource.baseArrayLayer,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ const VkOffset3D dest_offset = {
+ .x = pRegions[r].dstOffset.x,
+ .y = pRegions[r].dstOffset.y,
+ .z = 0,
+ };
+
+ const uint32_t dest_array_slice =
+ meta_blit_get_dest_view_base_array_slice(dest_image,
+ &pRegions[r].dstSubresource,
+ &pRegions[r].dstOffset);
+
+ if (pRegions[r].srcSubresource.layerCount > 1)
+ anv_finishme("FINISHME: copy multiple array layers");
+
+ if (pRegions[r].dstExtent.depth > 1)
+ anv_finishme("FINISHME: copy multiple depth layers");
+
+ struct anv_image_view dest_iview;
+ anv_image_view_init(&dest_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = destImage,
+ .viewType = anv_meta_get_view_type(dest_image),
+ .format = dest_image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = pRegions[r].dstSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = dest_array_slice,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ meta_emit_blit(cmd_buffer,
+ src_image, &src_iview,
+ pRegions[r].srcOffset,
+ pRegions[r].srcExtent,
+ dest_image, &dest_iview,
+ dest_offset,
+ pRegions[r].dstExtent,
+ filter);
+ }
+
+ meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+static struct anv_image *
+make_image_for_buffer(VkDevice vk_device, VkBuffer vk_buffer, VkFormat format,
+ VkImageUsageFlags usage,
+ VkImageType image_type,
+ const VkAllocationCallbacks *alloc,
+ const VkBufferImageCopy *copy)
+{
+ ANV_FROM_HANDLE(anv_buffer, buffer, vk_buffer);
+
+ VkExtent3D extent = copy->imageExtent;
+ if (copy->bufferRowLength)
+ extent.width = copy->bufferRowLength;
+ if (copy->bufferImageHeight)
+ extent.height = copy->bufferImageHeight;
+ extent.depth = 1;
+
+ VkImage vk_image;
+ VkResult result = anv_CreateImage(vk_device,
+ &(VkImageCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .format = format,
+ .extent = extent,
+ .mipLevels = 1,
+ .arrayLayers = 1,
+ .samples = 1,
+ .tiling = VK_IMAGE_TILING_LINEAR,
+ .usage = usage,
+ .flags = 0,
+ }, alloc, &vk_image);
+ assert(result == VK_SUCCESS);
+
+ ANV_FROM_HANDLE(anv_image, image, vk_image);
+
+ /* We could use a vk call to bind memory, but that would require
+ * creating a dummy memory object etc. so there's really no point.
+ */
+ image->bo = buffer->bo;
+ image->offset = buffer->offset + copy->bufferOffset;
+
+ return image;
+}
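+
+/* Example of the pitch handling above: copying a 100x100 region out of a
+ * buffer whose rows are laid out 128 texels apart (bufferRowLength == 128)
+ * creates a 128-wide proxy image so that its row pitch matches the buffer
+ * layout, while the callers below still blit only the 100x100 imageExtent
+ * out of it.
+ */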
+
+void anv_CmdCopyBufferToImage(
+ VkCommandBuffer commandBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_image, dest_image, destImage);
+ VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
+ const VkFormat orig_format = dest_image->format->vk_format;
+ struct anv_meta_saved_state saved_state;
+
+ meta_prepare_blit(cmd_buffer, &saved_state);
+
+ for (unsigned r = 0; r < regionCount; r++) {
+ VkFormat proxy_format = orig_format;
+ VkImageAspectFlags proxy_aspect = pRegions[r].imageSubresource.aspectMask;
+
+ if (orig_format == VK_FORMAT_S8_UINT) {
+ proxy_format = VK_FORMAT_R8_UINT;
+ proxy_aspect = VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ struct anv_image *src_image =
+ make_image_for_buffer(vk_device, srcBuffer, proxy_format,
+ VK_IMAGE_USAGE_SAMPLED_BIT,
+ dest_image->type, &cmd_buffer->pool->alloc,
+ &pRegions[r]);
+
+ const uint32_t dest_base_array_slice =
+ meta_blit_get_dest_view_base_array_slice(dest_image,
+ &pRegions[r].imageSubresource,
+ &pRegions[r].imageOffset);
+
+ unsigned num_slices;
+ if (dest_image->type == VK_IMAGE_TYPE_3D) {
+ assert(pRegions[r].imageSubresource.layerCount == 1);
+ num_slices = pRegions[r].imageExtent.depth;
+ } else {
+ assert(pRegions[r].imageExtent.depth == 1);
+ num_slices = pRegions[r].imageSubresource.layerCount;
+ }
+
+ for (unsigned slice = 0; slice < num_slices; slice++) {
+ struct anv_image_view src_iview;
+ anv_image_view_init(&src_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = anv_image_to_handle(src_image),
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = proxy_format,
+ .subresourceRange = {
+ .aspectMask = proxy_aspect,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+ },
+ cmd_buffer);
+
+ struct anv_image_view dest_iview;
+ anv_image_view_init(&dest_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = anv_image_to_handle(dest_image),
+ .viewType = anv_meta_get_view_type(dest_image),
+ .format = proxy_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = pRegions[r].imageSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = dest_base_array_slice + slice,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ VkOffset3D src_offset = { 0, 0, slice };
+
+ const VkOffset3D dest_offset = {
+ .x = pRegions[r].imageOffset.x,
+ .y = pRegions[r].imageOffset.y,
+ .z = 0,
+ };
+
+ meta_emit_blit(cmd_buffer,
+ src_image,
+ &src_iview,
+ src_offset,
+ pRegions[r].imageExtent,
+ dest_image,
+ &dest_iview,
+ dest_offset,
+ pRegions[r].imageExtent,
+ VK_FILTER_NEAREST);
+
+         /* Once we've done the blit, all of the actual information about
+          * the image is embedded in the command buffer, so we can just
+          * increment the offset directly in the image, effectively
+          * re-binding it to different backing memory.
+          */
+ /* XXX: Insert a real CPP */
+ src_image->offset += src_image->extent.width *
+ src_image->extent.height * 4;
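+
+         /* For example (sketch only): with a 128x64 proxy image this
+          * advances the binding by 128 * 64 * 4 = 32768 bytes per slice,
+          * which is only right for 4-byte formats (hence the XXX above).
+          */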
+ }
+
+ anv_DestroyImage(vk_device, anv_image_to_handle(src_image),
+ &cmd_buffer->pool->alloc);
+ }
+
+ meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+void anv_CmdCopyImageToBuffer(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_image, src_image, srcImage);
+ VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
+ struct anv_meta_saved_state saved_state;
+
+ meta_prepare_blit(cmd_buffer, &saved_state);
+
+ for (unsigned r = 0; r < regionCount; r++) {
+ struct anv_image_view src_iview;
+ anv_image_view_init(&src_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = srcImage,
+ .viewType = anv_meta_get_view_type(src_image),
+ .format = src_image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = pRegions[r].imageSubresource.aspectMask,
+ .baseMipLevel = pRegions[r].imageSubresource.mipLevel,
+ .levelCount = 1,
+ .baseArrayLayer = pRegions[r].imageSubresource.baseArrayLayer,
+ .layerCount = pRegions[r].imageSubresource.layerCount,
+ },
+ },
+ cmd_buffer);
+
+ VkFormat dest_format = src_image->format->vk_format;
+ if (dest_format == VK_FORMAT_S8_UINT) {
+ dest_format = VK_FORMAT_R8_UINT;
+ }
+
+ struct anv_image *dest_image =
+ make_image_for_buffer(vk_device, destBuffer, dest_format,
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ src_image->type, &cmd_buffer->pool->alloc,
+ &pRegions[r]);
+
+ unsigned num_slices;
+ if (src_image->type == VK_IMAGE_TYPE_3D) {
+ assert(pRegions[r].imageSubresource.layerCount == 1);
+ num_slices = pRegions[r].imageExtent.depth;
+ } else {
+ assert(pRegions[r].imageExtent.depth == 1);
+ num_slices = pRegions[r].imageSubresource.layerCount;
+ }
+
+ for (unsigned slice = 0; slice < num_slices; slice++) {
+ VkOffset3D src_offset = pRegions[r].imageOffset;
+ src_offset.z += slice;
+
+ struct anv_image_view dest_iview;
+ anv_image_view_init(&dest_iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = anv_image_to_handle(dest_image),
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = dest_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ meta_emit_blit(cmd_buffer,
+ anv_image_from_handle(srcImage),
+ &src_iview,
+ src_offset,
+ pRegions[r].imageExtent,
+ dest_image,
+ &dest_iview,
+ (VkOffset3D) { 0, 0, 0 },
+ pRegions[r].imageExtent,
+ VK_FILTER_NEAREST);
+
+      /* Once we've done the blit, all of the actual information about
+       * the image is embedded in the command buffer, so we can just
+       * increment the offset directly in the image, effectively
+       * re-binding it to different backing memory.
+       */
+ /* XXX: Insert a real CPP */
+ dest_image->offset += dest_image->extent.width *
+ dest_image->extent.height * 4;
+ }
+
+ anv_DestroyImage(vk_device, anv_image_to_handle(dest_image),
+ &cmd_buffer->pool->alloc);
+ }
+
+ meta_finish_blit(cmd_buffer, &saved_state);
+}
+
+void anv_CmdUpdateBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize dataSize,
+ const uint32_t* pData)
+{
+ stub();
+}
+
+void anv_CmdFillBuffer(
+ VkCommandBuffer commandBuffer,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize fillSize,
+ uint32_t data)
+{
+ stub();
+}
+
+void anv_CmdResolveImage(
+ VkCommandBuffer commandBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions)
+{
+ stub();
+}
+
+void
+anv_device_init_meta(struct anv_device *device)
+{
+ anv_device_init_meta_clear_state(device);
+ anv_device_init_meta_blit_state(device);
+}
+
+void
+anv_device_finish_meta(struct anv_device *device)
+{
+ anv_device_finish_meta_clear_state(device);
+
+ /* Blit */
+ anv_DestroyRenderPass(anv_device_to_handle(device),
+ device->meta_state.blit.render_pass, NULL);
+ anv_DestroyPipeline(anv_device_to_handle(device),
+ device->meta_state.blit.pipeline_2d_src, NULL);
+ anv_DestroyPipeline(anv_device_to_handle(device),
+ device->meta_state.blit.pipeline_3d_src, NULL);
+ anv_DestroyPipelineLayout(anv_device_to_handle(device),
+ device->meta_state.blit.pipeline_layout, NULL);
+ anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
+ device->meta_state.blit.ds_layout, NULL);
+}
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_meta.h"
+#include "anv_meta_clear.h"
+#include "anv_private.h"
- #include "anv_nir_builder.h"
++#include "glsl/nir/nir_builder.h"
+
+/** Vertex attributes for color clears. */
+struct color_clear_vattrs {
+ struct anv_vue_header vue_header;
+ float position[2]; /**< 3DPRIM_RECTLIST */
+ VkClearColorValue color;
+};
+
+/** Vertex attributes for depthstencil clears. */
+struct depthstencil_clear_vattrs {
+ struct anv_vue_header vue_header;
+   float position[2]; /**< 3DPRIM_RECTLIST */
+};
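+
+/* Size sketch (assuming the four-dword, 16-byte VUE header): a
+ * color_clear_vattrs vertex is 16 + 8 + 16 = 40 bytes and a
+ * depthstencil_clear_vattrs vertex is 16 + 8 = 24 bytes; the vertex binding
+ * strides and attribute offsets in the pipeline setup below pick these up
+ * via sizeof()/offsetof().
+ */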
+
+static void
+meta_clear_begin(struct anv_meta_saved_state *saved_state,
+ struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_meta_save(saved_state, cmd_buffer,
+ (1 << VK_DYNAMIC_STATE_VIEWPORT) |
+ (1 << VK_DYNAMIC_STATE_SCISSOR) |
+ (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE));
+
+ cmd_buffer->state.dynamic.viewport.count = 0;
+ cmd_buffer->state.dynamic.scissor.count = 0;
+}
+
+static void
+meta_clear_end(struct anv_meta_saved_state *saved_state,
+ struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_meta_restore(saved_state, cmd_buffer);
+}
+
+static void
+build_color_shaders(struct nir_shader **out_vs,
+ struct nir_shader **out_fs)
+{
+ nir_builder vs_b;
+ nir_builder fs_b;
+
- nir_builder_init_simple_shader(&vs_b, MESA_SHADER_VERTEX);
- nir_builder_init_simple_shader(&fs_b, MESA_SHADER_FRAGMENT);
++ nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
++ nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
+
+ const struct glsl_type *position_type = glsl_vec4_type();
+ const struct glsl_type *color_type = glsl_vec4_type();
+
+ nir_variable *vs_in_pos =
+ nir_variable_create(vs_b.shader, nir_var_shader_in, position_type,
+ "a_position");
+ vs_in_pos->data.location = VERT_ATTRIB_GENERIC0;
+
+ nir_variable *vs_out_pos =
+ nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
+ "gl_Position");
+ vs_out_pos->data.location = VARYING_SLOT_POS;
+
+ nir_variable *vs_in_color =
+ nir_variable_create(vs_b.shader, nir_var_shader_in, color_type,
+ "a_color");
+ vs_in_color->data.location = VERT_ATTRIB_GENERIC1;
+
+ nir_variable *vs_out_color =
+ nir_variable_create(vs_b.shader, nir_var_shader_out, color_type,
+ "v_color");
+ vs_out_color->data.location = VARYING_SLOT_VAR0;
+ vs_out_color->data.interpolation = INTERP_QUALIFIER_FLAT;
+
+ nir_variable *fs_in_color =
+ nir_variable_create(fs_b.shader, nir_var_shader_in, color_type,
+ "v_color");
+ fs_in_color->data.location = vs_out_color->data.location;
+ fs_in_color->data.interpolation = vs_out_color->data.interpolation;
+
+ nir_variable *fs_out_color =
+ nir_variable_create(fs_b.shader, nir_var_shader_out, color_type,
+ "f_color");
+ fs_out_color->data.location = FRAG_RESULT_DATA0;
+
+ nir_copy_var(&vs_b, vs_out_pos, vs_in_pos);
+ nir_copy_var(&vs_b, vs_out_color, vs_in_color);
+ nir_copy_var(&fs_b, fs_out_color, fs_in_color);
+
+ *out_vs = vs_b.shader;
+ *out_fs = fs_b.shader;
+}
+
+static struct anv_pipeline *
+create_pipeline(struct anv_device *device,
+ struct nir_shader *vs_nir,
+ struct nir_shader *fs_nir,
+ const VkPipelineVertexInputStateCreateInfo *vi_state,
+ const VkPipelineDepthStencilStateCreateInfo *ds_state,
+ const VkPipelineColorBlendStateCreateInfo *cb_state,
+ const VkAllocationCallbacks *alloc)
+{
+ VkDevice device_h = anv_device_to_handle(device);
+
+ struct anv_shader_module vs_m = { .nir = vs_nir };
+ struct anv_shader_module fs_m = { .nir = fs_nir };
+
+ VkPipeline pipeline_h;
+ anv_graphics_pipeline_create(device_h,
+ &(VkGraphicsPipelineCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = 2,
+ .pStages = (VkPipelineShaderStageCreateInfo[]) {
+ {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_VERTEX_BIT,
+ .module = anv_shader_module_to_handle(&vs_m),
+ .pName = "main",
+ },
+ {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .module = anv_shader_module_to_handle(&fs_m),
+ .pName = "main",
+ },
+ },
+ .pVertexInputState = vi_state,
+ .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ .primitiveRestartEnable = false,
+ },
+ .pViewportState = &(VkPipelineViewportStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .pViewports = NULL, /* dynamic */
+ .scissorCount = 1,
+ .pScissors = NULL, /* dynamic */
+ },
+ .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .rasterizerDiscardEnable = false,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .cullMode = VK_CULL_MODE_NONE,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ .depthBiasEnable = false,
+ },
+ .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .rasterizationSamples = 1, /* FINISHME: Multisampling */
+ .sampleShadingEnable = false,
+ .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
+ .alphaToCoverageEnable = false,
+ .alphaToOneEnable = false,
+ },
+ .pDepthStencilState = ds_state,
+ .pColorBlendState = cb_state,
+ .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
+         /* The meta clear pipeline declares all state as dynamic.
+          * As a consequence, vkCmdBindPipeline writes no dynamic state
+          * to the cmd buffer. Therefore, at the end of the meta clear,
+          * we need only restore the dynamic state that was set with
+          * vkCmdSet*().
+          */
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = 9,
+ .pDynamicStates = (VkDynamicState[]) {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_LINE_WIDTH,
+ VK_DYNAMIC_STATE_DEPTH_BIAS,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ VK_DYNAMIC_STATE_DEPTH_BOUNDS,
+ VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
+ VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ },
+ },
+ .flags = 0,
+ .renderPass = anv_render_pass_to_handle(&anv_meta_dummy_renderpass),
+ .subpass = 0,
+ },
+ &(struct anv_graphics_pipeline_create_info) {
+ .use_repclear = true,
+ .disable_viewport = true,
+ .disable_vs = true,
+ .use_rectlist = true
+ },
+ alloc,
+ &pipeline_h);
+
+ ralloc_free(vs_nir);
+ ralloc_free(fs_nir);
+
+ return anv_pipeline_from_handle(pipeline_h);
+}
+
+static void
+init_color_pipeline(struct anv_device *device)
+{
+ struct nir_shader *vs_nir;
+ struct nir_shader *fs_nir;
+ build_color_shaders(&vs_nir, &fs_nir);
+
+ const VkPipelineVertexInputStateCreateInfo vi_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 1,
+ .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+ {
+ .binding = 0,
+ .stride = sizeof(struct color_clear_vattrs),
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+ },
+ },
+ .vertexAttributeDescriptionCount = 3,
+ .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+ {
+ /* VUE Header */
+ .location = 0,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32B32A32_UINT,
+ .offset = offsetof(struct color_clear_vattrs, vue_header),
+ },
+ {
+ /* Position */
+ .location = 1,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32_SFLOAT,
+ .offset = offsetof(struct color_clear_vattrs, position),
+ },
+ {
+ /* Color */
+ .location = 2,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32B32A32_SFLOAT,
+ .offset = offsetof(struct color_clear_vattrs, color),
+ },
+ },
+ };
+
+ const VkPipelineDepthStencilStateCreateInfo ds_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = false,
+ .depthWriteEnable = false,
+ .depthBoundsTestEnable = false,
+ .stencilTestEnable = false,
+ };
+
+ const VkPipelineColorBlendStateCreateInfo cb_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = false,
+ .attachmentCount = 1,
+ .pAttachments = (VkPipelineColorBlendAttachmentState []) {
+ {
+ .blendEnable = false,
+ .colorWriteMask = VK_COLOR_COMPONENT_A_BIT |
+ VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT,
+ },
+ },
+ };
+
+ device->meta_state.clear.color_pipeline =
+ create_pipeline(device, vs_nir, fs_nir, &vi_state, &ds_state,
+ &cb_state, NULL);
+}
+
+static void
+emit_load_color_clear(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t attachment,
+ VkClearColorValue clear_value)
+{
+ struct anv_device *device = cmd_buffer->device;
+ VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+ const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+ VkPipeline pipeline_h =
+ anv_pipeline_to_handle(device->meta_state.clear.color_pipeline);
+
+ const struct color_clear_vattrs vertex_data[3] = {
+ {
+ .vue_header = { 0 },
+ .position = { 0.0, 0.0 },
+ .color = clear_value,
+ },
+ {
+ .vue_header = { 0 },
+ .position = { fb->width, 0.0 },
+ .color = clear_value,
+ },
+ {
+ .vue_header = { 0 },
+ .position = { fb->width, fb->height },
+ .color = clear_value,
+ },
+ };
+
+ struct anv_state state =
+ anv_cmd_buffer_emit_dynamic(cmd_buffer, vertex_data, sizeof(vertex_data), 16);
+
+ struct anv_buffer vertex_buffer = {
+ .device = device,
+ .size = sizeof(vertex_data),
+ .bo = &device->dynamic_state_block_pool.bo,
+ .offset = state.offset,
+ };
+
+ anv_cmd_buffer_begin_subpass(cmd_buffer,
+ &(struct anv_subpass) {
+ .color_count = 1,
+ .color_attachments = (uint32_t[]) { attachment },
+ .depth_stencil_attachment = VK_ATTACHMENT_UNUSED,
+ });
+
+ ANV_CALL(CmdSetViewport)(cmd_buffer_h, 1,
+ (VkViewport[]) {
+ {
+ .x = 0,
+ .y = 0,
+ .width = fb->width,
+ .height = fb->height,
+ .minDepth = 0.0,
+ .maxDepth = 1.0,
+ },
+ });
+
+ ANV_CALL(CmdSetScissor)(cmd_buffer_h, 1,
+ (VkRect2D[]) {
+ {
+ .offset = { 0, 0 },
+ .extent = { fb->width, fb->height },
+ }
+ });
+
+ ANV_CALL(CmdBindVertexBuffers)(cmd_buffer_h, 0, 1,
+ (VkBuffer[]) { anv_buffer_to_handle(&vertex_buffer) },
+ (VkDeviceSize[]) { 0 });
+
+ if (cmd_buffer->state.pipeline != device->meta_state.clear.color_pipeline) {
+ ANV_CALL(CmdBindPipeline)(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline_h);
+ }
+
+ ANV_CALL(CmdDraw)(cmd_buffer_h, 3, 1, 0, 0);
+}
+
+
+static void
+build_depthstencil_shaders(struct nir_shader **out_vs,
+ struct nir_shader **out_fs)
+{
+ nir_builder vs_b;
+ nir_builder fs_b;
+
- nir_builder_init_simple_shader(&vs_b, MESA_SHADER_VERTEX);
- nir_builder_init_simple_shader(&fs_b, MESA_SHADER_FRAGMENT);
++ nir_builder_init_simple_shader(&vs_b, NULL, MESA_SHADER_VERTEX, NULL);
++ nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
+
+ const struct glsl_type *position_type = glsl_vec4_type();
+
+ nir_variable *vs_in_pos =
+ nir_variable_create(vs_b.shader, nir_var_shader_in, position_type,
+ "a_position");
+ vs_in_pos->data.location = VERT_ATTRIB_GENERIC0;
+
+ nir_variable *vs_out_pos =
+ nir_variable_create(vs_b.shader, nir_var_shader_out, position_type,
+ "gl_Position");
+ vs_out_pos->data.location = VARYING_SLOT_POS;
+
+ nir_copy_var(&vs_b, vs_out_pos, vs_in_pos);
+
+ *out_vs = vs_b.shader;
+ *out_fs = fs_b.shader;
+}
+
+static struct anv_pipeline *
+create_depthstencil_pipeline(struct anv_device *device,
+ VkImageAspectFlags aspects)
+{
+ struct nir_shader *vs_nir;
+ struct nir_shader *fs_nir;
+
+ build_depthstencil_shaders(&vs_nir, &fs_nir);
+
+ const VkPipelineVertexInputStateCreateInfo vi_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = 1,
+ .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
+ {
+ .binding = 0,
+ .stride = sizeof(struct depthstencil_clear_vattrs),
+ .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
+ },
+ },
+ .vertexAttributeDescriptionCount = 2,
+ .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
+ {
+ /* VUE Header */
+ .location = 0,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32B32A32_UINT,
+ .offset = offsetof(struct depthstencil_clear_vattrs, vue_header),
+ },
+ {
+ /* Position */
+ .location = 1,
+ .binding = 0,
+ .format = VK_FORMAT_R32G32_SFLOAT,
+ .offset = offsetof(struct depthstencil_clear_vattrs, position),
+ },
+ },
+ };
+
+ const VkPipelineDepthStencilStateCreateInfo ds_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ .depthTestEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
+ .depthCompareOp = VK_COMPARE_OP_ALWAYS,
+ .depthWriteEnable = (aspects & VK_IMAGE_ASPECT_DEPTH_BIT),
+ .depthBoundsTestEnable = false,
+ .stencilTestEnable = (aspects & VK_IMAGE_ASPECT_STENCIL_BIT),
+ .front = {
+ .passOp = VK_STENCIL_OP_REPLACE,
+ .compareOp = VK_COMPARE_OP_ALWAYS,
+ .writeMask = UINT32_MAX,
+ .reference = 0, /* dynamic */
+ },
+      .back = { 0 /* don't care */ },
+ };
+
+ const VkPipelineColorBlendStateCreateInfo cb_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = false,
+ .attachmentCount = 0,
+ .pAttachments = NULL,
+ };
+
+ return create_pipeline(device, vs_nir, fs_nir, &vi_state, &ds_state,
+ &cb_state, NULL);
+}
+
+static void
+emit_load_depthstencil_clear(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t attachment,
+ VkImageAspectFlags aspects,
+ VkClearDepthStencilValue clear_value)
+{
+ struct anv_device *device = cmd_buffer->device;
+ VkCommandBuffer cmd_buffer_h = anv_cmd_buffer_to_handle(cmd_buffer);
+ const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+
+ const struct depthstencil_clear_vattrs vertex_data[3] = {
+ {
+ .vue_header = { 0 },
+ .position = { 0.0, 0.0 },
+ },
+ {
+ .vue_header = { 0 },
+ .position = { fb->width, 0.0 },
+ },
+ {
+ .vue_header = { 0 },
+ .position = { fb->width, fb->height },
+ },
+ };
+
+ struct anv_state state =
+ anv_cmd_buffer_emit_dynamic(cmd_buffer, vertex_data, sizeof(vertex_data), 16);
+
+ struct anv_buffer vertex_buffer = {
+ .device = device,
+ .size = sizeof(vertex_data),
+ .bo = &device->dynamic_state_block_pool.bo,
+ .offset = state.offset,
+ };
+
+ anv_cmd_buffer_begin_subpass(cmd_buffer,
+ &(struct anv_subpass) {
+ .color_count = 0,
+ .depth_stencil_attachment = attachment,
+ });
+
+ ANV_CALL(CmdSetViewport)(cmd_buffer_h, 1,
+ (VkViewport[]) {
+ {
+ .x = 0,
+ .y = 0,
+ .width = fb->width,
+ .height = fb->height,
+
+ /* Ignored when clearing only stencil. */
+ .minDepth = clear_value.depth,
+ .maxDepth = clear_value.depth,
+ },
+ });
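+
+   /* Informal note on the trick above: with minDepth and maxDepth both set
+    * to clear_value.depth, the viewport transform collapses every NDC z to
+    * exactly that value, so whatever depth the pass-through vertex shader
+    * produces, depth-writing fragments land on the requested clear depth.
+    */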
+
+ ANV_CALL(CmdSetScissor)(cmd_buffer_h, 1,
+ (VkRect2D[]) {
+ {
+ .offset = { 0, 0 },
+ .extent = { fb->width, fb->height },
+ }
+ });
+
+ if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ ANV_CALL(CmdSetStencilReference)(cmd_buffer_h, VK_STENCIL_FACE_FRONT_BIT,
+ clear_value.stencil);
+ }
+
+ ANV_CALL(CmdBindVertexBuffers)(cmd_buffer_h, 0, 1,
+ (VkBuffer[]) { anv_buffer_to_handle(&vertex_buffer) },
+ (VkDeviceSize[]) { 0 });
+
+ struct anv_pipeline *pipeline;
+ switch (aspects) {
+ case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
+ pipeline = device->meta_state.clear.depthstencil_pipeline;
+ break;
+ case VK_IMAGE_ASPECT_DEPTH_BIT:
+ pipeline = device->meta_state.clear.depth_only_pipeline;
+ break;
+ case VK_IMAGE_ASPECT_STENCIL_BIT:
+ pipeline = device->meta_state.clear.stencil_only_pipeline;
+ break;
+ default:
+ unreachable("expected depth or stencil aspect");
+ }
+
+ if (cmd_buffer->state.pipeline != pipeline) {
+ ANV_CALL(CmdBindPipeline)(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ anv_pipeline_to_handle(pipeline));
+ }
+
+ ANV_CALL(CmdDraw)(cmd_buffer_h, 3, 1, 0, 0);
+}
+
+static void
+init_depthstencil_pipelines(struct anv_device *device)
+{
+ device->meta_state.clear.depth_only_pipeline =
+ create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_DEPTH_BIT);
+
+ device->meta_state.clear.stencil_only_pipeline =
+ create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_STENCIL_BIT);
+
+ device->meta_state.clear.depthstencil_pipeline =
+ create_depthstencil_pipeline(device, VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT);
+}
+
+void
+anv_device_init_meta_clear_state(struct anv_device *device)
+{
+ init_color_pipeline(device);
+ init_depthstencil_pipelines(device);
+}
+
+void
+anv_device_finish_meta_clear_state(struct anv_device *device)
+{
+ VkDevice device_h = anv_device_to_handle(device);
+
+ ANV_CALL(DestroyPipeline)(device_h,
+ anv_pipeline_to_handle(device->meta_state.clear.color_pipeline),
+ NULL);
+ ANV_CALL(DestroyPipeline)(device_h,
+ anv_pipeline_to_handle(device->meta_state.clear.depth_only_pipeline),
+ NULL);
+ ANV_CALL(DestroyPipeline)(device_h,
+ anv_pipeline_to_handle(device->meta_state.clear.stencil_only_pipeline),
+ NULL);
+ ANV_CALL(DestroyPipeline)(device_h,
+ anv_pipeline_to_handle(device->meta_state.clear.depthstencil_pipeline),
+ NULL);
+}
+
+void
+anv_cmd_buffer_clear_attachments(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_render_pass *pass,
+ const VkClearValue *clear_values)
+{
+ struct anv_meta_saved_state saved_state;
+
+ /* Figure out whether or not we actually need to clear anything to avoid
+ * trashing state when clearing is a no-op.
+ */
+ bool needs_clear = false;
+ for (uint32_t a = 0; a < pass->attachment_count; ++a) {
+ struct anv_render_pass_attachment *att = &pass->attachments[a];
+
+ if (anv_format_is_color(att->format)) {
+ if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ needs_clear = true;
+ break;
+ }
+ } else {
+ if ((att->format->depth_format &&
+ att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) ||
+ (att->format->has_stencil &&
+ att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)) {
+ needs_clear = true;
+ break;
+ }
+ }
+ }
+
+ if (!needs_clear)
+ return;
+
+ meta_clear_begin(&saved_state, cmd_buffer);
+
+ for (uint32_t a = 0; a < pass->attachment_count; ++a) {
+ struct anv_render_pass_attachment *att = &pass->attachments[a];
+
+ if (anv_format_is_color(att->format)) {
+ if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ emit_load_color_clear(cmd_buffer, a, clear_values[a].color);
+ }
+ } else {
+ VkImageAspectFlags clear_aspects = 0;
+
+ if (att->format->depth_format &&
+ att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ }
+
+ if (att->format->has_stencil &&
+ att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
+ clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ if (clear_aspects) {
+ emit_load_depthstencil_clear(cmd_buffer, a, clear_aspects,
+ clear_values[a].depthStencil);
+ }
+ }
+ }
+
+ meta_clear_end(&saved_state, cmd_buffer);
+}
+
+void anv_CmdClearColorImage(
+ VkCommandBuffer commandBuffer,
+ VkImage _image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue* pColor,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_image, image, _image);
+ struct anv_meta_saved_state saved_state;
+
+ meta_clear_begin(&saved_state, cmd_buffer);
+
+ for (uint32_t r = 0; r < rangeCount; r++) {
+ for (uint32_t l = 0; l < pRanges[r].levelCount; l++) {
+ for (uint32_t s = 0; s < pRanges[r].layerCount; s++) {
+ struct anv_image_view iview;
+ anv_image_view_init(&iview, cmd_buffer->device,
+ &(VkImageViewCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = _image,
+ .viewType = anv_meta_get_view_type(image),
+ .format = image->format->vk_format,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = pRanges[r].baseMipLevel + l,
+ .levelCount = 1,
+ .baseArrayLayer = pRanges[r].baseArrayLayer + s,
+ .layerCount = 1
+ },
+ },
+ cmd_buffer);
+
+ VkFramebuffer fb;
+ anv_CreateFramebuffer(anv_device_to_handle(cmd_buffer->device),
+ &(VkFramebufferCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .attachmentCount = 1,
+ .pAttachments = (VkImageView[]) {
+ anv_image_view_to_handle(&iview),
+ },
+ .width = iview.extent.width,
+ .height = iview.extent.height,
+ .layers = 1
+ }, &cmd_buffer->pool->alloc, &fb);
+
+ VkRenderPass pass;
+ anv_CreateRenderPass(anv_device_to_handle(cmd_buffer->device),
+ &(VkRenderPassCreateInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = 1,
+ .pAttachments = &(VkAttachmentDescription) {
+ .format = iview.format->vk_format,
+                  .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
+ .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .subpassCount = 1,
+ .pSubpasses = &(VkSubpassDescription) {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .inputAttachmentCount = 0,
+ .colorAttachmentCount = 1,
+ .pColorAttachments = &(VkAttachmentReference) {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .pResolveAttachments = NULL,
+ .pDepthStencilAttachment = &(VkAttachmentReference) {
+ .attachment = VK_ATTACHMENT_UNUSED,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ .preserveAttachmentCount = 1,
+ .pPreserveAttachments = &(VkAttachmentReference) {
+ .attachment = 0,
+ .layout = VK_IMAGE_LAYOUT_GENERAL,
+ },
+ },
+ .dependencyCount = 0,
+ }, &cmd_buffer->pool->alloc, &pass);
+
+ ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
+ &(VkRenderPassBeginInfo) {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderArea = {
+ .offset = { 0, 0, },
+ .extent = {
+ .width = iview.extent.width,
+ .height = iview.extent.height,
+ },
+ },
+ .renderPass = pass,
+ .framebuffer = fb,
+ .clearValueCount = 1,
+ .pClearValues = (VkClearValue[]) {
+ { .color = *pColor },
+ },
+ }, VK_SUBPASS_CONTENTS_INLINE);
+
+ ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));
+
+ /* XXX: We're leaking the render pass and framebuffer */
+ }
+ }
+ }
+
+ meta_clear_end(&saved_state, cmd_buffer);
+}
+
+void anv_CmdClearDepthStencilImage(
+ VkCommandBuffer commandBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges)
+{
+ stub();
+}
+
+void anv_CmdClearAttachments(
+ VkCommandBuffer commandBuffer,
+ uint32_t attachmentCount,
+ const VkClearAttachment* pAttachments,
+ uint32_t rectCount,
+ const VkClearRect* pRects)
+{
+ stub();
+}