/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
31 #include "anv_meta_clear.h"
32 #include "anv_private.h"
33 #include "glsl/nir/nir_builder.h"
35 struct anv_render_pass anv_meta_dummy_renderpass
= {0};
38 build_nir_vertex_shader(bool attr_flat
)
42 const struct glsl_type
*vertex_type
= glsl_vec4_type();
44 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_VERTEX
, NULL
);
45 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit_vs");
47 nir_variable
*pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
48 vertex_type
, "a_pos");
49 pos_in
->data
.location
= VERT_ATTRIB_GENERIC0
;
50 nir_variable
*pos_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
51 vertex_type
, "gl_Position");
52 pos_out
->data
.location
= VARYING_SLOT_POS
;
53 nir_copy_var(&b
, pos_out
, pos_in
);
55 /* Add one more pass-through attribute. For clear shaders, this is used
56 * to store the color and for blit shaders it's the texture coordinate.
58 const struct glsl_type
*attr_type
= glsl_vec4_type();
59 nir_variable
*attr_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
61 attr_in
->data
.location
= VERT_ATTRIB_GENERIC1
;
62 nir_variable
*attr_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
64 attr_out
->data
.location
= VARYING_SLOT_VAR0
;
65 attr_out
->data
.interpolation
= attr_flat
? INTERP_QUALIFIER_FLAT
:
66 INTERP_QUALIFIER_SMOOTH
;
67 nir_copy_var(&b
, attr_out
, attr_in
);
73 build_nir_copy_fragment_shader(enum glsl_sampler_dim tex_dim
)
77 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
78 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit_fs");
80 const struct glsl_type
*color_type
= glsl_vec4_type();
82 nir_variable
*tex_pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
83 glsl_vec4_type(), "v_attr");
84 tex_pos_in
->data
.location
= VARYING_SLOT_VAR0
;
86 /* Swizzle the array index which comes in as Z coordinate into the right
89 unsigned swz
[] = { 0, (tex_dim
== GLSL_SAMPLER_DIM_1D
? 2 : 1), 2 };
90 nir_ssa_def
*const tex_pos
=
91 nir_swizzle(&b
, nir_load_var(&b
, tex_pos_in
), swz
,
92 (tex_dim
== GLSL_SAMPLER_DIM_1D
? 2 : 3), false);
94 const struct glsl_type
*sampler_type
=
95 glsl_sampler_type(tex_dim
, false, tex_dim
!= GLSL_SAMPLER_DIM_3D
,
96 glsl_get_base_type(color_type
));
97 nir_variable
*sampler
= nir_variable_create(b
.shader
, nir_var_uniform
,
98 sampler_type
, "s_tex");
99 sampler
->data
.descriptor_set
= 0;
100 sampler
->data
.binding
= 0;
102 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 1);
103 tex
->sampler_dim
= tex_dim
;
104 tex
->op
= nir_texop_tex
;
105 tex
->src
[0].src_type
= nir_tex_src_coord
;
106 tex
->src
[0].src
= nir_src_for_ssa(tex_pos
);
107 tex
->dest_type
= nir_type_float
; /* TODO */
108 tex
->is_array
= glsl_sampler_type_is_array(sampler_type
);
109 tex
->coord_components
= tex_pos
->num_components
;
110 tex
->sampler
= nir_deref_var_create(tex
, sampler
);
112 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, "tex");
113 nir_builder_instr_insert(&b
, &tex
->instr
);
115 nir_variable
*color_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
116 color_type
, "f_color");
117 color_out
->data
.location
= FRAG_RESULT_DATA0
;
118 nir_store_var(&b
, color_out
, &tex
->dest
.ssa
, 4);
124 anv_meta_save(struct anv_meta_saved_state
*state
,
125 const struct anv_cmd_buffer
*cmd_buffer
,
126 uint32_t dynamic_mask
)
128 state
->old_pipeline
= cmd_buffer
->state
.pipeline
;
129 state
->old_descriptor_set0
= cmd_buffer
->state
.descriptors
[0];
130 memcpy(state
->old_vertex_bindings
, cmd_buffer
->state
.vertex_bindings
,
131 sizeof(state
->old_vertex_bindings
));
133 state
->dynamic_mask
= dynamic_mask
;
134 anv_dynamic_state_copy(&state
->dynamic
, &cmd_buffer
->state
.dynamic
,
139 anv_meta_restore(const struct anv_meta_saved_state
*state
,
140 struct anv_cmd_buffer
*cmd_buffer
)
142 cmd_buffer
->state
.pipeline
= state
->old_pipeline
;
143 cmd_buffer
->state
.descriptors
[0] = state
->old_descriptor_set0
;
144 memcpy(cmd_buffer
->state
.vertex_bindings
, state
->old_vertex_bindings
,
145 sizeof(state
->old_vertex_bindings
));
147 cmd_buffer
->state
.vb_dirty
|= (1 << ANV_META_VERTEX_BINDING_COUNT
) - 1;
148 cmd_buffer
->state
.dirty
|= ANV_CMD_DIRTY_PIPELINE
;
149 cmd_buffer
->state
.descriptors_dirty
|= VK_SHADER_STAGE_FRAGMENT_BIT
;
151 anv_dynamic_state_copy(&cmd_buffer
->state
.dynamic
, &state
->dynamic
,
152 state
->dynamic_mask
);
153 cmd_buffer
->state
.dirty
|= state
->dynamic_mask
;
155 /* Since we've used the pipeline with the VS disabled, set
156 * need_query_wa. See CmdBeginQuery.
158 cmd_buffer
->state
.need_query_wa
= true;
162 anv_meta_get_view_type(const struct anv_image
*image
)
164 switch (image
->type
) {
165 case VK_IMAGE_TYPE_1D
: return VK_IMAGE_VIEW_TYPE_1D
;
166 case VK_IMAGE_TYPE_2D
: return VK_IMAGE_VIEW_TYPE_2D
;
167 case VK_IMAGE_TYPE_3D
: return VK_IMAGE_VIEW_TYPE_3D
;
169 unreachable("bad VkImageViewType");
174 meta_blit_get_dest_view_base_array_slice(const struct anv_image
*dest_image
,
175 const VkImageSubresourceLayers
*dest_subresource
,
176 const VkOffset3D
*dest_offset
)
178 switch (dest_image
->type
) {
179 case VK_IMAGE_TYPE_1D
:
180 case VK_IMAGE_TYPE_2D
:
181 return dest_subresource
->baseArrayLayer
;
182 case VK_IMAGE_TYPE_3D
:
183 /* HACK: Vulkan does not allow attaching a 3D image to a framebuffer,
184 * but meta does it anyway. When doing so, we translate the
185 * destination's z offset into an array offset.
187 return dest_offset
->z
;
189 assert(!"bad VkImageType");
195 anv_device_init_meta_blit_state(struct anv_device
*device
)
199 result
= anv_CreateRenderPass(anv_device_to_handle(device
),
200 &(VkRenderPassCreateInfo
) {
201 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
202 .attachmentCount
= 1,
203 .pAttachments
= &(VkAttachmentDescription
) {
204 .format
= VK_FORMAT_UNDEFINED
, /* Our shaders don't care */
205 .loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
,
206 .storeOp
= VK_ATTACHMENT_STORE_OP_STORE
,
207 .initialLayout
= VK_IMAGE_LAYOUT_GENERAL
,
208 .finalLayout
= VK_IMAGE_LAYOUT_GENERAL
,
211 .pSubpasses
= &(VkSubpassDescription
) {
212 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
213 .inputAttachmentCount
= 0,
214 .colorAttachmentCount
= 1,
215 .pColorAttachments
= &(VkAttachmentReference
) {
217 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
219 .pResolveAttachments
= NULL
,
220 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
221 .attachment
= VK_ATTACHMENT_UNUSED
,
222 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
224 .preserveAttachmentCount
= 1,
225 .pPreserveAttachments
= (uint32_t[]) { 0 },
227 .dependencyCount
= 0,
228 }, &device
->meta_state
.alloc
, &device
->meta_state
.blit
.render_pass
);
229 if (result
!= VK_SUCCESS
)
232 /* We don't use a vertex shader for clearing, but instead build and pass
233 * the VUEs directly to the rasterization backend. However, we do need
234 * to provide GLSL source for the vertex shader so that the compiler
235 * does not dead-code our inputs.
237 struct anv_shader_module vs
= {
238 .nir
= build_nir_vertex_shader(false),
241 struct anv_shader_module fs_1d
= {
242 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_1D
),
245 struct anv_shader_module fs_2d
= {
246 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_2D
),
249 struct anv_shader_module fs_3d
= {
250 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_3D
),
253 VkPipelineVertexInputStateCreateInfo vi_create_info
= {
254 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
255 .vertexBindingDescriptionCount
= 2,
256 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
260 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
264 .stride
= 5 * sizeof(float),
265 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
268 .vertexAttributeDescriptionCount
= 3,
269 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
274 .format
= VK_FORMAT_R32G32B32A32_UINT
,
281 .format
= VK_FORMAT_R32G32_SFLOAT
,
285 /* Texture Coordinate */
288 .format
= VK_FORMAT_R32G32B32_SFLOAT
,
294 VkDescriptorSetLayoutCreateInfo ds_layout_info
= {
295 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
297 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
300 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
301 .descriptorCount
= 1,
302 .stageFlags
= VK_SHADER_STAGE_FRAGMENT_BIT
,
303 .pImmutableSamplers
= NULL
307 result
= anv_CreateDescriptorSetLayout(anv_device_to_handle(device
),
309 &device
->meta_state
.alloc
,
310 &device
->meta_state
.blit
.ds_layout
);
311 if (result
!= VK_SUCCESS
)
312 goto fail_render_pass
;
314 result
= anv_CreatePipelineLayout(anv_device_to_handle(device
),
315 &(VkPipelineLayoutCreateInfo
) {
316 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
318 .pSetLayouts
= &device
->meta_state
.blit
.ds_layout
,
320 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_layout
);
321 if (result
!= VK_SUCCESS
)
322 goto fail_descriptor_set_layout
;
324 VkPipelineShaderStageCreateInfo pipeline_shader_stages
[] = {
326 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
327 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
328 .module
= anv_shader_module_to_handle(&vs
),
330 .pSpecializationInfo
= NULL
332 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
333 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
334 .module
= VK_NULL_HANDLE
, /* TEMPLATE VALUE! FILL ME IN! */
336 .pSpecializationInfo
= NULL
340 const VkGraphicsPipelineCreateInfo vk_pipeline_info
= {
341 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
342 .stageCount
= ARRAY_SIZE(pipeline_shader_stages
),
343 .pStages
= pipeline_shader_stages
,
344 .pVertexInputState
= &vi_create_info
,
345 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
346 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
347 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
348 .primitiveRestartEnable
= false,
350 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
351 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
355 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
356 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
357 .rasterizerDiscardEnable
= false,
358 .polygonMode
= VK_POLYGON_MODE_FILL
,
359 .cullMode
= VK_CULL_MODE_NONE
,
360 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
362 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
363 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
364 .rasterizationSamples
= 1,
365 .sampleShadingEnable
= false,
366 .pSampleMask
= (VkSampleMask
[]) { UINT32_MAX
},
368 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
369 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
370 .attachmentCount
= 1,
371 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
373 VK_COLOR_COMPONENT_A_BIT
|
374 VK_COLOR_COMPONENT_R_BIT
|
375 VK_COLOR_COMPONENT_G_BIT
|
376 VK_COLOR_COMPONENT_B_BIT
},
379 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
380 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
381 .dynamicStateCount
= 9,
382 .pDynamicStates
= (VkDynamicState
[]) {
383 VK_DYNAMIC_STATE_VIEWPORT
,
384 VK_DYNAMIC_STATE_SCISSOR
,
385 VK_DYNAMIC_STATE_LINE_WIDTH
,
386 VK_DYNAMIC_STATE_DEPTH_BIAS
,
387 VK_DYNAMIC_STATE_BLEND_CONSTANTS
,
388 VK_DYNAMIC_STATE_DEPTH_BOUNDS
,
389 VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
,
390 VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
,
391 VK_DYNAMIC_STATE_STENCIL_REFERENCE
,
395 .layout
= device
->meta_state
.blit
.pipeline_layout
,
396 .renderPass
= device
->meta_state
.blit
.render_pass
,
400 const struct anv_graphics_pipeline_create_info anv_pipeline_info
= {
401 .color_attachment_count
= -1,
402 .use_repclear
= false,
403 .disable_viewport
= true,
404 .disable_scissor
= true,
409 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_1d
);
410 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
412 &vk_pipeline_info
, &anv_pipeline_info
,
413 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_1d_src
);
414 if (result
!= VK_SUCCESS
)
415 goto fail_pipeline_layout
;
417 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_2d
);
418 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
420 &vk_pipeline_info
, &anv_pipeline_info
,
421 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_2d_src
);
422 if (result
!= VK_SUCCESS
)
423 goto fail_pipeline_1d
;
425 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_3d
);
426 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
428 &vk_pipeline_info
, &anv_pipeline_info
,
429 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_3d_src
);
430 if (result
!= VK_SUCCESS
)
431 goto fail_pipeline_2d
;
434 ralloc_free(fs_1d
.nir
);
435 ralloc_free(fs_2d
.nir
);
436 ralloc_free(fs_3d
.nir
);
441 anv_DestroyPipeline(anv_device_to_handle(device
),
442 device
->meta_state
.blit
.pipeline_2d_src
,
443 &device
->meta_state
.alloc
);
446 anv_DestroyPipeline(anv_device_to_handle(device
),
447 device
->meta_state
.blit
.pipeline_1d_src
,
448 &device
->meta_state
.alloc
);
450 fail_pipeline_layout
:
451 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
452 device
->meta_state
.blit
.pipeline_layout
,
453 &device
->meta_state
.alloc
);
454 fail_descriptor_set_layout
:
455 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
456 device
->meta_state
.blit
.ds_layout
,
457 &device
->meta_state
.alloc
);
459 anv_DestroyRenderPass(anv_device_to_handle(device
),
460 device
->meta_state
.blit
.render_pass
,
461 &device
->meta_state
.alloc
);
464 ralloc_free(fs_1d
.nir
);
465 ralloc_free(fs_2d
.nir
);
466 ralloc_free(fs_3d
.nir
);
472 meta_prepare_blit(struct anv_cmd_buffer
*cmd_buffer
,
473 struct anv_meta_saved_state
*saved_state
)
475 anv_meta_save(saved_state
, cmd_buffer
,
476 (1 << VK_DYNAMIC_STATE_VIEWPORT
));
480 VkOffset3D src_offset
;
481 VkExtent3D src_extent
;
482 VkOffset3D dest_offset
;
483 VkExtent3D dest_extent
;
486 /* Returns the user-provided VkBufferImageCopy::imageOffset in units of
487 * elements rather than texels. One element equals one texel or one block
488 * if Image is uncompressed or compressed, respectively.
490 static struct VkOffset3D
491 meta_region_offset_el(const struct anv_image
* image
,
492 const struct VkOffset3D
* offset
)
494 const struct isl_format_layout
* isl_layout
= image
->format
->isl_layout
;
495 return (VkOffset3D
) {
496 .x
= offset
->x
/ isl_layout
->bw
,
497 .y
= offset
->y
/ isl_layout
->bh
,
498 .z
= offset
->z
/ isl_layout
->bd
,
502 /* Returns the user-provided VkBufferImageCopy::imageExtent in units of
503 * elements rather than texels. One element equals one texel or one block
504 * if Image is uncompressed or compressed, respectively.
506 static struct VkExtent3D
507 meta_region_extent_el(const VkFormat format
,
508 const struct VkExtent3D
* extent
)
510 const struct isl_format_layout
* isl_layout
=
511 anv_format_for_vk_format(format
)->isl_layout
;
512 return (VkExtent3D
) {
513 .width
= DIV_ROUND_UP(extent
->width
, isl_layout
->bw
),
514 .height
= DIV_ROUND_UP(extent
->height
, isl_layout
->bh
),
515 .depth
= DIV_ROUND_UP(extent
->depth
, isl_layout
->bd
),
520 meta_emit_blit(struct anv_cmd_buffer
*cmd_buffer
,
521 struct anv_image
*src_image
,
522 struct anv_image_view
*src_iview
,
523 VkOffset3D src_offset
,
524 VkExtent3D src_extent
,
525 struct anv_image
*dest_image
,
526 struct anv_image_view
*dest_iview
,
527 VkOffset3D dest_offset
,
528 VkExtent3D dest_extent
,
529 VkFilter blit_filter
)
531 struct anv_device
*device
= cmd_buffer
->device
;
532 VkDescriptorPool dummy_desc_pool
= (VkDescriptorPool
)1;
534 struct blit_vb_data
{
539 assert(src_image
->samples
== dest_image
->samples
);
541 unsigned vb_size
= sizeof(struct anv_vue_header
) + 3 * sizeof(*vb_data
);
543 struct anv_state vb_state
=
544 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, vb_size
, 16);
545 memset(vb_state
.map
, 0, sizeof(struct anv_vue_header
));
546 vb_data
= vb_state
.map
+ sizeof(struct anv_vue_header
);
548 vb_data
[0] = (struct blit_vb_data
) {
550 dest_offset
.x
+ dest_extent
.width
,
551 dest_offset
.y
+ dest_extent
.height
,
554 (float)(src_offset
.x
+ src_extent
.width
) / (float)src_iview
->extent
.width
,
555 (float)(src_offset
.y
+ src_extent
.height
) / (float)src_iview
->extent
.height
,
556 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
560 vb_data
[1] = (struct blit_vb_data
) {
563 dest_offset
.y
+ dest_extent
.height
,
566 (float)src_offset
.x
/ (float)src_iview
->extent
.width
,
567 (float)(src_offset
.y
+ src_extent
.height
) / (float)src_iview
->extent
.height
,
568 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
572 vb_data
[2] = (struct blit_vb_data
) {
578 (float)src_offset
.x
/ (float)src_iview
->extent
.width
,
579 (float)src_offset
.y
/ (float)src_iview
->extent
.height
,
580 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
584 anv_state_clflush(vb_state
);
586 struct anv_buffer vertex_buffer
= {
589 .bo
= &device
->dynamic_state_block_pool
.bo
,
590 .offset
= vb_state
.offset
,
593 anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 2,
595 anv_buffer_to_handle(&vertex_buffer
),
596 anv_buffer_to_handle(&vertex_buffer
)
600 sizeof(struct anv_vue_header
),
604 ANV_CALL(CreateSampler
)(anv_device_to_handle(device
),
605 &(VkSamplerCreateInfo
) {
606 .sType
= VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
,
607 .magFilter
= blit_filter
,
608 .minFilter
= blit_filter
,
609 }, &cmd_buffer
->pool
->alloc
, &sampler
);
612 anv_AllocateDescriptorSets(anv_device_to_handle(device
),
613 &(VkDescriptorSetAllocateInfo
) {
614 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO
,
615 .descriptorPool
= dummy_desc_pool
,
616 .descriptorSetCount
= 1,
617 .pSetLayouts
= &device
->meta_state
.blit
.ds_layout
619 anv_UpdateDescriptorSets(anv_device_to_handle(device
),
621 (VkWriteDescriptorSet
[]) {
623 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
626 .dstArrayElement
= 0,
627 .descriptorCount
= 1,
628 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
629 .pImageInfo
= (VkDescriptorImageInfo
[]) {
632 .imageView
= anv_image_view_to_handle(src_iview
),
633 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
640 anv_CreateFramebuffer(anv_device_to_handle(device
),
641 &(VkFramebufferCreateInfo
) {
642 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
643 .attachmentCount
= 1,
644 .pAttachments
= (VkImageView
[]) {
645 anv_image_view_to_handle(dest_iview
),
647 .width
= dest_iview
->extent
.width
,
648 .height
= dest_iview
->extent
.height
,
650 }, &cmd_buffer
->pool
->alloc
, &fb
);
652 ANV_CALL(CmdBeginRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
),
653 &(VkRenderPassBeginInfo
) {
654 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
655 .renderPass
= device
->meta_state
.blit
.render_pass
,
658 .offset
= { dest_offset
.x
, dest_offset
.y
},
659 .extent
= { dest_extent
.width
, dest_extent
.height
},
661 .clearValueCount
= 0,
662 .pClearValues
= NULL
,
663 }, VK_SUBPASS_CONTENTS_INLINE
);
667 switch (src_image
->type
) {
668 case VK_IMAGE_TYPE_1D
:
669 pipeline
= device
->meta_state
.blit
.pipeline_1d_src
;
671 case VK_IMAGE_TYPE_2D
:
672 pipeline
= device
->meta_state
.blit
.pipeline_2d_src
;
674 case VK_IMAGE_TYPE_3D
:
675 pipeline
= device
->meta_state
.blit
.pipeline_3d_src
;
678 unreachable(!"bad VkImageType");
681 if (cmd_buffer
->state
.pipeline
!= anv_pipeline_from_handle(pipeline
)) {
682 anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer
),
683 VK_PIPELINE_BIND_POINT_GRAPHICS
, pipeline
);
686 anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
690 .width
= dest_iview
->extent
.width
,
691 .height
= dest_iview
->extent
.height
,
696 anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer
),
697 VK_PIPELINE_BIND_POINT_GRAPHICS
,
698 device
->meta_state
.blit
.pipeline_layout
, 0, 1,
701 ANV_CALL(CmdDraw
)(anv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
703 ANV_CALL(CmdEndRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
));
705 /* At the point where we emit the draw call, all data from the
706 * descriptor sets, etc. has been used. We are free to delete it.
708 anv_descriptor_set_destroy(device
, anv_descriptor_set_from_handle(set
));
709 anv_DestroySampler(anv_device_to_handle(device
), sampler
,
710 &cmd_buffer
->pool
->alloc
);
711 anv_DestroyFramebuffer(anv_device_to_handle(device
), fb
,
712 &cmd_buffer
->pool
->alloc
);
/* Restore the command-buffer state saved by meta_prepare_blit(). */
static void
meta_finish_blit(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_meta_saved_state *saved_state)
{
   anv_meta_restore(saved_state, cmd_buffer);
}
723 vk_format_for_size(int bs
)
725 /* Note: We intentionally use the 4-channel formats whenever we can.
726 * This is so that, when we do a RGB <-> RGBX copy, the two formats will
727 * line up even though one of them is 3/4 the size of the other.
730 case 1: return VK_FORMAT_R8_UINT
;
731 case 2: return VK_FORMAT_R8G8_UINT
;
732 case 3: return VK_FORMAT_R8G8B8_UINT
;
733 case 4: return VK_FORMAT_R8G8B8A8_UINT
;
734 case 6: return VK_FORMAT_R16G16B16_UINT
;
735 case 8: return VK_FORMAT_R16G16B16A16_UINT
;
736 case 12: return VK_FORMAT_R32G32B32_UINT
;
737 case 16: return VK_FORMAT_R32G32B32A32_UINT
;
739 unreachable("Invalid format block size");
744 do_buffer_copy(struct anv_cmd_buffer
*cmd_buffer
,
745 struct anv_bo
*src
, uint64_t src_offset
,
746 struct anv_bo
*dest
, uint64_t dest_offset
,
747 int width
, int height
, VkFormat copy_format
)
749 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
751 VkImageCreateInfo image_info
= {
752 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
753 .imageType
= VK_IMAGE_TYPE_2D
,
754 .format
= copy_format
,
763 .tiling
= VK_IMAGE_TILING_LINEAR
,
769 image_info
.usage
= VK_IMAGE_USAGE_SAMPLED_BIT
;
770 anv_CreateImage(vk_device
, &image_info
,
771 &cmd_buffer
->pool
->alloc
, &src_image
);
774 image_info
.usage
= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
775 anv_CreateImage(vk_device
, &image_info
,
776 &cmd_buffer
->pool
->alloc
, &dest_image
);
778 /* We could use a vk call to bind memory, but that would require
779 * creating a dummy memory object etc. so there's really no point.
781 anv_image_from_handle(src_image
)->bo
= src
;
782 anv_image_from_handle(src_image
)->offset
= src_offset
;
783 anv_image_from_handle(dest_image
)->bo
= dest
;
784 anv_image_from_handle(dest_image
)->offset
= dest_offset
;
786 struct anv_image_view src_iview
;
787 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
788 &(VkImageViewCreateInfo
) {
789 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
791 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
792 .format
= copy_format
,
793 .subresourceRange
= {
794 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
803 struct anv_image_view dest_iview
;
804 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
805 &(VkImageViewCreateInfo
) {
806 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
808 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
809 .format
= copy_format
,
810 .subresourceRange
= {
811 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
820 meta_emit_blit(cmd_buffer
,
821 anv_image_from_handle(src_image
),
823 (VkOffset3D
) { 0, 0, 0 },
824 (VkExtent3D
) { width
, height
, 1 },
825 anv_image_from_handle(dest_image
),
827 (VkOffset3D
) { 0, 0, 0 },
828 (VkExtent3D
) { width
, height
, 1 },
831 anv_DestroyImage(vk_device
, src_image
, &cmd_buffer
->pool
->alloc
);
832 anv_DestroyImage(vk_device
, dest_image
, &cmd_buffer
->pool
->alloc
);
835 void anv_CmdCopyBuffer(
836 VkCommandBuffer commandBuffer
,
839 uint32_t regionCount
,
840 const VkBufferCopy
* pRegions
)
842 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
843 ANV_FROM_HANDLE(anv_buffer
, src_buffer
, srcBuffer
);
844 ANV_FROM_HANDLE(anv_buffer
, dest_buffer
, destBuffer
);
846 struct anv_meta_saved_state saved_state
;
848 meta_prepare_blit(cmd_buffer
, &saved_state
);
850 for (unsigned r
= 0; r
< regionCount
; r
++) {
851 uint64_t src_offset
= src_buffer
->offset
+ pRegions
[r
].srcOffset
;
852 uint64_t dest_offset
= dest_buffer
->offset
+ pRegions
[r
].dstOffset
;
853 uint64_t copy_size
= pRegions
[r
].size
;
855 /* First, we compute the biggest format that can be used with the
856 * given offsets and size.
860 int fs
= ffs(src_offset
) - 1;
862 bs
= MIN2(bs
, 1 << fs
);
863 assert(src_offset
% bs
== 0);
865 fs
= ffs(dest_offset
) - 1;
867 bs
= MIN2(bs
, 1 << fs
);
868 assert(dest_offset
% bs
== 0);
870 fs
= ffs(pRegions
[r
].size
) - 1;
872 bs
= MIN2(bs
, 1 << fs
);
873 assert(pRegions
[r
].size
% bs
== 0);
875 VkFormat copy_format
= vk_format_for_size(bs
);
877 /* This is maximum possible width/height our HW can handle */
878 uint64_t max_surface_dim
= 1 << 14;
880 /* First, we make a bunch of max-sized copies */
881 uint64_t max_copy_size
= max_surface_dim
* max_surface_dim
* bs
;
882 while (copy_size
>= max_copy_size
) {
883 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
884 dest_buffer
->bo
, dest_offset
,
885 max_surface_dim
, max_surface_dim
, copy_format
);
886 copy_size
-= max_copy_size
;
887 src_offset
+= max_copy_size
;
888 dest_offset
+= max_copy_size
;
891 uint64_t height
= copy_size
/ (max_surface_dim
* bs
);
892 assert(height
< max_surface_dim
);
894 uint64_t rect_copy_size
= height
* max_surface_dim
* bs
;
895 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
896 dest_buffer
->bo
, dest_offset
,
897 max_surface_dim
, height
, copy_format
);
898 copy_size
-= rect_copy_size
;
899 src_offset
+= rect_copy_size
;
900 dest_offset
+= rect_copy_size
;
903 if (copy_size
!= 0) {
904 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
905 dest_buffer
->bo
, dest_offset
,
906 copy_size
/ bs
, 1, copy_format
);
910 meta_finish_blit(cmd_buffer
, &saved_state
);
913 void anv_CmdUpdateBuffer(
914 VkCommandBuffer commandBuffer
,
916 VkDeviceSize dstOffset
,
917 VkDeviceSize dataSize
,
918 const uint32_t* pData
)
920 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
921 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
922 struct anv_meta_saved_state saved_state
;
924 meta_prepare_blit(cmd_buffer
, &saved_state
);
926 /* We can't quite grab a full block because the state stream needs a
927 * little data at the top to build its linked list.
929 const uint32_t max_update_size
=
930 cmd_buffer
->device
->dynamic_state_block_pool
.block_size
- 64;
932 assert(max_update_size
< (1 << 14) * 4);
935 const uint32_t copy_size
= MIN2(dataSize
, max_update_size
);
937 struct anv_state tmp_data
=
938 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, copy_size
, 64);
940 memcpy(tmp_data
.map
, pData
, copy_size
);
944 if ((copy_size
& 15) == 0 && (dstOffset
& 15) == 0) {
945 format
= VK_FORMAT_R32G32B32A32_UINT
;
947 } else if ((copy_size
& 7) == 0 && (dstOffset
& 7) == 0) {
948 format
= VK_FORMAT_R32G32_UINT
;
951 assert((copy_size
& 3) == 0 && (dstOffset
& 3) == 0);
952 format
= VK_FORMAT_R32_UINT
;
956 do_buffer_copy(cmd_buffer
,
957 &cmd_buffer
->device
->dynamic_state_block_pool
.bo
,
959 dst_buffer
->bo
, dst_buffer
->offset
+ dstOffset
,
960 copy_size
/ bs
, 1, format
);
962 dataSize
-= copy_size
;
963 dstOffset
+= copy_size
;
964 pData
= (void *)pData
+ copy_size
;
969 choose_iview_format(struct anv_image
*image
, VkImageAspectFlagBits aspect
)
971 assert(__builtin_popcount(aspect
) == 1);
973 struct isl_surf
*surf
=
974 &anv_image_get_surface_for_aspect_mask(image
, aspect
)->isl
;
976 /* vkCmdCopyImage behaves like memcpy. Therefore we choose identical UINT
977 * formats for the source and destination image views.
979 * From the Vulkan spec (2015-12-30):
981 * vkCmdCopyImage performs image copies in a similar manner to a host
982 * memcpy. It does not perform general-purpose conversions such as
983 * scaling, resizing, blending, color-space conversion, or format
984 * conversions. Rather, it simply copies raw image data. vkCmdCopyImage
985 * can copy between images with different formats, provided the formats
986 * are compatible as defined below.
988 * [The spec later defines compatibility as having the same number of
991 return vk_format_for_size(isl_format_layouts
[surf
->format
].bs
);
995 choose_buffer_format(struct anv_image
*image
, VkImageAspectFlagBits aspect
)
997 assert(__builtin_popcount(aspect
) == 1);
999 /* vkCmdCopy* commands behave like memcpy. Therefore we choose
1000 * compatable UINT formats for the source and destination image views.
1002 * For the buffer, we go back to the original image format and get a
1003 * the format as if it were linear. This way, for RGB formats, we get
1004 * an RGB format here even if the tiled image is RGBA. XXX: This doesn't
1005 * work if the buffer is the destination.
1007 enum isl_format linear_format
= anv_get_isl_format(image
->vk_format
, aspect
,
1008 VK_IMAGE_TILING_LINEAR
,
1011 return vk_format_for_size(isl_format_layouts
[linear_format
].bs
);
1014 void anv_CmdCopyImage(
1015 VkCommandBuffer commandBuffer
,
1017 VkImageLayout srcImageLayout
,
1019 VkImageLayout destImageLayout
,
1020 uint32_t regionCount
,
1021 const VkImageCopy
* pRegions
)
1023 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1024 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
1025 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
1026 struct anv_meta_saved_state saved_state
;
1028 /* From the Vulkan 1.0 spec:
1030 * vkCmdCopyImage can be used to copy image data between multisample
1031 * images, but both images must have the same number of samples.
1033 assert(src_image
->samples
== dest_image
->samples
);
1035 meta_prepare_blit(cmd_buffer
, &saved_state
);
1037 for (unsigned r
= 0; r
< regionCount
; r
++) {
1038 assert(pRegions
[r
].srcSubresource
.aspectMask
==
1039 pRegions
[r
].dstSubresource
.aspectMask
);
1041 VkImageAspectFlags aspect
= pRegions
[r
].srcSubresource
.aspectMask
;
1043 VkFormat src_format
= choose_iview_format(src_image
, aspect
);
1044 VkFormat dst_format
= choose_iview_format(dest_image
, aspect
);
1046 struct anv_image_view src_iview
;
1047 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
1048 &(VkImageViewCreateInfo
) {
1049 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1051 .viewType
= anv_meta_get_view_type(src_image
),
1052 .format
= src_format
,
1053 .subresourceRange
= {
1054 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1055 .baseMipLevel
= pRegions
[r
].srcSubresource
.mipLevel
,
1057 .baseArrayLayer
= pRegions
[r
].srcSubresource
.baseArrayLayer
,
1058 .layerCount
= pRegions
[r
].dstSubresource
.layerCount
,
1063 const VkOffset3D dest_offset
= {
1064 .x
= pRegions
[r
].dstOffset
.x
,
1065 .y
= pRegions
[r
].dstOffset
.y
,
1069 unsigned num_slices
;
1070 if (src_image
->type
== VK_IMAGE_TYPE_3D
) {
1071 assert(pRegions
[r
].srcSubresource
.layerCount
== 1 &&
1072 pRegions
[r
].dstSubresource
.layerCount
== 1);
1073 num_slices
= pRegions
[r
].extent
.depth
;
1075 assert(pRegions
[r
].srcSubresource
.layerCount
==
1076 pRegions
[r
].dstSubresource
.layerCount
);
1077 assert(pRegions
[r
].extent
.depth
== 1);
1078 num_slices
= pRegions
[r
].dstSubresource
.layerCount
;
1081 const uint32_t dest_base_array_slice
=
1082 meta_blit_get_dest_view_base_array_slice(dest_image
,
1083 &pRegions
[r
].dstSubresource
,
1084 &pRegions
[r
].dstOffset
);
1086 for (unsigned slice
= 0; slice
< num_slices
; slice
++) {
1087 VkOffset3D src_offset
= pRegions
[r
].srcOffset
;
1088 src_offset
.z
+= slice
;
1090 struct anv_image_view dest_iview
;
1091 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1092 &(VkImageViewCreateInfo
) {
1093 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1095 .viewType
= anv_meta_get_view_type(dest_image
),
1096 .format
= dst_format
,
1097 .subresourceRange
= {
1098 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1099 .baseMipLevel
= pRegions
[r
].dstSubresource
.mipLevel
,
1101 .baseArrayLayer
= dest_base_array_slice
+ slice
,
1107 meta_emit_blit(cmd_buffer
,
1108 src_image
, &src_iview
,
1111 dest_image
, &dest_iview
,
1118 meta_finish_blit(cmd_buffer
, &saved_state
);
1121 void anv_CmdBlitImage(
1122 VkCommandBuffer commandBuffer
,
1124 VkImageLayout srcImageLayout
,
1126 VkImageLayout destImageLayout
,
1127 uint32_t regionCount
,
1128 const VkImageBlit
* pRegions
,
1132 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1133 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
1134 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
1135 struct anv_meta_saved_state saved_state
;
1137 /* From the Vulkan 1.0 spec:
1139 * vkCmdBlitImage must not be used for multisampled source or
1140 * destination images. Use vkCmdResolveImage for this purpose.
1142 assert(src_image
->samples
== 1);
1143 assert(dest_image
->samples
== 1);
1145 anv_finishme("respect VkFilter");
1147 meta_prepare_blit(cmd_buffer
, &saved_state
);
1149 for (unsigned r
= 0; r
< regionCount
; r
++) {
1150 struct anv_image_view src_iview
;
1151 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
1152 &(VkImageViewCreateInfo
) {
1153 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1155 .viewType
= anv_meta_get_view_type(src_image
),
1156 .format
= src_image
->vk_format
,
1157 .subresourceRange
= {
1158 .aspectMask
= pRegions
[r
].srcSubresource
.aspectMask
,
1159 .baseMipLevel
= pRegions
[r
].srcSubresource
.mipLevel
,
1161 .baseArrayLayer
= pRegions
[r
].srcSubresource
.baseArrayLayer
,
1167 const VkOffset3D dest_offset
= {
1168 .x
= pRegions
[r
].dstOffsets
[0].x
,
1169 .y
= pRegions
[r
].dstOffsets
[0].y
,
1173 if (pRegions
[r
].dstOffsets
[1].x
< pRegions
[r
].dstOffsets
[0].x
||
1174 pRegions
[r
].dstOffsets
[1].y
< pRegions
[r
].dstOffsets
[0].y
||
1175 pRegions
[r
].srcOffsets
[1].x
< pRegions
[r
].srcOffsets
[0].x
||
1176 pRegions
[r
].srcOffsets
[1].y
< pRegions
[r
].srcOffsets
[0].y
)
1177 anv_finishme("FINISHME: Allow flipping in blits");
1179 const VkExtent3D dest_extent
= {
1180 .width
= pRegions
[r
].dstOffsets
[1].x
- pRegions
[r
].dstOffsets
[0].x
,
1181 .height
= pRegions
[r
].dstOffsets
[1].y
- pRegions
[r
].dstOffsets
[0].y
,
1184 const VkExtent3D src_extent
= {
1185 .width
= pRegions
[r
].srcOffsets
[1].x
- pRegions
[r
].srcOffsets
[0].x
,
1186 .height
= pRegions
[r
].srcOffsets
[1].y
- pRegions
[r
].srcOffsets
[0].y
,
1189 const uint32_t dest_array_slice
=
1190 meta_blit_get_dest_view_base_array_slice(dest_image
,
1191 &pRegions
[r
].dstSubresource
,
1192 &pRegions
[r
].dstOffsets
[0]);
1194 if (pRegions
[r
].srcSubresource
.layerCount
> 1)
1195 anv_finishme("FINISHME: copy multiple array layers");
1197 if (pRegions
[r
].srcOffsets
[0].z
+ 1 != pRegions
[r
].srcOffsets
[1].z
||
1198 pRegions
[r
].dstOffsets
[0].z
+ 1 != pRegions
[r
].dstOffsets
[1].z
)
1199 anv_finishme("FINISHME: copy multiple depth layers");
1201 struct anv_image_view dest_iview
;
1202 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1203 &(VkImageViewCreateInfo
) {
1204 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1206 .viewType
= anv_meta_get_view_type(dest_image
),
1207 .format
= dest_image
->vk_format
,
1208 .subresourceRange
= {
1209 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1210 .baseMipLevel
= pRegions
[r
].dstSubresource
.mipLevel
,
1212 .baseArrayLayer
= dest_array_slice
,
1218 meta_emit_blit(cmd_buffer
,
1219 src_image
, &src_iview
,
1220 pRegions
[r
].srcOffsets
[0], src_extent
,
1221 dest_image
, &dest_iview
,
1222 dest_offset
, dest_extent
,
1226 meta_finish_blit(cmd_buffer
, &saved_state
);
1229 static struct anv_image
*
1230 make_image_for_buffer(VkDevice vk_device
, VkBuffer vk_buffer
, VkFormat format
,
1231 VkImageUsageFlags usage
,
1232 VkImageType image_type
,
1233 const VkAllocationCallbacks
*alloc
,
1234 const VkBufferImageCopy
*copy
)
1236 ANV_FROM_HANDLE(anv_buffer
, buffer
, vk_buffer
);
1238 VkExtent3D extent
= copy
->imageExtent
;
1239 if (copy
->bufferRowLength
)
1240 extent
.width
= copy
->bufferRowLength
;
1241 if (copy
->bufferImageHeight
)
1242 extent
.height
= copy
->bufferImageHeight
;
1246 VkResult result
= anv_CreateImage(vk_device
,
1247 &(VkImageCreateInfo
) {
1248 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
1249 .imageType
= VK_IMAGE_TYPE_2D
,
1255 .tiling
= VK_IMAGE_TILING_LINEAR
,
1258 }, alloc
, &vk_image
);
1259 assert(result
== VK_SUCCESS
);
1261 ANV_FROM_HANDLE(anv_image
, image
, vk_image
);
1263 /* We could use a vk call to bind memory, but that would require
1264 * creating a dummy memory object etc. so there's really no point.
1266 image
->bo
= buffer
->bo
;
1267 image
->offset
= buffer
->offset
+ copy
->bufferOffset
;
1272 void anv_CmdCopyBufferToImage(
1273 VkCommandBuffer commandBuffer
,
1276 VkImageLayout destImageLayout
,
1277 uint32_t regionCount
,
1278 const VkBufferImageCopy
* pRegions
)
1280 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1281 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
1282 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
1283 struct anv_meta_saved_state saved_state
;
1285 /* The Vulkan 1.0 spec says "dstImage must have a sample count equal to
1286 * VK_SAMPLE_COUNT_1_BIT."
1288 assert(dest_image
->samples
== 1);
1290 meta_prepare_blit(cmd_buffer
, &saved_state
);
1292 for (unsigned r
= 0; r
< regionCount
; r
++) {
1293 VkImageAspectFlags aspect
= pRegions
[r
].imageSubresource
.aspectMask
;
1295 VkFormat image_format
= choose_iview_format(dest_image
, aspect
);
1296 VkFormat buffer_format
= choose_buffer_format(dest_image
, aspect
);
1298 struct anv_image
*src_image
=
1299 make_image_for_buffer(vk_device
, srcBuffer
, buffer_format
,
1300 VK_IMAGE_USAGE_SAMPLED_BIT
,
1301 dest_image
->type
, &cmd_buffer
->pool
->alloc
,
1304 const uint32_t dest_base_array_slice
=
1305 meta_blit_get_dest_view_base_array_slice(dest_image
,
1306 &pRegions
[r
].imageSubresource
,
1307 &pRegions
[r
].imageOffset
);
1309 unsigned num_slices
;
1310 if (dest_image
->type
== VK_IMAGE_TYPE_3D
) {
1311 assert(pRegions
[r
].imageSubresource
.layerCount
== 1);
1312 num_slices
= pRegions
[r
].imageExtent
.depth
;
1314 assert(pRegions
[r
].imageExtent
.depth
== 1);
1315 num_slices
= pRegions
[r
].imageSubresource
.layerCount
;
1318 for (unsigned slice
= 0; slice
< num_slices
; slice
++) {
1319 struct anv_image_view src_iview
;
1320 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
1321 &(VkImageViewCreateInfo
) {
1322 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1323 .image
= anv_image_to_handle(src_image
),
1324 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
1325 .format
= buffer_format
,
1326 .subresourceRange
= {
1327 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1330 .baseArrayLayer
= 0,
1336 struct anv_image_view dest_iview
;
1337 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1338 &(VkImageViewCreateInfo
) {
1339 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1340 .image
= anv_image_to_handle(dest_image
),
1341 .viewType
= anv_meta_get_view_type(dest_image
),
1342 .format
= image_format
,
1343 .subresourceRange
= {
1344 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1345 .baseMipLevel
= pRegions
[r
].imageSubresource
.mipLevel
,
1347 .baseArrayLayer
= dest_base_array_slice
+ slice
,
1353 VkOffset3D dest_offset_el
= meta_region_offset_el(dest_image
,
1354 &pRegions
[r
].imageOffset
);
1355 dest_offset_el
.z
= 0;
1356 const VkExtent3D img_extent_el
= meta_region_extent_el(dest_image
->vk_format
,
1357 &pRegions
[r
].imageExtent
);
1359 meta_emit_blit(cmd_buffer
,
1362 (VkOffset3D
){0, 0, 0},
1370 /* Once we've done the blit, all of the actual information about
1371 * the image is embedded in the command buffer so we can just
1372 * increment the offset directly in the image effectively
1373 * re-binding it to different backing memory.
1375 src_image
->offset
+= src_image
->extent
.width
*
1376 src_image
->extent
.height
*
1377 src_image
->format
->isl_layout
->bs
;
1380 anv_DestroyImage(vk_device
, anv_image_to_handle(src_image
),
1381 &cmd_buffer
->pool
->alloc
);
1384 meta_finish_blit(cmd_buffer
, &saved_state
);
1387 void anv_CmdCopyImageToBuffer(
1388 VkCommandBuffer commandBuffer
,
1390 VkImageLayout srcImageLayout
,
1391 VkBuffer destBuffer
,
1392 uint32_t regionCount
,
1393 const VkBufferImageCopy
* pRegions
)
1395 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1396 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
1397 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
1398 struct anv_meta_saved_state saved_state
;
1401 /* The Vulkan 1.0 spec says "srcImage must have a sample count equal to
1402 * VK_SAMPLE_COUNT_1_BIT."
1404 assert(src_image
->samples
== 1);
1406 meta_prepare_blit(cmd_buffer
, &saved_state
);
1408 for (unsigned r
= 0; r
< regionCount
; r
++) {
1409 VkImageAspectFlags aspect
= pRegions
[r
].imageSubresource
.aspectMask
;
1411 VkFormat image_format
= choose_iview_format(src_image
, aspect
);
1412 VkFormat buffer_format
= choose_buffer_format(src_image
, aspect
);
1414 struct anv_image_view src_iview
;
1415 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
1416 &(VkImageViewCreateInfo
) {
1417 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1419 .viewType
= anv_meta_get_view_type(src_image
),
1420 .format
= image_format
,
1421 .subresourceRange
= {
1422 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1423 .baseMipLevel
= pRegions
[r
].imageSubresource
.mipLevel
,
1425 .baseArrayLayer
= pRegions
[r
].imageSubresource
.baseArrayLayer
,
1426 .layerCount
= pRegions
[r
].imageSubresource
.layerCount
,
1431 struct anv_image
*dest_image
=
1432 make_image_for_buffer(vk_device
, destBuffer
, buffer_format
,
1433 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
1434 src_image
->type
, &cmd_buffer
->pool
->alloc
,
1437 unsigned num_slices
;
1438 if (src_image
->type
== VK_IMAGE_TYPE_3D
) {
1439 assert(pRegions
[r
].imageSubresource
.layerCount
== 1);
1440 num_slices
= pRegions
[r
].imageExtent
.depth
;
1442 assert(pRegions
[r
].imageExtent
.depth
== 1);
1443 num_slices
= pRegions
[r
].imageSubresource
.layerCount
;
1446 for (unsigned slice
= 0; slice
< num_slices
; slice
++) {
1447 VkOffset3D src_offset
= pRegions
[r
].imageOffset
;
1448 src_offset
.z
+= slice
;
1450 struct anv_image_view dest_iview
;
1451 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1452 &(VkImageViewCreateInfo
) {
1453 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1454 .image
= anv_image_to_handle(dest_image
),
1455 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
1456 .format
= buffer_format
,
1457 .subresourceRange
= {
1458 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1461 .baseArrayLayer
= 0,
1467 meta_emit_blit(cmd_buffer
,
1468 anv_image_from_handle(srcImage
),
1471 pRegions
[r
].imageExtent
,
1474 (VkOffset3D
) { 0, 0, 0 },
1475 pRegions
[r
].imageExtent
,
1478 /* Once we've done the blit, all of the actual information about
1479 * the image is embedded in the command buffer so we can just
1480 * increment the offset directly in the image effectively
1481 * re-binding it to different backing memory.
1483 dest_image
->offset
+= dest_image
->extent
.width
*
1484 dest_image
->extent
.height
*
1485 src_image
->format
->isl_layout
->bs
;
1488 anv_DestroyImage(vk_device
, anv_image_to_handle(dest_image
),
1489 &cmd_buffer
->pool
->alloc
);
1492 meta_finish_blit(cmd_buffer
, &saved_state
);
1495 void anv_CmdResolveImage(
1496 VkCommandBuffer commandBuffer
,
1498 VkImageLayout srcImageLayout
,
1500 VkImageLayout destImageLayout
,
1501 uint32_t regionCount
,
1502 const VkImageResolve
* pRegions
)
1508 meta_alloc(void* _device
, size_t size
, size_t alignment
,
1509 VkSystemAllocationScope allocationScope
)
1511 struct anv_device
*device
= _device
;
1512 return device
->alloc
.pfnAllocation(device
->alloc
.pUserData
, size
, alignment
,
1513 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1517 meta_realloc(void* _device
, void *original
, size_t size
, size_t alignment
,
1518 VkSystemAllocationScope allocationScope
)
1520 struct anv_device
*device
= _device
;
1521 return device
->alloc
.pfnReallocation(device
->alloc
.pUserData
, original
,
1523 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
1527 meta_free(void* _device
, void *data
)
1529 struct anv_device
*device
= _device
;
1530 return device
->alloc
.pfnFree(device
->alloc
.pUserData
, data
);
1534 anv_device_init_meta(struct anv_device
*device
)
1536 device
->meta_state
.alloc
= (VkAllocationCallbacks
) {
1537 .pUserData
= device
,
1538 .pfnAllocation
= meta_alloc
,
1539 .pfnReallocation
= meta_realloc
,
1540 .pfnFree
= meta_free
,
1544 result
= anv_device_init_meta_clear_state(device
);
1545 if (result
!= VK_SUCCESS
)
1548 result
= anv_device_init_meta_blit_state(device
);
1549 if (result
!= VK_SUCCESS
) {
1550 anv_device_finish_meta_clear_state(device
);
1558 anv_device_finish_meta(struct anv_device
*device
)
1560 anv_device_finish_meta_clear_state(device
);
1563 anv_DestroyRenderPass(anv_device_to_handle(device
),
1564 device
->meta_state
.blit
.render_pass
,
1565 &device
->meta_state
.alloc
);
1566 anv_DestroyPipeline(anv_device_to_handle(device
),
1567 device
->meta_state
.blit
.pipeline_1d_src
,
1568 &device
->meta_state
.alloc
);
1569 anv_DestroyPipeline(anv_device_to_handle(device
),
1570 device
->meta_state
.blit
.pipeline_2d_src
,
1571 &device
->meta_state
.alloc
);
1572 anv_DestroyPipeline(anv_device_to_handle(device
),
1573 device
->meta_state
.blit
.pipeline_3d_src
,
1574 &device
->meta_state
.alloc
);
1575 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
1576 device
->meta_state
.blit
.pipeline_layout
,
1577 &device
->meta_state
.alloc
);
1578 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
1579 device
->meta_state
.blit
.ds_layout
,
1580 &device
->meta_state
.alloc
);