2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir/nir_builder.h"
28 VkOffset3D src_offset
;
29 VkExtent3D src_extent
;
30 VkOffset3D dest_offset
;
31 VkExtent3D dest_extent
;
35 build_nir_vertex_shader(void)
37 const struct glsl_type
*vec4
= glsl_vec4_type();
40 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_VERTEX
, NULL
);
41 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit_vs");
43 nir_variable
*pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
45 pos_in
->data
.location
= VERT_ATTRIB_GENERIC0
;
46 nir_variable
*pos_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
48 pos_out
->data
.location
= VARYING_SLOT_POS
;
49 nir_copy_var(&b
, pos_out
, pos_in
);
51 nir_variable
*tex_pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
53 tex_pos_in
->data
.location
= VERT_ATTRIB_GENERIC1
;
54 nir_variable
*tex_pos_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
56 tex_pos_out
->data
.location
= VARYING_SLOT_VAR0
;
57 tex_pos_out
->data
.interpolation
= INTERP_QUALIFIER_SMOOTH
;
58 nir_copy_var(&b
, tex_pos_out
, tex_pos_in
);
64 build_nir_copy_fragment_shader(enum glsl_sampler_dim tex_dim
)
66 const struct glsl_type
*vec4
= glsl_vec4_type();
69 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
70 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit_fs");
72 nir_variable
*tex_pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
74 tex_pos_in
->data
.location
= VARYING_SLOT_VAR0
;
76 /* Swizzle the array index which comes in as Z coordinate into the right
79 unsigned swz
[] = { 0, (tex_dim
== GLSL_SAMPLER_DIM_1D
? 2 : 1), 2 };
80 nir_ssa_def
*const tex_pos
=
81 nir_swizzle(&b
, nir_load_var(&b
, tex_pos_in
), swz
,
82 (tex_dim
== GLSL_SAMPLER_DIM_1D
? 2 : 3), false);
84 const struct glsl_type
*sampler_type
=
85 glsl_sampler_type(tex_dim
, false, tex_dim
!= GLSL_SAMPLER_DIM_3D
,
86 glsl_get_base_type(vec4
));
87 nir_variable
*sampler
= nir_variable_create(b
.shader
, nir_var_uniform
,
88 sampler_type
, "s_tex");
89 sampler
->data
.descriptor_set
= 0;
90 sampler
->data
.binding
= 0;
92 nir_tex_instr
*tex
= nir_tex_instr_create(b
.shader
, 1);
93 tex
->sampler_dim
= tex_dim
;
94 tex
->op
= nir_texop_tex
;
95 tex
->src
[0].src_type
= nir_tex_src_coord
;
96 tex
->src
[0].src
= nir_src_for_ssa(tex_pos
);
97 tex
->dest_type
= nir_type_float
; /* TODO */
98 tex
->is_array
= glsl_sampler_type_is_array(sampler_type
);
99 tex
->coord_components
= tex_pos
->num_components
;
100 tex
->texture
= nir_deref_var_create(tex
, sampler
);
101 tex
->sampler
= nir_deref_var_create(tex
, sampler
);
103 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, "tex");
104 nir_builder_instr_insert(&b
, &tex
->instr
);
106 nir_variable
*color_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
108 color_out
->data
.location
= FRAG_RESULT_DATA0
;
109 nir_store_var(&b
, color_out
, &tex
->dest
.ssa
, 4);
115 meta_prepare_blit(struct anv_cmd_buffer
*cmd_buffer
,
116 struct anv_meta_saved_state
*saved_state
)
118 anv_meta_save(saved_state
, cmd_buffer
,
119 (1 << VK_DYNAMIC_STATE_VIEWPORT
));
122 /* Returns the user-provided VkBufferImageCopy::imageOffset in units of
123 * elements rather than texels. One element equals one texel or one block
124 * if Image is uncompressed or compressed, respectively.
126 static struct VkOffset3D
127 meta_region_offset_el(const struct anv_image
* image
,
128 const struct VkOffset3D
* offset
)
130 const struct isl_format_layout
* isl_layout
= image
->format
->isl_layout
;
131 return (VkOffset3D
) {
132 .x
= offset
->x
/ isl_layout
->bw
,
133 .y
= offset
->y
/ isl_layout
->bh
,
134 .z
= offset
->z
/ isl_layout
->bd
,
138 /* Returns the user-provided VkBufferImageCopy::imageExtent in units of
139 * elements rather than texels. One element equals one texel or one block
140 * if Image is uncompressed or compressed, respectively.
142 static struct VkExtent3D
143 meta_region_extent_el(const VkFormat format
,
144 const struct VkExtent3D
* extent
)
146 const struct isl_format_layout
* isl_layout
=
147 anv_format_for_vk_format(format
)->isl_layout
;
148 return (VkExtent3D
) {
149 .width
= DIV_ROUND_UP(extent
->width
, isl_layout
->bw
),
150 .height
= DIV_ROUND_UP(extent
->height
, isl_layout
->bh
),
151 .depth
= DIV_ROUND_UP(extent
->depth
, isl_layout
->bd
),
156 meta_emit_blit(struct anv_cmd_buffer
*cmd_buffer
,
157 struct anv_image
*src_image
,
158 struct anv_image_view
*src_iview
,
159 VkOffset3D src_offset
,
160 VkExtent3D src_extent
,
161 struct anv_image
*dest_image
,
162 struct anv_image_view
*dest_iview
,
163 VkOffset3D dest_offset
,
164 VkExtent3D dest_extent
,
165 VkFilter blit_filter
)
167 struct anv_device
*device
= cmd_buffer
->device
;
169 struct blit_vb_data
{
174 assert(src_image
->samples
== dest_image
->samples
);
176 unsigned vb_size
= sizeof(struct anv_vue_header
) + 3 * sizeof(*vb_data
);
178 struct anv_state vb_state
=
179 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, vb_size
, 16);
180 memset(vb_state
.map
, 0, sizeof(struct anv_vue_header
));
181 vb_data
= vb_state
.map
+ sizeof(struct anv_vue_header
);
183 vb_data
[0] = (struct blit_vb_data
) {
185 dest_offset
.x
+ dest_extent
.width
,
186 dest_offset
.y
+ dest_extent
.height
,
189 (float)(src_offset
.x
+ src_extent
.width
) / (float)src_iview
->extent
.width
,
190 (float)(src_offset
.y
+ src_extent
.height
) / (float)src_iview
->extent
.height
,
191 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
195 vb_data
[1] = (struct blit_vb_data
) {
198 dest_offset
.y
+ dest_extent
.height
,
201 (float)src_offset
.x
/ (float)src_iview
->extent
.width
,
202 (float)(src_offset
.y
+ src_extent
.height
) / (float)src_iview
->extent
.height
,
203 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
207 vb_data
[2] = (struct blit_vb_data
) {
213 (float)src_offset
.x
/ (float)src_iview
->extent
.width
,
214 (float)src_offset
.y
/ (float)src_iview
->extent
.height
,
215 (float)src_offset
.z
/ (float)src_iview
->extent
.depth
,
219 anv_state_clflush(vb_state
);
221 struct anv_buffer vertex_buffer
= {
224 .bo
= &device
->dynamic_state_block_pool
.bo
,
225 .offset
= vb_state
.offset
,
228 anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 2,
230 anv_buffer_to_handle(&vertex_buffer
),
231 anv_buffer_to_handle(&vertex_buffer
)
235 sizeof(struct anv_vue_header
),
239 ANV_CALL(CreateSampler
)(anv_device_to_handle(device
),
240 &(VkSamplerCreateInfo
) {
241 .sType
= VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
,
242 .magFilter
= blit_filter
,
243 .minFilter
= blit_filter
,
244 }, &cmd_buffer
->pool
->alloc
, &sampler
);
246 VkDescriptorPool desc_pool
;
247 anv_CreateDescriptorPool(anv_device_to_handle(device
),
248 &(const VkDescriptorPoolCreateInfo
) {
249 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
,
254 .pPoolSizes
= (VkDescriptorPoolSize
[]) {
256 .type
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
260 }, &cmd_buffer
->pool
->alloc
, &desc_pool
);
263 anv_AllocateDescriptorSets(anv_device_to_handle(device
),
264 &(VkDescriptorSetAllocateInfo
) {
265 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO
,
266 .descriptorPool
= desc_pool
,
267 .descriptorSetCount
= 1,
268 .pSetLayouts
= &device
->meta_state
.blit
.ds_layout
271 anv_UpdateDescriptorSets(anv_device_to_handle(device
),
273 (VkWriteDescriptorSet
[]) {
275 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
278 .dstArrayElement
= 0,
279 .descriptorCount
= 1,
280 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
281 .pImageInfo
= (VkDescriptorImageInfo
[]) {
284 .imageView
= anv_image_view_to_handle(src_iview
),
285 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
292 anv_CreateFramebuffer(anv_device_to_handle(device
),
293 &(VkFramebufferCreateInfo
) {
294 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
295 .attachmentCount
= 1,
296 .pAttachments
= (VkImageView
[]) {
297 anv_image_view_to_handle(dest_iview
),
299 .width
= dest_iview
->extent
.width
,
300 .height
= dest_iview
->extent
.height
,
302 }, &cmd_buffer
->pool
->alloc
, &fb
);
304 ANV_CALL(CmdBeginRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
),
305 &(VkRenderPassBeginInfo
) {
306 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
307 .renderPass
= device
->meta_state
.blit
.render_pass
,
310 .offset
= { dest_offset
.x
, dest_offset
.y
},
311 .extent
= { dest_extent
.width
, dest_extent
.height
},
313 .clearValueCount
= 0,
314 .pClearValues
= NULL
,
315 }, VK_SUBPASS_CONTENTS_INLINE
);
319 switch (src_image
->type
) {
320 case VK_IMAGE_TYPE_1D
:
321 pipeline
= device
->meta_state
.blit
.pipeline_1d_src
;
323 case VK_IMAGE_TYPE_2D
:
324 pipeline
= device
->meta_state
.blit
.pipeline_2d_src
;
326 case VK_IMAGE_TYPE_3D
:
327 pipeline
= device
->meta_state
.blit
.pipeline_3d_src
;
330 unreachable(!"bad VkImageType");
333 if (cmd_buffer
->state
.pipeline
!= anv_pipeline_from_handle(pipeline
)) {
334 anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer
),
335 VK_PIPELINE_BIND_POINT_GRAPHICS
, pipeline
);
338 anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
342 .width
= dest_iview
->extent
.width
,
343 .height
= dest_iview
->extent
.height
,
348 anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer
),
349 VK_PIPELINE_BIND_POINT_GRAPHICS
,
350 device
->meta_state
.blit
.pipeline_layout
, 0, 1,
353 ANV_CALL(CmdDraw
)(anv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
355 ANV_CALL(CmdEndRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
));
357 /* At the point where we emit the draw call, all data from the
358 * descriptor sets, etc. has been used. We are free to delete it.
360 anv_DestroyDescriptorPool(anv_device_to_handle(device
),
361 desc_pool
, &cmd_buffer
->pool
->alloc
);
362 anv_DestroySampler(anv_device_to_handle(device
), sampler
,
363 &cmd_buffer
->pool
->alloc
);
364 anv_DestroyFramebuffer(anv_device_to_handle(device
), fb
,
365 &cmd_buffer
->pool
->alloc
);
/* Restores the state saved by meta_prepare_blit(). */
static void
meta_finish_blit(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_meta_saved_state *saved_state)
{
   anv_meta_restore(saved_state, cmd_buffer);
}
376 vk_format_for_size(int bs
)
378 /* The choice of UNORM and UINT formats is very intentional here. Most of
379 * the time, we want to use a UINT format to avoid any rounding error in
380 * the blit. For stencil blits, R8_UINT is required by the hardware.
381 * (It's the only format allowed in conjunction with W-tiling.) Also we
382 * intentionally use the 4-channel formats whenever we can. This is so
383 * that, when we do a RGB <-> RGBX copy, the two formats will line up even
384 * though one of them is 3/4 the size of the other. The choice of UNORM
385 * vs. UINT is also very intentional because Haswell doesn't handle 8 or
386 * 16-bit RGB UINT formats at all so we have to use UNORM there.
387 * Fortunately, the only time we should ever use two different formats in
388 * the table below is for RGB -> RGBA blits and so we will never have any
389 * UNORM/UINT mismatch.
392 case 1: return VK_FORMAT_R8_UINT
;
393 case 2: return VK_FORMAT_R8G8_UINT
;
394 case 3: return VK_FORMAT_R8G8B8_UNORM
;
395 case 4: return VK_FORMAT_R8G8B8A8_UNORM
;
396 case 6: return VK_FORMAT_R16G16B16_UNORM
;
397 case 8: return VK_FORMAT_R16G16B16A16_UNORM
;
398 case 12: return VK_FORMAT_R32G32B32_UINT
;
399 case 16: return VK_FORMAT_R32G32B32A32_UINT
;
401 unreachable("Invalid format block size");
406 do_buffer_copy(struct anv_cmd_buffer
*cmd_buffer
,
407 struct anv_bo
*src
, uint64_t src_offset
,
408 struct anv_bo
*dest
, uint64_t dest_offset
,
409 int width
, int height
, VkFormat copy_format
)
411 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
413 VkImageCreateInfo image_info
= {
414 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
415 .imageType
= VK_IMAGE_TYPE_2D
,
416 .format
= copy_format
,
425 .tiling
= VK_IMAGE_TILING_LINEAR
,
431 image_info
.usage
= VK_IMAGE_USAGE_SAMPLED_BIT
;
432 anv_CreateImage(vk_device
, &image_info
,
433 &cmd_buffer
->pool
->alloc
, &src_image
);
436 image_info
.usage
= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
437 anv_CreateImage(vk_device
, &image_info
,
438 &cmd_buffer
->pool
->alloc
, &dest_image
);
440 /* We could use a vk call to bind memory, but that would require
441 * creating a dummy memory object etc. so there's really no point.
443 anv_image_from_handle(src_image
)->bo
= src
;
444 anv_image_from_handle(src_image
)->offset
= src_offset
;
445 anv_image_from_handle(dest_image
)->bo
= dest
;
446 anv_image_from_handle(dest_image
)->offset
= dest_offset
;
448 struct anv_image_view src_iview
;
449 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
450 &(VkImageViewCreateInfo
) {
451 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
453 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
454 .format
= copy_format
,
455 .subresourceRange
= {
456 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
463 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
465 struct anv_image_view dest_iview
;
466 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
467 &(VkImageViewCreateInfo
) {
468 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
470 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
471 .format
= copy_format
,
472 .subresourceRange
= {
473 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
480 cmd_buffer
, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
482 meta_emit_blit(cmd_buffer
,
483 anv_image_from_handle(src_image
),
485 (VkOffset3D
) { 0, 0, 0 },
486 (VkExtent3D
) { width
, height
, 1 },
487 anv_image_from_handle(dest_image
),
489 (VkOffset3D
) { 0, 0, 0 },
490 (VkExtent3D
) { width
, height
, 1 },
493 anv_DestroyImage(vk_device
, src_image
, &cmd_buffer
->pool
->alloc
);
494 anv_DestroyImage(vk_device
, dest_image
, &cmd_buffer
->pool
->alloc
);
497 void anv_CmdCopyBuffer(
498 VkCommandBuffer commandBuffer
,
501 uint32_t regionCount
,
502 const VkBufferCopy
* pRegions
)
504 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
505 ANV_FROM_HANDLE(anv_buffer
, src_buffer
, srcBuffer
);
506 ANV_FROM_HANDLE(anv_buffer
, dest_buffer
, destBuffer
);
508 struct anv_meta_saved_state saved_state
;
510 meta_prepare_blit(cmd_buffer
, &saved_state
);
512 for (unsigned r
= 0; r
< regionCount
; r
++) {
513 uint64_t src_offset
= src_buffer
->offset
+ pRegions
[r
].srcOffset
;
514 uint64_t dest_offset
= dest_buffer
->offset
+ pRegions
[r
].dstOffset
;
515 uint64_t copy_size
= pRegions
[r
].size
;
517 /* First, we compute the biggest format that can be used with the
518 * given offsets and size.
522 int fs
= ffs(src_offset
) - 1;
524 bs
= MIN2(bs
, 1 << fs
);
525 assert(src_offset
% bs
== 0);
527 fs
= ffs(dest_offset
) - 1;
529 bs
= MIN2(bs
, 1 << fs
);
530 assert(dest_offset
% bs
== 0);
532 fs
= ffs(pRegions
[r
].size
) - 1;
534 bs
= MIN2(bs
, 1 << fs
);
535 assert(pRegions
[r
].size
% bs
== 0);
537 VkFormat copy_format
= vk_format_for_size(bs
);
539 /* This is maximum possible width/height our HW can handle */
540 uint64_t max_surface_dim
= 1 << 14;
542 /* First, we make a bunch of max-sized copies */
543 uint64_t max_copy_size
= max_surface_dim
* max_surface_dim
* bs
;
544 while (copy_size
>= max_copy_size
) {
545 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
546 dest_buffer
->bo
, dest_offset
,
547 max_surface_dim
, max_surface_dim
, copy_format
);
548 copy_size
-= max_copy_size
;
549 src_offset
+= max_copy_size
;
550 dest_offset
+= max_copy_size
;
553 uint64_t height
= copy_size
/ (max_surface_dim
* bs
);
554 assert(height
< max_surface_dim
);
556 uint64_t rect_copy_size
= height
* max_surface_dim
* bs
;
557 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
558 dest_buffer
->bo
, dest_offset
,
559 max_surface_dim
, height
, copy_format
);
560 copy_size
-= rect_copy_size
;
561 src_offset
+= rect_copy_size
;
562 dest_offset
+= rect_copy_size
;
565 if (copy_size
!= 0) {
566 do_buffer_copy(cmd_buffer
, src_buffer
->bo
, src_offset
,
567 dest_buffer
->bo
, dest_offset
,
568 copy_size
/ bs
, 1, copy_format
);
572 meta_finish_blit(cmd_buffer
, &saved_state
);
575 void anv_CmdUpdateBuffer(
576 VkCommandBuffer commandBuffer
,
578 VkDeviceSize dstOffset
,
579 VkDeviceSize dataSize
,
580 const uint32_t* pData
)
582 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
583 ANV_FROM_HANDLE(anv_buffer
, dst_buffer
, dstBuffer
);
584 struct anv_meta_saved_state saved_state
;
586 meta_prepare_blit(cmd_buffer
, &saved_state
);
588 /* We can't quite grab a full block because the state stream needs a
589 * little data at the top to build its linked list.
591 const uint32_t max_update_size
=
592 cmd_buffer
->device
->dynamic_state_block_pool
.block_size
- 64;
594 assert(max_update_size
< (1 << 14) * 4);
597 const uint32_t copy_size
= MIN2(dataSize
, max_update_size
);
599 struct anv_state tmp_data
=
600 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, copy_size
, 64);
602 memcpy(tmp_data
.map
, pData
, copy_size
);
606 if ((copy_size
& 15) == 0 && (dstOffset
& 15) == 0) {
607 format
= VK_FORMAT_R32G32B32A32_UINT
;
609 } else if ((copy_size
& 7) == 0 && (dstOffset
& 7) == 0) {
610 format
= VK_FORMAT_R32G32_UINT
;
613 assert((copy_size
& 3) == 0 && (dstOffset
& 3) == 0);
614 format
= VK_FORMAT_R32_UINT
;
618 do_buffer_copy(cmd_buffer
,
619 &cmd_buffer
->device
->dynamic_state_block_pool
.bo
,
621 dst_buffer
->bo
, dst_buffer
->offset
+ dstOffset
,
622 copy_size
/ bs
, 1, format
);
624 dataSize
-= copy_size
;
625 dstOffset
+= copy_size
;
626 pData
= (void *)pData
+ copy_size
;
631 choose_iview_format(struct anv_image
*image
, VkImageAspectFlagBits aspect
)
633 assert(__builtin_popcount(aspect
) == 1);
635 struct isl_surf
*surf
=
636 &anv_image_get_surface_for_aspect_mask(image
, aspect
)->isl
;
638 /* vkCmdCopyImage behaves like memcpy. Therefore we choose identical UINT
639 * formats for the source and destination image views.
641 * From the Vulkan spec (2015-12-30):
643 * vkCmdCopyImage performs image copies in a similar manner to a host
644 * memcpy. It does not perform general-purpose conversions such as
645 * scaling, resizing, blending, color-space conversion, or format
646 * conversions. Rather, it simply copies raw image data. vkCmdCopyImage
647 * can copy between images with different formats, provided the formats
648 * are compatible as defined below.
650 * [The spec later defines compatibility as having the same number of
653 return vk_format_for_size(isl_format_layouts
[surf
->format
].bs
);
657 choose_buffer_format(VkFormat format
, VkImageAspectFlagBits aspect
)
659 assert(__builtin_popcount(aspect
) == 1);
661 /* vkCmdCopy* commands behave like memcpy. Therefore we choose
662 * compatable UINT formats for the source and destination image views.
664 * For the buffer, we go back to the original image format and get a
665 * the format as if it were linear. This way, for RGB formats, we get
666 * an RGB format here even if the tiled image is RGBA. XXX: This doesn't
667 * work if the buffer is the destination.
669 enum isl_format linear_format
= anv_get_isl_format(format
, aspect
,
670 VK_IMAGE_TILING_LINEAR
,
673 return vk_format_for_size(isl_format_layouts
[linear_format
].bs
);
676 void anv_CmdCopyImage(
677 VkCommandBuffer commandBuffer
,
679 VkImageLayout srcImageLayout
,
681 VkImageLayout destImageLayout
,
682 uint32_t regionCount
,
683 const VkImageCopy
* pRegions
)
685 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
686 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
687 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
688 struct anv_meta_saved_state saved_state
;
690 /* From the Vulkan 1.0 spec:
692 * vkCmdCopyImage can be used to copy image data between multisample
693 * images, but both images must have the same number of samples.
695 assert(src_image
->samples
== dest_image
->samples
);
697 meta_prepare_blit(cmd_buffer
, &saved_state
);
699 for (unsigned r
= 0; r
< regionCount
; r
++) {
700 assert(pRegions
[r
].srcSubresource
.aspectMask
==
701 pRegions
[r
].dstSubresource
.aspectMask
);
703 VkImageAspectFlags aspect
= pRegions
[r
].srcSubresource
.aspectMask
;
705 VkFormat src_format
= choose_iview_format(src_image
, aspect
);
706 VkFormat dst_format
= choose_iview_format(dest_image
, aspect
);
708 struct anv_image_view src_iview
;
709 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
710 &(VkImageViewCreateInfo
) {
711 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
713 .viewType
= anv_meta_get_view_type(src_image
),
714 .format
= src_format
,
715 .subresourceRange
= {
716 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
717 .baseMipLevel
= pRegions
[r
].srcSubresource
.mipLevel
,
719 .baseArrayLayer
= pRegions
[r
].srcSubresource
.baseArrayLayer
,
720 .layerCount
= pRegions
[r
].dstSubresource
.layerCount
,
723 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
725 const uint32_t dest_base_array_slice
=
726 anv_meta_get_iview_layer(dest_image
, &pRegions
[r
].dstSubresource
,
727 &pRegions
[r
].dstOffset
);
730 unsigned num_slices_3d
= pRegions
[r
].extent
.depth
;
731 unsigned num_slices_array
= pRegions
[r
].dstSubresource
.layerCount
;
732 unsigned slice_3d
= 0;
733 unsigned slice_array
= 0;
734 while (slice_3d
< num_slices_3d
&& slice_array
< num_slices_array
) {
735 VkOffset3D src_offset
= pRegions
[r
].srcOffset
;
736 src_offset
.z
+= slice_3d
+ slice_array
;
741 if (isl_format_is_compressed(dest_image
->format
->isl_format
))
742 isl_surf_get_image_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
743 &dest_image
->color_surface
.isl
,
744 pRegions
[r
].dstSubresource
.mipLevel
,
745 pRegions
[r
].dstSubresource
.baseArrayLayer
+ slice_array
,
746 pRegions
[r
].dstOffset
.z
+ slice_3d
,
747 &img_o
, &img_x
, &img_y
);
749 VkOffset3D dest_offset_el
= meta_region_offset_el(dest_image
, &pRegions
[r
].dstOffset
);
750 dest_offset_el
.x
+= img_x
;
751 dest_offset_el
.y
+= img_y
;
752 dest_offset_el
.z
= 0;
754 struct anv_image_view dest_iview
;
755 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
756 &(VkImageViewCreateInfo
) {
757 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
759 .viewType
= anv_meta_get_view_type(dest_image
),
760 .format
= dst_format
,
761 .subresourceRange
= {
762 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
763 .baseMipLevel
= pRegions
[r
].dstSubresource
.mipLevel
,
765 .baseArrayLayer
= dest_base_array_slice
+
766 slice_array
+ slice_3d
,
770 cmd_buffer
, img_o
, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
772 const VkExtent3D img_extent_el
= meta_region_extent_el(dest_image
->vk_format
,
773 &pRegions
[r
].extent
);
775 meta_emit_blit(cmd_buffer
,
776 src_image
, &src_iview
,
779 dest_image
, &dest_iview
,
784 if (dest_image
->type
== VK_IMAGE_TYPE_3D
)
791 meta_finish_blit(cmd_buffer
, &saved_state
);
794 void anv_CmdBlitImage(
795 VkCommandBuffer commandBuffer
,
797 VkImageLayout srcImageLayout
,
799 VkImageLayout destImageLayout
,
800 uint32_t regionCount
,
801 const VkImageBlit
* pRegions
,
805 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
806 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
807 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
808 struct anv_meta_saved_state saved_state
;
810 /* From the Vulkan 1.0 spec:
812 * vkCmdBlitImage must not be used for multisampled source or
813 * destination images. Use vkCmdResolveImage for this purpose.
815 assert(src_image
->samples
== 1);
816 assert(dest_image
->samples
== 1);
818 anv_finishme("respect VkFilter");
820 meta_prepare_blit(cmd_buffer
, &saved_state
);
822 for (unsigned r
= 0; r
< regionCount
; r
++) {
823 struct anv_image_view src_iview
;
824 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
825 &(VkImageViewCreateInfo
) {
826 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
828 .viewType
= anv_meta_get_view_type(src_image
),
829 .format
= src_image
->vk_format
,
830 .subresourceRange
= {
831 .aspectMask
= pRegions
[r
].srcSubresource
.aspectMask
,
832 .baseMipLevel
= pRegions
[r
].srcSubresource
.mipLevel
,
834 .baseArrayLayer
= pRegions
[r
].srcSubresource
.baseArrayLayer
,
838 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
840 const VkOffset3D dest_offset
= {
841 .x
= pRegions
[r
].dstOffsets
[0].x
,
842 .y
= pRegions
[r
].dstOffsets
[0].y
,
846 if (pRegions
[r
].dstOffsets
[1].x
< pRegions
[r
].dstOffsets
[0].x
||
847 pRegions
[r
].dstOffsets
[1].y
< pRegions
[r
].dstOffsets
[0].y
||
848 pRegions
[r
].srcOffsets
[1].x
< pRegions
[r
].srcOffsets
[0].x
||
849 pRegions
[r
].srcOffsets
[1].y
< pRegions
[r
].srcOffsets
[0].y
)
850 anv_finishme("FINISHME: Allow flipping in blits");
852 const VkExtent3D dest_extent
= {
853 .width
= pRegions
[r
].dstOffsets
[1].x
- pRegions
[r
].dstOffsets
[0].x
,
854 .height
= pRegions
[r
].dstOffsets
[1].y
- pRegions
[r
].dstOffsets
[0].y
,
857 const VkExtent3D src_extent
= {
858 .width
= pRegions
[r
].srcOffsets
[1].x
- pRegions
[r
].srcOffsets
[0].x
,
859 .height
= pRegions
[r
].srcOffsets
[1].y
- pRegions
[r
].srcOffsets
[0].y
,
862 const uint32_t dest_array_slice
=
863 anv_meta_get_iview_layer(dest_image
, &pRegions
[r
].dstSubresource
,
864 &pRegions
[r
].dstOffsets
[0]);
866 if (pRegions
[r
].srcSubresource
.layerCount
> 1)
867 anv_finishme("FINISHME: copy multiple array layers");
869 if (pRegions
[r
].srcOffsets
[0].z
+ 1 != pRegions
[r
].srcOffsets
[1].z
||
870 pRegions
[r
].dstOffsets
[0].z
+ 1 != pRegions
[r
].dstOffsets
[1].z
)
871 anv_finishme("FINISHME: copy multiple depth layers");
873 struct anv_image_view dest_iview
;
874 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
875 &(VkImageViewCreateInfo
) {
876 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
878 .viewType
= anv_meta_get_view_type(dest_image
),
879 .format
= dest_image
->vk_format
,
880 .subresourceRange
= {
881 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
882 .baseMipLevel
= pRegions
[r
].dstSubresource
.mipLevel
,
884 .baseArrayLayer
= dest_array_slice
,
888 cmd_buffer
, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
890 meta_emit_blit(cmd_buffer
,
891 src_image
, &src_iview
,
892 pRegions
[r
].srcOffsets
[0], src_extent
,
893 dest_image
, &dest_iview
,
894 dest_offset
, dest_extent
,
898 meta_finish_blit(cmd_buffer
, &saved_state
);
901 static struct anv_image
*
902 make_image_for_buffer(VkDevice vk_device
, VkBuffer vk_buffer
, VkFormat format
,
903 VkImageUsageFlags usage
,
904 VkImageType image_type
,
905 const VkAllocationCallbacks
*alloc
,
906 const VkBufferImageCopy
*copy
)
908 ANV_FROM_HANDLE(anv_buffer
, buffer
, vk_buffer
);
910 VkExtent3D extent
= copy
->imageExtent
;
911 if (copy
->bufferRowLength
)
912 extent
.width
= copy
->bufferRowLength
;
913 if (copy
->bufferImageHeight
)
914 extent
.height
= copy
->bufferImageHeight
;
916 extent
= meta_region_extent_el(format
, &extent
);
918 VkImageAspectFlags aspect
= copy
->imageSubresource
.aspectMask
;
919 VkFormat buffer_format
= choose_buffer_format(format
, aspect
);
922 VkResult result
= anv_CreateImage(vk_device
,
923 &(VkImageCreateInfo
) {
924 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
925 .imageType
= VK_IMAGE_TYPE_2D
,
926 .format
= buffer_format
,
931 .tiling
= VK_IMAGE_TILING_LINEAR
,
934 }, alloc
, &vk_image
);
935 assert(result
== VK_SUCCESS
);
937 ANV_FROM_HANDLE(anv_image
, image
, vk_image
);
939 /* We could use a vk call to bind memory, but that would require
940 * creating a dummy memory object etc. so there's really no point.
942 image
->bo
= buffer
->bo
;
943 image
->offset
= buffer
->offset
+ copy
->bufferOffset
;
948 void anv_CmdCopyBufferToImage(
949 VkCommandBuffer commandBuffer
,
952 VkImageLayout destImageLayout
,
953 uint32_t regionCount
,
954 const VkBufferImageCopy
* pRegions
)
956 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
957 ANV_FROM_HANDLE(anv_image
, dest_image
, destImage
);
958 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
959 struct anv_meta_saved_state saved_state
;
961 /* The Vulkan 1.0 spec says "dstImage must have a sample count equal to
962 * VK_SAMPLE_COUNT_1_BIT."
964 assert(dest_image
->samples
== 1);
966 meta_prepare_blit(cmd_buffer
, &saved_state
);
968 for (unsigned r
= 0; r
< regionCount
; r
++) {
969 VkImageAspectFlags aspect
= pRegions
[r
].imageSubresource
.aspectMask
;
971 VkFormat image_format
= choose_iview_format(dest_image
, aspect
);
973 struct anv_image
*src_image
=
974 make_image_for_buffer(vk_device
, srcBuffer
, dest_image
->vk_format
,
975 VK_IMAGE_USAGE_SAMPLED_BIT
,
976 dest_image
->type
, &cmd_buffer
->pool
->alloc
,
979 const uint32_t dest_base_array_slice
=
980 anv_meta_get_iview_layer(dest_image
, &pRegions
[r
].imageSubresource
,
981 &pRegions
[r
].imageOffset
);
983 unsigned num_slices_3d
= pRegions
[r
].imageExtent
.depth
;
984 unsigned num_slices_array
= pRegions
[r
].imageSubresource
.layerCount
;
985 unsigned slice_3d
= 0;
986 unsigned slice_array
= 0;
987 while (slice_3d
< num_slices_3d
&& slice_array
< num_slices_array
) {
988 struct anv_image_view src_iview
;
989 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
990 &(VkImageViewCreateInfo
) {
991 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
992 .image
= anv_image_to_handle(src_image
),
993 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
994 .format
= src_image
->vk_format
,
995 .subresourceRange
= {
996 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1003 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
1008 if (isl_format_is_compressed(dest_image
->format
->isl_format
))
1009 isl_surf_get_image_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
1010 &dest_image
->color_surface
.isl
,
1011 pRegions
[r
].imageSubresource
.mipLevel
,
1012 pRegions
[r
].imageSubresource
.baseArrayLayer
+ slice_array
,
1013 pRegions
[r
].imageOffset
.z
+ slice_3d
,
1014 &img_o
, &img_x
, &img_y
);
1016 VkOffset3D dest_offset_el
= meta_region_offset_el(dest_image
, & pRegions
[r
].imageOffset
);
1017 dest_offset_el
.x
+= img_x
;
1018 dest_offset_el
.y
+= img_y
;
1019 dest_offset_el
.z
= 0;
1021 struct anv_image_view dest_iview
;
1022 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1023 &(VkImageViewCreateInfo
) {
1024 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1025 .image
= anv_image_to_handle(dest_image
),
1026 .viewType
= anv_meta_get_view_type(dest_image
),
1027 .format
= image_format
,
1028 .subresourceRange
= {
1029 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1030 .baseMipLevel
= pRegions
[r
].imageSubresource
.mipLevel
,
1032 .baseArrayLayer
= dest_base_array_slice
+
1033 slice_array
+ slice_3d
,
1037 cmd_buffer
, img_o
, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
1039 const VkExtent3D img_extent_el
= meta_region_extent_el(dest_image
->vk_format
,
1040 &pRegions
[r
].imageExtent
);
1042 meta_emit_blit(cmd_buffer
,
1045 (VkOffset3D
){0, 0, 0},
1053 /* Once we've done the blit, all of the actual information about
1054 * the image is embedded in the command buffer so we can just
1055 * increment the offset directly in the image effectively
1056 * re-binding it to different backing memory.
1058 src_image
->offset
+= src_image
->extent
.width
*
1059 src_image
->extent
.height
*
1060 src_image
->format
->isl_layout
->bs
;
1062 if (dest_image
->type
== VK_IMAGE_TYPE_3D
)
1068 anv_DestroyImage(vk_device
, anv_image_to_handle(src_image
),
1069 &cmd_buffer
->pool
->alloc
);
1072 meta_finish_blit(cmd_buffer
, &saved_state
);
1075 void anv_CmdCopyImageToBuffer(
1076 VkCommandBuffer commandBuffer
,
1078 VkImageLayout srcImageLayout
,
1079 VkBuffer destBuffer
,
1080 uint32_t regionCount
,
1081 const VkBufferImageCopy
* pRegions
)
1083 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, commandBuffer
);
1084 ANV_FROM_HANDLE(anv_image
, src_image
, srcImage
);
1085 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
1086 struct anv_meta_saved_state saved_state
;
1089 /* The Vulkan 1.0 spec says "srcImage must have a sample count equal to
1090 * VK_SAMPLE_COUNT_1_BIT."
1092 assert(src_image
->samples
== 1);
1094 meta_prepare_blit(cmd_buffer
, &saved_state
);
1096 for (unsigned r
= 0; r
< regionCount
; r
++) {
1097 VkImageAspectFlags aspect
= pRegions
[r
].imageSubresource
.aspectMask
;
1099 VkFormat image_format
= choose_iview_format(src_image
, aspect
);
1101 struct anv_image_view src_iview
;
1102 anv_image_view_init(&src_iview
, cmd_buffer
->device
,
1103 &(VkImageViewCreateInfo
) {
1104 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1106 .viewType
= anv_meta_get_view_type(src_image
),
1107 .format
= image_format
,
1108 .subresourceRange
= {
1109 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1110 .baseMipLevel
= pRegions
[r
].imageSubresource
.mipLevel
,
1112 .baseArrayLayer
= pRegions
[r
].imageSubresource
.baseArrayLayer
,
1113 .layerCount
= pRegions
[r
].imageSubresource
.layerCount
,
1116 cmd_buffer
, 0, VK_IMAGE_USAGE_SAMPLED_BIT
);
1118 struct anv_image
*dest_image
=
1119 make_image_for_buffer(vk_device
, destBuffer
, src_image
->vk_format
,
1120 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
1121 src_image
->type
, &cmd_buffer
->pool
->alloc
,
1124 unsigned num_slices
;
1125 if (src_image
->type
== VK_IMAGE_TYPE_3D
) {
1126 assert(pRegions
[r
].imageSubresource
.layerCount
== 1);
1127 num_slices
= pRegions
[r
].imageExtent
.depth
;
1129 assert(pRegions
[r
].imageExtent
.depth
== 1);
1130 num_slices
= pRegions
[r
].imageSubresource
.layerCount
;
1133 for (unsigned slice
= 0; slice
< num_slices
; slice
++) {
1134 VkOffset3D src_offset
= pRegions
[r
].imageOffset
;
1135 src_offset
.z
+= slice
;
1137 struct anv_image_view dest_iview
;
1138 anv_image_view_init(&dest_iview
, cmd_buffer
->device
,
1139 &(VkImageViewCreateInfo
) {
1140 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
1141 .image
= anv_image_to_handle(dest_image
),
1142 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
1143 .format
= dest_image
->vk_format
,
1144 .subresourceRange
= {
1145 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
1148 .baseArrayLayer
= 0,
1152 cmd_buffer
, 0, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
);
1154 meta_emit_blit(cmd_buffer
,
1155 anv_image_from_handle(srcImage
),
1158 pRegions
[r
].imageExtent
,
1161 (VkOffset3D
) { 0, 0, 0 },
1162 pRegions
[r
].imageExtent
,
1165 /* Once we've done the blit, all of the actual information about
1166 * the image is embedded in the command buffer so we can just
1167 * increment the offset directly in the image effectively
1168 * re-binding it to different backing memory.
1170 dest_image
->offset
+= dest_image
->extent
.width
*
1171 dest_image
->extent
.height
*
1172 src_image
->format
->isl_layout
->bs
;
1175 anv_DestroyImage(vk_device
, anv_image_to_handle(dest_image
),
1176 &cmd_buffer
->pool
->alloc
);
1179 meta_finish_blit(cmd_buffer
, &saved_state
);
1183 anv_device_finish_meta_blit_state(struct anv_device
*device
)
1185 anv_DestroyRenderPass(anv_device_to_handle(device
),
1186 device
->meta_state
.blit
.render_pass
,
1187 &device
->meta_state
.alloc
);
1188 anv_DestroyPipeline(anv_device_to_handle(device
),
1189 device
->meta_state
.blit
.pipeline_1d_src
,
1190 &device
->meta_state
.alloc
);
1191 anv_DestroyPipeline(anv_device_to_handle(device
),
1192 device
->meta_state
.blit
.pipeline_2d_src
,
1193 &device
->meta_state
.alloc
);
1194 anv_DestroyPipeline(anv_device_to_handle(device
),
1195 device
->meta_state
.blit
.pipeline_3d_src
,
1196 &device
->meta_state
.alloc
);
1197 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
1198 device
->meta_state
.blit
.pipeline_layout
,
1199 &device
->meta_state
.alloc
);
1200 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
1201 device
->meta_state
.blit
.ds_layout
,
1202 &device
->meta_state
.alloc
);
1206 anv_device_init_meta_blit_state(struct anv_device
*device
)
1210 result
= anv_CreateRenderPass(anv_device_to_handle(device
),
1211 &(VkRenderPassCreateInfo
) {
1212 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
1213 .attachmentCount
= 1,
1214 .pAttachments
= &(VkAttachmentDescription
) {
1215 .format
= VK_FORMAT_UNDEFINED
, /* Our shaders don't care */
1216 .loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
,
1217 .storeOp
= VK_ATTACHMENT_STORE_OP_STORE
,
1218 .initialLayout
= VK_IMAGE_LAYOUT_GENERAL
,
1219 .finalLayout
= VK_IMAGE_LAYOUT_GENERAL
,
1222 .pSubpasses
= &(VkSubpassDescription
) {
1223 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
1224 .inputAttachmentCount
= 0,
1225 .colorAttachmentCount
= 1,
1226 .pColorAttachments
= &(VkAttachmentReference
) {
1228 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
1230 .pResolveAttachments
= NULL
,
1231 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
1232 .attachment
= VK_ATTACHMENT_UNUSED
,
1233 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
1235 .preserveAttachmentCount
= 1,
1236 .pPreserveAttachments
= (uint32_t[]) { 0 },
1238 .dependencyCount
= 0,
1239 }, &device
->meta_state
.alloc
, &device
->meta_state
.blit
.render_pass
);
1240 if (result
!= VK_SUCCESS
)
1243 /* We don't use a vertex shader for blitting, but instead build and pass
1244 * the VUEs directly to the rasterization backend. However, we do need
1245 * to provide GLSL source for the vertex shader so that the compiler
1246 * does not dead-code our inputs.
1248 struct anv_shader_module vs
= {
1249 .nir
= build_nir_vertex_shader(),
1252 struct anv_shader_module fs_1d
= {
1253 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_1D
),
1256 struct anv_shader_module fs_2d
= {
1257 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_2D
),
1260 struct anv_shader_module fs_3d
= {
1261 .nir
= build_nir_copy_fragment_shader(GLSL_SAMPLER_DIM_3D
),
1264 VkPipelineVertexInputStateCreateInfo vi_create_info
= {
1265 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
1266 .vertexBindingDescriptionCount
= 2,
1267 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
1271 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
1275 .stride
= 5 * sizeof(float),
1276 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
1279 .vertexAttributeDescriptionCount
= 3,
1280 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
1285 .format
= VK_FORMAT_R32G32B32A32_UINT
,
1292 .format
= VK_FORMAT_R32G32_SFLOAT
,
1296 /* Texture Coordinate */
1299 .format
= VK_FORMAT_R32G32B32_SFLOAT
,
1305 VkDescriptorSetLayoutCreateInfo ds_layout_info
= {
1306 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
1308 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
1311 .descriptorType
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
,
1312 .descriptorCount
= 1,
1313 .stageFlags
= VK_SHADER_STAGE_FRAGMENT_BIT
,
1314 .pImmutableSamplers
= NULL
1318 result
= anv_CreateDescriptorSetLayout(anv_device_to_handle(device
),
1320 &device
->meta_state
.alloc
,
1321 &device
->meta_state
.blit
.ds_layout
);
1322 if (result
!= VK_SUCCESS
)
1323 goto fail_render_pass
;
1325 result
= anv_CreatePipelineLayout(anv_device_to_handle(device
),
1326 &(VkPipelineLayoutCreateInfo
) {
1327 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
1328 .setLayoutCount
= 1,
1329 .pSetLayouts
= &device
->meta_state
.blit
.ds_layout
,
1331 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_layout
);
1332 if (result
!= VK_SUCCESS
)
1333 goto fail_descriptor_set_layout
;
1335 VkPipelineShaderStageCreateInfo pipeline_shader_stages
[] = {
1337 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
1338 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
1339 .module
= anv_shader_module_to_handle(&vs
),
1341 .pSpecializationInfo
= NULL
1343 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
1344 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
1345 .module
= VK_NULL_HANDLE
, /* TEMPLATE VALUE! FILL ME IN! */
1347 .pSpecializationInfo
= NULL
1351 const VkGraphicsPipelineCreateInfo vk_pipeline_info
= {
1352 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
1353 .stageCount
= ARRAY_SIZE(pipeline_shader_stages
),
1354 .pStages
= pipeline_shader_stages
,
1355 .pVertexInputState
= &vi_create_info
,
1356 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
1357 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
1358 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
1359 .primitiveRestartEnable
= false,
1361 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
1362 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
1366 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
1367 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
1368 .rasterizerDiscardEnable
= false,
1369 .polygonMode
= VK_POLYGON_MODE_FILL
,
1370 .cullMode
= VK_CULL_MODE_NONE
,
1371 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
1373 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
1374 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
1375 .rasterizationSamples
= 1,
1376 .sampleShadingEnable
= false,
1377 .pSampleMask
= (VkSampleMask
[]) { UINT32_MAX
},
1379 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
1380 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
1381 .attachmentCount
= 1,
1382 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
1384 VK_COLOR_COMPONENT_A_BIT
|
1385 VK_COLOR_COMPONENT_R_BIT
|
1386 VK_COLOR_COMPONENT_G_BIT
|
1387 VK_COLOR_COMPONENT_B_BIT
},
1390 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
1391 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
1392 .dynamicStateCount
= 9,
1393 .pDynamicStates
= (VkDynamicState
[]) {
1394 VK_DYNAMIC_STATE_VIEWPORT
,
1395 VK_DYNAMIC_STATE_SCISSOR
,
1396 VK_DYNAMIC_STATE_LINE_WIDTH
,
1397 VK_DYNAMIC_STATE_DEPTH_BIAS
,
1398 VK_DYNAMIC_STATE_BLEND_CONSTANTS
,
1399 VK_DYNAMIC_STATE_DEPTH_BOUNDS
,
1400 VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
,
1401 VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
,
1402 VK_DYNAMIC_STATE_STENCIL_REFERENCE
,
1406 .layout
= device
->meta_state
.blit
.pipeline_layout
,
1407 .renderPass
= device
->meta_state
.blit
.render_pass
,
1411 const struct anv_graphics_pipeline_create_info anv_pipeline_info
= {
1412 .color_attachment_count
= -1,
1413 .use_repclear
= false,
1414 .disable_viewport
= true,
1415 .disable_scissor
= true,
1417 .use_rectlist
= true
1420 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_1d
);
1421 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
1423 &vk_pipeline_info
, &anv_pipeline_info
,
1424 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_1d_src
);
1425 if (result
!= VK_SUCCESS
)
1426 goto fail_pipeline_layout
;
1428 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_2d
);
1429 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
1431 &vk_pipeline_info
, &anv_pipeline_info
,
1432 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_2d_src
);
1433 if (result
!= VK_SUCCESS
)
1434 goto fail_pipeline_1d
;
1436 pipeline_shader_stages
[1].module
= anv_shader_module_to_handle(&fs_3d
);
1437 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
1439 &vk_pipeline_info
, &anv_pipeline_info
,
1440 &device
->meta_state
.alloc
, &device
->meta_state
.blit
.pipeline_3d_src
);
1441 if (result
!= VK_SUCCESS
)
1442 goto fail_pipeline_2d
;
1444 ralloc_free(vs
.nir
);
1445 ralloc_free(fs_1d
.nir
);
1446 ralloc_free(fs_2d
.nir
);
1447 ralloc_free(fs_3d
.nir
);
1452 anv_DestroyPipeline(anv_device_to_handle(device
),
1453 device
->meta_state
.blit
.pipeline_2d_src
,
1454 &device
->meta_state
.alloc
);
1457 anv_DestroyPipeline(anv_device_to_handle(device
),
1458 device
->meta_state
.blit
.pipeline_1d_src
,
1459 &device
->meta_state
.alloc
);
1461 fail_pipeline_layout
:
1462 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
1463 device
->meta_state
.blit
.pipeline_layout
,
1464 &device
->meta_state
.alloc
);
1465 fail_descriptor_set_layout
:
1466 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
1467 device
->meta_state
.blit
.ds_layout
,
1468 &device
->meta_state
.alloc
);
1470 anv_DestroyRenderPass(anv_device_to_handle(device
),
1471 device
->meta_state
.blit
.render_pass
,
1472 &device
->meta_state
.alloc
);
1474 ralloc_free(vs
.nir
);
1475 ralloc_free(fs_1d
.nir
);
1476 ralloc_free(fs_2d
.nir
);
1477 ralloc_free(fs_3d
.nir
);