/*
 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir/nir_builder.h"
27 enum blit2d_src_type
{
28 /* We can make a "normal" image view of this source and just texture
29 * from it like you would in any other shader.
31 BLIT2D_SRC_TYPE_NORMAL
,
33 /* The source is W-tiled and we need to detile manually in the shader.
34 * This will work on any platform but is needed for all W-tiled sources
37 BLIT2D_SRC_TYPE_W_DETILE
,
42 enum blit2d_dst_type
{
43 /* We can bind this destination as a "normal" render target and render
44 * to it just like you would anywhere else.
46 BLIT2D_DST_TYPE_NORMAL
,
48 /* The destination is W-tiled and we need to do the tiling manually in
49 * the shader. This is required for all W-tiled destinations.
51 * Sky Lake adds a feature for providing explicit stencil values in the
52 * shader but mesa doesn't support that yet so neither do we.
54 BLIT2D_DST_TYPE_W_TILE
,
56 /* The destination has a 3-channel RGB format. Since we can't render to
57 * non-power-of-two textures, we have to bind it as a red texture and
58 * select the correct component for the given red pixel in the shader.
66 vk_format_for_size(int bs
)
68 /* The choice of UNORM and UINT formats is very intentional here. Most of
69 * the time, we want to use a UINT format to avoid any rounding error in
70 * the blit. For stencil blits, R8_UINT is required by the hardware.
71 * (It's the only format allowed in conjunction with W-tiling.) Also we
72 * intentionally use the 4-channel formats whenever we can. This is so
73 * that, when we do a RGB <-> RGBX copy, the two formats will line up even
74 * though one of them is 3/4 the size of the other. The choice of UNORM
75 * vs. UINT is also very intentional because Haswell doesn't handle 8 or
76 * 16-bit RGB UINT formats at all so we have to use UNORM there.
77 * Fortunately, the only time we should ever use two different formats in
78 * the table below is for RGB -> RGBA blits and so we will never have any
79 * UNORM/UINT mismatch.
82 case 1: return VK_FORMAT_R8_UINT
;
83 case 2: return VK_FORMAT_R8G8_UINT
;
84 case 3: return VK_FORMAT_R8G8B8_UNORM
;
85 case 4: return VK_FORMAT_R8G8B8A8_UNORM
;
86 case 6: return VK_FORMAT_R16G16B16_UNORM
;
87 case 8: return VK_FORMAT_R16G16B16A16_UNORM
;
88 case 12: return VK_FORMAT_R32G32B32_UINT
;
89 case 16: return VK_FORMAT_R32G32B32A32_UINT
;
91 unreachable("Invalid format block size");
96 create_iview(struct anv_cmd_buffer
*cmd_buffer
,
97 struct anv_meta_blit2d_surf
*surf
,
99 VkImageUsageFlags usage
,
103 struct anv_image_view
*iview
)
105 const VkImageCreateInfo image_info
= {
106 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
107 .imageType
= VK_IMAGE_TYPE_2D
,
108 .format
= vk_format_for_size(surf
->bs
),
117 .tiling
= surf
->tiling
== ISL_TILING_LINEAR
?
118 VK_IMAGE_TILING_LINEAR
: VK_IMAGE_TILING_OPTIMAL
,
122 /* Create the VkImage that is bound to the surface's memory. */
123 anv_image_create(anv_device_to_handle(cmd_buffer
->device
),
124 &(struct anv_image_create_info
) {
125 .vk_info
= &image_info
,
126 .isl_tiling_flags
= 1 << surf
->tiling
,
127 .stride
= surf
->pitch
,
128 }, &cmd_buffer
->pool
->alloc
, img
);
130 /* We could use a vk call to bind memory, but that would require
131 * creating a dummy memory object etc. so there's really no point.
133 anv_image_from_handle(*img
)->bo
= surf
->bo
;
134 anv_image_from_handle(*img
)->offset
= surf
->base_offset
+ offset
;
136 anv_image_view_init(iview
, cmd_buffer
->device
,
137 &(VkImageViewCreateInfo
) {
138 .sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
,
140 .viewType
= VK_IMAGE_VIEW_TYPE_2D
,
141 .format
= image_info
.format
,
142 .subresourceRange
= {
143 .aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
,
149 }, cmd_buffer
, usage
);
152 struct blit2d_src_temps
{
154 struct anv_image_view iview
;
156 struct anv_buffer buffer
;
157 struct anv_buffer_view bview
;
159 VkDescriptorPool desc_pool
;
164 blit2d_bind_src(struct anv_cmd_buffer
*cmd_buffer
,
165 struct anv_meta_blit2d_surf
*src
,
166 enum blit2d_src_type src_type
,
167 struct anv_meta_blit2d_rect
*rect
,
168 struct blit2d_src_temps
*tmp
)
170 struct anv_device
*device
= cmd_buffer
->device
;
171 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
173 if (src_type
== BLIT2D_SRC_TYPE_NORMAL
) {
175 isl_tiling_get_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
176 src
->tiling
, src
->bs
, src
->pitch
,
177 rect
->src_x
, rect
->src_y
,
178 &offset
, &rect
->src_x
, &rect
->src_y
);
180 create_iview(cmd_buffer
, src
, offset
, VK_IMAGE_USAGE_SAMPLED_BIT
,
181 rect
->src_x
+ rect
->width
, rect
->src_y
+ rect
->height
,
182 &tmp
->image
, &tmp
->iview
);
184 anv_CreateDescriptorPool(vk_device
,
185 &(const VkDescriptorPoolCreateInfo
) {
186 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
,
191 .pPoolSizes
= (VkDescriptorPoolSize
[]) {
193 .type
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
197 }, &cmd_buffer
->pool
->alloc
, &tmp
->desc_pool
);
199 anv_AllocateDescriptorSets(vk_device
,
200 &(VkDescriptorSetAllocateInfo
) {
201 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO
,
202 .descriptorPool
= tmp
->desc_pool
,
203 .descriptorSetCount
= 1,
204 .pSetLayouts
= &device
->meta_state
.blit2d
.img_ds_layout
207 anv_UpdateDescriptorSets(vk_device
,
209 (VkWriteDescriptorSet
[]) {
211 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
214 .dstArrayElement
= 0,
215 .descriptorCount
= 1,
216 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
217 .pImageInfo
= (VkDescriptorImageInfo
[]) {
220 .imageView
= anv_image_view_to_handle(&tmp
->iview
),
221 .imageLayout
= VK_IMAGE_LAYOUT_GENERAL
,
227 anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer
),
228 VK_PIPELINE_BIND_POINT_GRAPHICS
,
229 device
->meta_state
.blit2d
.img_p_layout
, 0, 1,
232 assert(src_type
== BLIT2D_SRC_TYPE_W_DETILE
);
233 assert(src
->tiling
== ISL_TILING_W
);
234 assert(src
->bs
== 1);
236 uint32_t tile_offset
= 0;
237 isl_tiling_get_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
238 ISL_TILING_W
, 1, src
->pitch
,
239 rect
->src_x
, rect
->src_y
,
241 &rect
->src_x
, &rect
->src_y
);
243 tmp
->buffer
= (struct anv_buffer
) {
245 .size
= align_u32(rect
->src_y
+ rect
->height
, 64) * src
->pitch
,
246 .usage
= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT
,
248 .offset
= src
->base_offset
+ tile_offset
,
251 anv_buffer_view_init(&tmp
->bview
, device
,
252 &(VkBufferViewCreateInfo
) {
253 .sType
= VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
,
254 .buffer
= anv_buffer_to_handle(&tmp
->buffer
),
255 .format
= VK_FORMAT_R8_UINT
,
257 .range
= VK_WHOLE_SIZE
,
260 anv_CreateDescriptorPool(vk_device
,
261 &(const VkDescriptorPoolCreateInfo
) {
262 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
,
267 .pPoolSizes
= (VkDescriptorPoolSize
[]) {
269 .type
= VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
,
273 }, &cmd_buffer
->pool
->alloc
, &tmp
->desc_pool
);
275 anv_AllocateDescriptorSets(vk_device
,
276 &(VkDescriptorSetAllocateInfo
) {
277 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO
,
278 .descriptorPool
= tmp
->desc_pool
,
279 .descriptorSetCount
= 1,
280 .pSetLayouts
= &device
->meta_state
.blit2d
.buf_ds_layout
283 anv_UpdateDescriptorSets(vk_device
,
285 (VkWriteDescriptorSet
[]) {
287 .sType
= VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
,
290 .dstArrayElement
= 0,
291 .descriptorCount
= 1,
292 .descriptorType
= VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
,
293 .pTexelBufferView
= (VkBufferView
[]) {
294 anv_buffer_view_to_handle(&tmp
->bview
),
299 anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer
),
300 VK_PIPELINE_BIND_POINT_GRAPHICS
,
301 device
->meta_state
.blit2d
.buf_p_layout
, 0, 1,
307 blit2d_unbind_src(struct anv_cmd_buffer
*cmd_buffer
,
308 enum blit2d_src_type src_type
,
309 struct blit2d_src_temps
*tmp
)
311 anv_DestroyDescriptorPool(anv_device_to_handle(cmd_buffer
->device
),
312 tmp
->desc_pool
, &cmd_buffer
->pool
->alloc
);
313 if (src_type
== BLIT2D_SRC_TYPE_NORMAL
) {
314 anv_DestroyImage(anv_device_to_handle(cmd_buffer
->device
),
315 tmp
->image
, &cmd_buffer
->pool
->alloc
);
319 struct blit2d_dst_temps
{
321 struct anv_image_view iview
;
326 blit2d_bind_dst(struct anv_cmd_buffer
*cmd_buffer
,
327 struct anv_meta_blit2d_surf
*dst
,
331 struct blit2d_dst_temps
*tmp
)
333 create_iview(cmd_buffer
, dst
, offset
, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
334 width
, height
, &tmp
->image
, &tmp
->iview
);
336 anv_CreateFramebuffer(anv_device_to_handle(cmd_buffer
->device
),
337 &(VkFramebufferCreateInfo
) {
338 .sType
= VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
,
339 .attachmentCount
= 1,
340 .pAttachments
= (VkImageView
[]) {
341 anv_image_view_to_handle(&tmp
->iview
),
346 }, &cmd_buffer
->pool
->alloc
, &tmp
->fb
);
349 anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 1,
361 blit2d_unbind_dst(struct anv_cmd_buffer
*cmd_buffer
,
362 struct blit2d_dst_temps
*tmp
)
364 VkDevice vk_device
= anv_device_to_handle(cmd_buffer
->device
);
365 anv_DestroyFramebuffer(vk_device
, tmp
->fb
, &cmd_buffer
->pool
->alloc
);
366 anv_DestroyImage(vk_device
, tmp
->image
, &cmd_buffer
->pool
->alloc
);
370 anv_meta_end_blit2d(struct anv_cmd_buffer
*cmd_buffer
,
371 struct anv_meta_saved_state
*save
)
373 anv_meta_restore(save
, cmd_buffer
);
377 anv_meta_begin_blit2d(struct anv_cmd_buffer
*cmd_buffer
,
378 struct anv_meta_saved_state
*save
)
380 anv_meta_save(save
, cmd_buffer
,
381 (1 << VK_DYNAMIC_STATE_VIEWPORT
));
385 bind_pipeline(struct anv_cmd_buffer
*cmd_buffer
,
386 enum blit2d_src_type src_type
,
387 enum blit2d_dst_type dst_type
)
389 VkPipeline pipeline
=
390 cmd_buffer
->device
->meta_state
.blit2d
.pipelines
[src_type
][dst_type
];
392 if (cmd_buffer
->state
.pipeline
!= anv_pipeline_from_handle(pipeline
)) {
393 anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer
),
394 VK_PIPELINE_BIND_POINT_GRAPHICS
, pipeline
);
399 anv_meta_blit2d_normal_dst(struct anv_cmd_buffer
*cmd_buffer
,
400 struct anv_meta_blit2d_surf
*src
,
401 enum blit2d_src_type src_type
,
402 struct anv_meta_blit2d_surf
*dst
,
404 struct anv_meta_blit2d_rect
*rects
)
406 struct anv_device
*device
= cmd_buffer
->device
;
408 for (unsigned r
= 0; r
< num_rects
; ++r
) {
409 struct blit2d_src_temps src_temps
;
410 blit2d_bind_src(cmd_buffer
, src
, src_type
, &rects
[r
], &src_temps
);
413 isl_tiling_get_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
414 dst
->tiling
, dst
->bs
, dst
->pitch
,
415 rects
[r
].dst_x
, rects
[r
].dst_y
,
417 &rects
[r
].dst_x
, &rects
[r
].dst_y
);
419 struct blit2d_dst_temps dst_temps
;
420 blit2d_bind_dst(cmd_buffer
, dst
, offset
, rects
[r
].dst_x
+ rects
[r
].width
,
421 rects
[r
].dst_y
+ rects
[r
].height
, &dst_temps
);
423 struct blit_vb_data
{
428 unsigned vb_size
= sizeof(struct anv_vue_header
) + 3 * sizeof(*vb_data
);
430 struct anv_state vb_state
=
431 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, vb_size
, 16);
432 memset(vb_state
.map
, 0, sizeof(struct anv_vue_header
));
433 vb_data
= vb_state
.map
+ sizeof(struct anv_vue_header
);
435 vb_data
[0] = (struct blit_vb_data
) {
437 rects
[r
].dst_x
+ rects
[r
].width
,
438 rects
[r
].dst_y
+ rects
[r
].height
,
441 rects
[r
].src_x
+ rects
[r
].width
,
442 rects
[r
].src_y
+ rects
[r
].height
,
447 vb_data
[1] = (struct blit_vb_data
) {
450 rects
[r
].dst_y
+ rects
[r
].height
,
454 rects
[r
].src_y
+ rects
[r
].height
,
459 vb_data
[2] = (struct blit_vb_data
) {
471 if (!device
->info
.has_llc
)
472 anv_state_clflush(vb_state
);
474 struct anv_buffer vertex_buffer
= {
477 .bo
= &device
->dynamic_state_block_pool
.bo
,
478 .offset
= vb_state
.offset
,
481 anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 2,
483 anv_buffer_to_handle(&vertex_buffer
),
484 anv_buffer_to_handle(&vertex_buffer
)
488 sizeof(struct anv_vue_header
),
491 ANV_CALL(CmdBeginRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
),
492 &(VkRenderPassBeginInfo
) {
493 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
494 .renderPass
= device
->meta_state
.blit2d
.render_pass
,
495 .framebuffer
= dst_temps
.fb
,
497 .offset
= { rects
[r
].dst_x
, rects
[r
].dst_y
, },
498 .extent
= { rects
[r
].width
, rects
[r
].height
},
500 .clearValueCount
= 0,
501 .pClearValues
= NULL
,
502 }, VK_SUBPASS_CONTENTS_INLINE
);
504 bind_pipeline(cmd_buffer
, src_type
, BLIT2D_DST_TYPE_NORMAL
);
506 ANV_CALL(CmdDraw
)(anv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
508 ANV_CALL(CmdEndRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
));
510 /* At the point where we emit the draw call, all data from the
511 * descriptor sets, etc. has been used. We are free to delete it.
513 blit2d_unbind_src(cmd_buffer
, src_type
, &src_temps
);
514 blit2d_unbind_dst(cmd_buffer
, &dst_temps
);
519 anv_meta_blit2d_w_tiled_dst(struct anv_cmd_buffer
*cmd_buffer
,
520 struct anv_meta_blit2d_surf
*src
,
521 enum blit2d_src_type src_type
,
522 struct anv_meta_blit2d_surf
*dst
,
524 struct anv_meta_blit2d_rect
*rects
)
526 struct anv_device
*device
= cmd_buffer
->device
;
528 for (unsigned r
= 0; r
< num_rects
; ++r
) {
529 struct blit2d_src_temps src_temps
;
530 blit2d_bind_src(cmd_buffer
, src
, src_type
, &rects
[r
], &src_temps
);
532 assert(dst
->bs
== 1);
534 isl_tiling_get_intratile_offset_el(&cmd_buffer
->device
->isl_dev
,
535 ISL_TILING_W
, 1, dst
->pitch
,
536 rects
[r
].dst_x
, rects
[r
].dst_y
,
538 &rects
[r
].dst_x
, &rects
[r
].dst_y
);
540 /* The original coordinates were in terms of an actual W-tiled offset
541 * but we are binding this image as Y-tiled. We need to adjust our
542 * rectangle accordingly.
544 uint32_t xmin_Y
, xmax_Y
, ymin_Y
, ymax_Y
;
545 xmin_Y
= (rects
[r
].dst_x
/ 8) * 16;
546 xmax_Y
= DIV_ROUND_UP(rects
[r
].dst_x
+ rects
[r
].width
, 8) * 16;
547 ymin_Y
= (rects
[r
].dst_y
/ 4) * 2;
548 ymax_Y
= DIV_ROUND_UP(rects
[r
].dst_y
+ rects
[r
].height
, 4) * 2;
550 struct anv_meta_blit2d_surf dst_Y
= {
552 .tiling
= ISL_TILING_Y0
,
553 .base_offset
= dst
->base_offset
,
555 .pitch
= dst
->pitch
* 2,
558 struct blit2d_dst_temps dst_temps
;
559 blit2d_bind_dst(cmd_buffer
, &dst_Y
, offset
, xmax_Y
, ymax_Y
, &dst_temps
);
561 struct blit_vb_header
{
562 struct anv_vue_header vue
;
563 int32_t tex_offset
[2];
568 struct blit_vb_data
{
572 unsigned vb_size
= sizeof(*vb_header
) + 3 * sizeof(*vb_data
);
574 struct anv_state vb_state
=
575 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer
, vb_size
, 16);
576 vb_header
= vb_state
.map
;
578 *vb_header
= (struct blit_vb_header
) {
580 rects
[r
].src_x
- rects
[r
].dst_x
,
581 rects
[r
].src_y
- rects
[r
].dst_y
,
583 .tex_pitch
= src
->pitch
,
587 rects
[r
].dst_x
+ rects
[r
].width
,
588 rects
[r
].dst_y
+ rects
[r
].height
,
592 vb_data
= (void *)(vb_header
+ 1);
594 vb_data
[0] = (struct blit_vb_data
) {
601 vb_data
[1] = (struct blit_vb_data
) {
608 vb_data
[2] = (struct blit_vb_data
) {
615 if (!device
->info
.has_llc
)
616 anv_state_clflush(vb_state
);
618 struct anv_buffer vertex_buffer
= {
621 .bo
= &device
->dynamic_state_block_pool
.bo
,
622 .offset
= vb_state
.offset
,
625 anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer
), 0, 2,
627 anv_buffer_to_handle(&vertex_buffer
),
628 anv_buffer_to_handle(&vertex_buffer
)
632 (void *)vb_data
- vb_state
.map
,
635 ANV_CALL(CmdBeginRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
),
636 &(VkRenderPassBeginInfo
) {
637 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
,
638 .renderPass
= device
->meta_state
.blit2d
.render_pass
,
639 .framebuffer
= dst_temps
.fb
,
641 .offset
= { xmin_Y
, ymin_Y
, },
642 .extent
= { xmax_Y
- xmin_Y
, ymax_Y
- ymin_Y
},
644 .clearValueCount
= 0,
645 .pClearValues
= NULL
,
646 }, VK_SUBPASS_CONTENTS_INLINE
);
648 bind_pipeline(cmd_buffer
, src_type
, BLIT2D_DST_TYPE_W_TILE
);
650 ANV_CALL(CmdDraw
)(anv_cmd_buffer_to_handle(cmd_buffer
), 3, 1, 0, 0);
652 ANV_CALL(CmdEndRenderPass
)(anv_cmd_buffer_to_handle(cmd_buffer
));
654 /* At the point where we emit the draw call, all data from the
655 * descriptor sets, etc. has been used. We are free to delete it.
657 blit2d_unbind_src(cmd_buffer
, src_type
, &src_temps
);
658 blit2d_unbind_dst(cmd_buffer
, &dst_temps
);
663 anv_meta_blit2d(struct anv_cmd_buffer
*cmd_buffer
,
664 struct anv_meta_blit2d_surf
*src
,
665 struct anv_meta_blit2d_surf
*dst
,
667 struct anv_meta_blit2d_rect
*rects
)
669 enum blit2d_src_type src_type
;
670 if (src
->tiling
== ISL_TILING_W
&& cmd_buffer
->device
->info
.gen
< 8) {
671 src_type
= BLIT2D_SRC_TYPE_W_DETILE
;
673 src_type
= BLIT2D_SRC_TYPE_NORMAL
;
676 if (dst
->tiling
== ISL_TILING_W
) {
677 anv_meta_blit2d_w_tiled_dst(cmd_buffer
, src
, src_type
, dst
,
680 } else if (dst
->bs
% 3 == 0) {
681 anv_finishme("Blitting to RGB destinations not yet supported");
684 assert(util_is_power_of_two(dst
->bs
));
685 anv_meta_blit2d_normal_dst(cmd_buffer
, src
, src_type
, dst
,
691 build_nir_vertex_shader(void)
693 const struct glsl_type
*vec4
= glsl_vec4_type();
696 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_VERTEX
, NULL
);
697 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit_vs");
699 nir_variable
*pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
701 pos_in
->data
.location
= VERT_ATTRIB_GENERIC0
;
702 nir_variable
*pos_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
703 vec4
, "gl_Position");
704 pos_out
->data
.location
= VARYING_SLOT_POS
;
705 nir_copy_var(&b
, pos_out
, pos_in
);
707 nir_variable
*tex_pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
709 tex_pos_in
->data
.location
= VERT_ATTRIB_GENERIC1
;
710 nir_variable
*tex_pos_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
712 tex_pos_out
->data
.location
= VARYING_SLOT_VAR0
;
713 tex_pos_out
->data
.interpolation
= INTERP_QUALIFIER_SMOOTH
;
714 nir_copy_var(&b
, tex_pos_out
, tex_pos_in
);
716 nir_variable
*other_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
718 other_in
->data
.location
= VERT_ATTRIB_GENERIC2
;
719 nir_variable
*other_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
721 other_out
->data
.location
= VARYING_SLOT_VAR1
;
722 other_out
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
723 nir_copy_var(&b
, other_out
, other_in
);
728 typedef nir_ssa_def
* (*texel_fetch_build_func
)(struct nir_builder
*,
730 nir_ssa_def
*, nir_ssa_def
*);
733 nir_copy_bits(struct nir_builder
*b
, nir_ssa_def
*dst
, unsigned dst_offset
,
734 nir_ssa_def
*src
, unsigned src_offset
, unsigned num_bits
)
736 unsigned src_mask
= (~1u >> (32 - num_bits
)) << src_offset
;
737 nir_ssa_def
*masked
= nir_iand(b
, src
, nir_imm_int(b
, src_mask
));
739 nir_ssa_def
*shifted
;
740 if (dst_offset
> src_offset
) {
741 shifted
= nir_ishl(b
, masked
, nir_imm_int(b
, dst_offset
- src_offset
));
742 } else if (dst_offset
< src_offset
) {
743 shifted
= nir_ushr(b
, masked
, nir_imm_int(b
, src_offset
- dst_offset
));
745 assert(dst_offset
== src_offset
);
749 return nir_ior(b
, dst
, shifted
);
753 build_nir_w_tiled_fetch(struct nir_builder
*b
, struct anv_device
*device
,
754 nir_ssa_def
*tex_pos
, nir_ssa_def
*tex_pitch
)
756 nir_ssa_def
*x
= nir_channel(b
, tex_pos
, 0);
757 nir_ssa_def
*y
= nir_channel(b
, tex_pos
, 1);
759 /* First, compute the block-aligned offset */
760 nir_ssa_def
*x_major
= nir_ushr(b
, x
, nir_imm_int(b
, 6));
761 nir_ssa_def
*y_major
= nir_ushr(b
, y
, nir_imm_int(b
, 6));
762 nir_ssa_def
*offset
=
763 nir_iadd(b
, nir_imul(b
, y_major
,
764 nir_imul(b
, tex_pitch
, nir_imm_int(b
, 64))),
765 nir_imul(b
, x_major
, nir_imm_int(b
, 4096)));
767 /* Compute the bottom 12 bits of the offset */
768 offset
= nir_copy_bits(b
, offset
, 0, x
, 0, 1);
769 offset
= nir_copy_bits(b
, offset
, 1, y
, 0, 1);
770 offset
= nir_copy_bits(b
, offset
, 2, x
, 1, 1);
771 offset
= nir_copy_bits(b
, offset
, 3, y
, 1, 1);
772 offset
= nir_copy_bits(b
, offset
, 4, x
, 2, 1);
773 offset
= nir_copy_bits(b
, offset
, 5, y
, 2, 4);
774 offset
= nir_copy_bits(b
, offset
, 9, x
, 3, 3);
776 if (device
->isl_dev
.has_bit6_swizzling
) {
777 offset
= nir_ixor(b
, offset
,
778 nir_ushr(b
, nir_iand(b
, offset
, nir_imm_int(b
, 0x0200)),
782 const struct glsl_type
*sampler_type
=
783 glsl_sampler_type(GLSL_SAMPLER_DIM_BUF
, false, false, GLSL_TYPE_FLOAT
);
784 nir_variable
*sampler
= nir_variable_create(b
->shader
, nir_var_uniform
,
785 sampler_type
, "s_tex");
786 sampler
->data
.descriptor_set
= 0;
787 sampler
->data
.binding
= 0;
789 nir_tex_instr
*tex
= nir_tex_instr_create(b
->shader
, 1);
790 tex
->sampler_dim
= GLSL_SAMPLER_DIM_BUF
;
791 tex
->op
= nir_texop_txf
;
792 tex
->src
[0].src_type
= nir_tex_src_coord
;
793 tex
->src
[0].src
= nir_src_for_ssa(offset
);
794 tex
->dest_type
= nir_type_float
; /* TODO */
795 tex
->is_array
= false;
796 tex
->coord_components
= 1;
797 tex
->texture
= nir_deref_var_create(tex
, sampler
);
800 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
801 nir_builder_instr_insert(b
, &tex
->instr
);
803 return &tex
->dest
.ssa
;
807 build_nir_texel_fetch(struct nir_builder
*b
, struct anv_device
*device
,
808 nir_ssa_def
*tex_pos
, nir_ssa_def
*tex_pitch
)
810 const struct glsl_type
*sampler_type
=
811 glsl_sampler_type(GLSL_SAMPLER_DIM_2D
, false, false, GLSL_TYPE_FLOAT
);
812 nir_variable
*sampler
= nir_variable_create(b
->shader
, nir_var_uniform
,
813 sampler_type
, "s_tex");
814 sampler
->data
.descriptor_set
= 0;
815 sampler
->data
.binding
= 0;
817 nir_tex_instr
*tex
= nir_tex_instr_create(b
->shader
, 2);
818 tex
->sampler_dim
= GLSL_SAMPLER_DIM_2D
;
819 tex
->op
= nir_texop_txf
;
820 tex
->src
[0].src_type
= nir_tex_src_coord
;
821 tex
->src
[0].src
= nir_src_for_ssa(tex_pos
);
822 tex
->src
[1].src_type
= nir_tex_src_lod
;
823 tex
->src
[1].src
= nir_src_for_ssa(nir_imm_int(b
, 0));
824 tex
->dest_type
= nir_type_float
; /* TODO */
825 tex
->is_array
= false;
826 tex
->coord_components
= 2;
827 tex
->texture
= nir_deref_var_create(tex
, sampler
);
830 nir_ssa_dest_init(&tex
->instr
, &tex
->dest
, 4, 32, "tex");
831 nir_builder_instr_insert(b
, &tex
->instr
);
833 return &tex
->dest
.ssa
;
836 static const VkPipelineVertexInputStateCreateInfo normal_vi_create_info
= {
837 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
838 .vertexBindingDescriptionCount
= 2,
839 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
843 .inputRate
= VK_VERTEX_INPUT_RATE_INSTANCE
847 .stride
= 5 * sizeof(float),
848 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
851 .vertexAttributeDescriptionCount
= 3,
852 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
857 .format
= VK_FORMAT_R32G32B32A32_UINT
,
864 .format
= VK_FORMAT_R32G32_SFLOAT
,
868 /* Texture Coordinate */
871 .format
= VK_FORMAT_R32G32B32_SFLOAT
,
878 build_nir_copy_fragment_shader(struct anv_device
*device
,
879 texel_fetch_build_func txf_func
)
881 const struct glsl_type
*vec4
= glsl_vec4_type();
882 const struct glsl_type
*vec3
= glsl_vector_type(GLSL_TYPE_FLOAT
, 3);
885 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
886 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit2d_fs");
888 nir_variable
*tex_pos_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
890 tex_pos_in
->data
.location
= VARYING_SLOT_VAR0
;
892 nir_variable
*color_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
894 color_out
->data
.location
= FRAG_RESULT_DATA0
;
896 nir_ssa_def
*pos_int
= nir_f2i(&b
, nir_load_var(&b
, tex_pos_in
));
897 unsigned swiz
[4] = { 0, 1 };
898 nir_ssa_def
*tex_pos
= nir_swizzle(&b
, pos_int
, swiz
, 2, false);
899 nir_ssa_def
*tex_pitch
= nir_channel(&b
, pos_int
, 2);
901 nir_ssa_def
*color
= txf_func(&b
, device
, tex_pos
, tex_pitch
);
902 nir_store_var(&b
, color_out
, color
, 0xf);
907 static const VkPipelineVertexInputStateCreateInfo w_tiled_vi_create_info
= {
908 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
,
909 .vertexBindingDescriptionCount
= 2,
910 .pVertexBindingDescriptions
= (VkVertexInputBindingDescription
[]) {
914 .inputRate
= VK_VERTEX_INPUT_RATE_INSTANCE
918 .stride
= 2 * sizeof(float),
919 .inputRate
= VK_VERTEX_INPUT_RATE_VERTEX
922 .vertexAttributeDescriptionCount
= 4,
923 .pVertexAttributeDescriptions
= (VkVertexInputAttributeDescription
[]) {
928 .format
= VK_FORMAT_R32G32B32A32_UINT
,
935 .format
= VK_FORMAT_R32G32_SFLOAT
,
942 .format
= VK_FORMAT_R32G32B32_UINT
,
946 /* Destination bounds */
949 .format
= VK_FORMAT_R32G32B32A32_UINT
,
956 build_nir_w_tiled_fragment_shader(struct anv_device
*device
,
957 texel_fetch_build_func txf_func
)
959 const struct glsl_type
*vec4
= glsl_vec4_type();
960 const struct glsl_type
*ivec3
= glsl_vector_type(GLSL_TYPE_INT
, 3);
961 const struct glsl_type
*uvec4
= glsl_vector_type(GLSL_TYPE_UINT
, 4);
964 nir_builder_init_simple_shader(&b
, NULL
, MESA_SHADER_FRAGMENT
, NULL
);
965 b
.shader
->info
.name
= ralloc_strdup(b
.shader
, "meta_blit2d_fs");
967 /* We need gl_FragCoord so we know our Y-tiled position */
968 nir_variable
*frag_coord_in
= nir_variable_create(b
.shader
,
970 vec4
, "gl_FragCoord");
971 frag_coord_in
->data
.location
= VARYING_SLOT_POS
;
972 frag_coord_in
->data
.origin_upper_left
= true;
974 /* In location 0 we have an ivec3 that has the offset from dest to
975 * source in the first two components and the stride in the third.
977 nir_variable
*tex_off_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
979 tex_off_in
->data
.location
= VARYING_SLOT_VAR0
;
980 tex_off_in
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
982 /* In location 1 we have a uvec4 that gives us the bounds of the
983 * destination. We need to discard if we get outside this boundary.
985 nir_variable
*bounds_in
= nir_variable_create(b
.shader
, nir_var_shader_in
,
987 bounds_in
->data
.location
= VARYING_SLOT_VAR1
;
988 bounds_in
->data
.interpolation
= INTERP_QUALIFIER_FLAT
;
990 nir_variable
*color_out
= nir_variable_create(b
.shader
, nir_var_shader_out
,
992 color_out
->data
.location
= FRAG_RESULT_DATA0
;
994 nir_ssa_def
*frag_coord_int
= nir_f2i(&b
, nir_load_var(&b
, frag_coord_in
));
995 nir_ssa_def
*x_Y
= nir_channel(&b
, frag_coord_int
, 0);
996 nir_ssa_def
*y_Y
= nir_channel(&b
, frag_coord_int
, 1);
998 /* Compute the W-tiled position from the Y-tiled position */
999 nir_ssa_def
*x_W
= nir_iand(&b
, x_Y
, nir_imm_int(&b
, 0xffffff80));
1000 x_W
= nir_ushr(&b
, x_W
, nir_imm_int(&b
, 1));
1001 x_W
= nir_copy_bits(&b
, x_W
, 0, x_Y
, 0, 1);
1002 x_W
= nir_copy_bits(&b
, x_W
, 1, x_Y
, 2, 1);
1003 x_W
= nir_copy_bits(&b
, x_W
, 2, y_Y
, 0, 1);
1004 x_W
= nir_copy_bits(&b
, x_W
, 3, x_Y
, 4, 3);
1006 nir_ssa_def
*y_W
= nir_iand(&b
, y_Y
, nir_imm_int(&b
, 0xffffffe0));
1007 y_W
= nir_ishl(&b
, y_W
, nir_imm_int(&b
, 1));
1008 y_W
= nir_copy_bits(&b
, y_W
, 0, x_Y
, 1, 1);
1009 y_W
= nir_copy_bits(&b
, y_W
, 1, x_Y
, 3, 1);
1010 y_W
= nir_copy_bits(&b
, y_W
, 2, y_Y
, 1, 4);
1012 /* Figure out if we are out-of-bounds and discard */
1013 nir_ssa_def
*bounds
= nir_load_var(&b
, bounds_in
);
1015 nir_ior(&b
, nir_ult(&b
, x_W
, nir_channel(&b
, bounds
, 0)),
1016 nir_ior(&b
, nir_ult(&b
, y_W
, nir_channel(&b
, bounds
, 1)),
1017 nir_ior(&b
, nir_uge(&b
, x_W
, nir_channel(&b
, bounds
, 2)),
1018 nir_uge(&b
, y_W
, nir_channel(&b
, bounds
, 3)))));
1020 nir_intrinsic_instr
*discard
=
1021 nir_intrinsic_instr_create(b
.shader
, nir_intrinsic_discard_if
);
1022 discard
->src
[0] = nir_src_for_ssa(oob
);
1023 nir_builder_instr_insert(&b
, &discard
->instr
);
1025 unsigned swiz
[4] = { 0, 1, 0, 0 };
1026 nir_ssa_def
*tex_off
=
1027 nir_swizzle(&b
, nir_load_var(&b
, tex_off_in
), swiz
, 2, false);
1028 nir_ssa_def
*tex_pos
= nir_iadd(&b
, nir_vec2(&b
, x_W
, y_W
), tex_off
);
1029 nir_ssa_def
*tex_pitch
= nir_channel(&b
, nir_load_var(&b
, tex_off_in
), 2);
1031 nir_ssa_def
*color
= txf_func(&b
, device
, tex_pos
, tex_pitch
);
1032 nir_store_var(&b
, color_out
, color
, 0xf);
1038 anv_device_finish_meta_blit2d_state(struct anv_device
*device
)
1040 if (device
->meta_state
.blit2d
.render_pass
) {
1041 anv_DestroyRenderPass(anv_device_to_handle(device
),
1042 device
->meta_state
.blit2d
.render_pass
,
1043 &device
->meta_state
.alloc
);
1046 if (device
->meta_state
.blit2d
.img_p_layout
) {
1047 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
1048 device
->meta_state
.blit2d
.img_p_layout
,
1049 &device
->meta_state
.alloc
);
1052 if (device
->meta_state
.blit2d
.img_ds_layout
) {
1053 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
1054 device
->meta_state
.blit2d
.img_ds_layout
,
1055 &device
->meta_state
.alloc
);
1058 if (device
->meta_state
.blit2d
.buf_p_layout
) {
1059 anv_DestroyPipelineLayout(anv_device_to_handle(device
),
1060 device
->meta_state
.blit2d
.buf_p_layout
,
1061 &device
->meta_state
.alloc
);
1064 if (device
->meta_state
.blit2d
.buf_ds_layout
) {
1065 anv_DestroyDescriptorSetLayout(anv_device_to_handle(device
),
1066 device
->meta_state
.blit2d
.buf_ds_layout
,
1067 &device
->meta_state
.alloc
);
1070 for (unsigned src
= 0; src
< BLIT2D_NUM_SRC_TYPES
; src
++) {
1071 for (unsigned dst
= 0; dst
< BLIT2D_NUM_DST_TYPES
; dst
++) {
1072 if (device
->meta_state
.blit2d
.pipelines
[src
][dst
]) {
1073 anv_DestroyPipeline(anv_device_to_handle(device
),
1074 device
->meta_state
.blit2d
.pipelines
[src
][dst
],
1075 &device
->meta_state
.alloc
);
1082 blit2d_init_pipeline(struct anv_device
*device
,
1083 enum blit2d_src_type src_type
,
1084 enum blit2d_dst_type dst_type
)
1088 texel_fetch_build_func src_func
;
1090 case BLIT2D_SRC_TYPE_NORMAL
:
1091 src_func
= build_nir_texel_fetch
;
1093 case BLIT2D_SRC_TYPE_W_DETILE
:
1094 src_func
= build_nir_w_tiled_fetch
;
1097 unreachable("Invalid blit2d source type");
1100 const VkPipelineVertexInputStateCreateInfo
*vi_create_info
;
1101 struct anv_shader_module fs
= { .nir
= NULL
};
1103 case BLIT2D_DST_TYPE_NORMAL
:
1104 fs
.nir
= build_nir_copy_fragment_shader(device
, src_func
);
1105 vi_create_info
= &normal_vi_create_info
;
1107 case BLIT2D_DST_TYPE_W_TILE
:
1108 fs
.nir
= build_nir_w_tiled_fragment_shader(device
, src_func
);
1109 vi_create_info
= &w_tiled_vi_create_info
;
1111 case BLIT2D_DST_TYPE_RGB
:
1112 /* Not yet supported */
1117 /* We don't use a vertex shader for blitting, but instead build and pass
1118 * the VUEs directly to the rasterization backend. However, we do need
1119 * to provide GLSL source for the vertex shader so that the compiler
1120 * does not dead-code our inputs.
1122 struct anv_shader_module vs
= {
1123 .nir
= build_nir_vertex_shader(),
1126 VkPipelineShaderStageCreateInfo pipeline_shader_stages
[] = {
1128 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
1129 .stage
= VK_SHADER_STAGE_VERTEX_BIT
,
1130 .module
= anv_shader_module_to_handle(&vs
),
1132 .pSpecializationInfo
= NULL
1134 .sType
= VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
,
1135 .stage
= VK_SHADER_STAGE_FRAGMENT_BIT
,
1136 .module
= anv_shader_module_to_handle(&fs
),
1138 .pSpecializationInfo
= NULL
1142 const VkGraphicsPipelineCreateInfo vk_pipeline_info
= {
1143 .sType
= VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
,
1144 .stageCount
= ARRAY_SIZE(pipeline_shader_stages
),
1145 .pStages
= pipeline_shader_stages
,
1146 .pVertexInputState
= vi_create_info
,
1147 .pInputAssemblyState
= &(VkPipelineInputAssemblyStateCreateInfo
) {
1148 .sType
= VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
,
1149 .topology
= VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
,
1150 .primitiveRestartEnable
= false,
1152 .pViewportState
= &(VkPipelineViewportStateCreateInfo
) {
1153 .sType
= VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
,
1157 .pRasterizationState
= &(VkPipelineRasterizationStateCreateInfo
) {
1158 .sType
= VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
,
1159 .rasterizerDiscardEnable
= false,
1160 .polygonMode
= VK_POLYGON_MODE_FILL
,
1161 .cullMode
= VK_CULL_MODE_NONE
,
1162 .frontFace
= VK_FRONT_FACE_COUNTER_CLOCKWISE
1164 .pMultisampleState
= &(VkPipelineMultisampleStateCreateInfo
) {
1165 .sType
= VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
,
1166 .rasterizationSamples
= 1,
1167 .sampleShadingEnable
= false,
1168 .pSampleMask
= (VkSampleMask
[]) { UINT32_MAX
},
1170 .pColorBlendState
= &(VkPipelineColorBlendStateCreateInfo
) {
1171 .sType
= VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
,
1172 .attachmentCount
= 1,
1173 .pAttachments
= (VkPipelineColorBlendAttachmentState
[]) {
1175 VK_COLOR_COMPONENT_A_BIT
|
1176 VK_COLOR_COMPONENT_R_BIT
|
1177 VK_COLOR_COMPONENT_G_BIT
|
1178 VK_COLOR_COMPONENT_B_BIT
},
1181 .pDynamicState
= &(VkPipelineDynamicStateCreateInfo
) {
1182 .sType
= VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
,
1183 .dynamicStateCount
= 9,
1184 .pDynamicStates
= (VkDynamicState
[]) {
1185 VK_DYNAMIC_STATE_VIEWPORT
,
1186 VK_DYNAMIC_STATE_SCISSOR
,
1187 VK_DYNAMIC_STATE_LINE_WIDTH
,
1188 VK_DYNAMIC_STATE_DEPTH_BIAS
,
1189 VK_DYNAMIC_STATE_BLEND_CONSTANTS
,
1190 VK_DYNAMIC_STATE_DEPTH_BOUNDS
,
1191 VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
,
1192 VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
,
1193 VK_DYNAMIC_STATE_STENCIL_REFERENCE
,
1197 .layout
= device
->meta_state
.blit2d
.img_p_layout
,
1198 .renderPass
= device
->meta_state
.blit2d
.render_pass
,
1202 const struct anv_graphics_pipeline_create_info anv_pipeline_info
= {
1203 .color_attachment_count
= -1,
1204 .use_repclear
= false,
1205 .disable_viewport
= true,
1206 .disable_scissor
= true,
1208 .use_rectlist
= true
1211 result
= anv_graphics_pipeline_create(anv_device_to_handle(device
),
1213 &vk_pipeline_info
, &anv_pipeline_info
,
1214 &device
->meta_state
.alloc
,
1215 &device
->meta_state
.blit2d
.pipelines
[src_type
][dst_type
]);
1217 ralloc_free(vs
.nir
);
1218 ralloc_free(fs
.nir
);
1224 anv_device_init_meta_blit2d_state(struct anv_device
*device
)
1228 zero(device
->meta_state
.blit2d
);
1230 result
= anv_CreateRenderPass(anv_device_to_handle(device
),
1231 &(VkRenderPassCreateInfo
) {
1232 .sType
= VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
,
1233 .attachmentCount
= 1,
1234 .pAttachments
= &(VkAttachmentDescription
) {
1235 .format
= VK_FORMAT_UNDEFINED
, /* Our shaders don't care */
1236 .loadOp
= VK_ATTACHMENT_LOAD_OP_LOAD
,
1237 .storeOp
= VK_ATTACHMENT_STORE_OP_STORE
,
1238 .initialLayout
= VK_IMAGE_LAYOUT_GENERAL
,
1239 .finalLayout
= VK_IMAGE_LAYOUT_GENERAL
,
1242 .pSubpasses
= &(VkSubpassDescription
) {
1243 .pipelineBindPoint
= VK_PIPELINE_BIND_POINT_GRAPHICS
,
1244 .inputAttachmentCount
= 0,
1245 .colorAttachmentCount
= 1,
1246 .pColorAttachments
= &(VkAttachmentReference
) {
1248 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
1250 .pResolveAttachments
= NULL
,
1251 .pDepthStencilAttachment
= &(VkAttachmentReference
) {
1252 .attachment
= VK_ATTACHMENT_UNUSED
,
1253 .layout
= VK_IMAGE_LAYOUT_GENERAL
,
1255 .preserveAttachmentCount
= 1,
1256 .pPreserveAttachments
= (uint32_t[]) { 0 },
1258 .dependencyCount
= 0,
1259 }, &device
->meta_state
.alloc
, &device
->meta_state
.blit2d
.render_pass
);
1260 if (result
!= VK_SUCCESS
)
1263 result
= anv_CreateDescriptorSetLayout(anv_device_to_handle(device
),
1264 &(VkDescriptorSetLayoutCreateInfo
) {
1265 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
1267 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
1270 .descriptorType
= VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
,
1271 .descriptorCount
= 1,
1272 .stageFlags
= VK_SHADER_STAGE_FRAGMENT_BIT
,
1273 .pImmutableSamplers
= NULL
1276 }, &device
->meta_state
.alloc
, &device
->meta_state
.blit2d
.img_ds_layout
);
1277 if (result
!= VK_SUCCESS
)
1280 result
= anv_CreatePipelineLayout(anv_device_to_handle(device
),
1281 &(VkPipelineLayoutCreateInfo
) {
1282 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
1283 .setLayoutCount
= 1,
1284 .pSetLayouts
= &device
->meta_state
.blit2d
.img_ds_layout
,
1286 &device
->meta_state
.alloc
, &device
->meta_state
.blit2d
.img_p_layout
);
1287 if (result
!= VK_SUCCESS
)
1290 result
= anv_CreateDescriptorSetLayout(anv_device_to_handle(device
),
1291 &(VkDescriptorSetLayoutCreateInfo
) {
1292 .sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
,
1294 .pBindings
= (VkDescriptorSetLayoutBinding
[]) {
1297 .descriptorType
= VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
,
1298 .descriptorCount
= 1,
1299 .stageFlags
= VK_SHADER_STAGE_FRAGMENT_BIT
,
1300 .pImmutableSamplers
= NULL
1303 }, &device
->meta_state
.alloc
, &device
->meta_state
.blit2d
.buf_ds_layout
);
1304 if (result
!= VK_SUCCESS
)
1307 result
= anv_CreatePipelineLayout(anv_device_to_handle(device
),
1308 &(VkPipelineLayoutCreateInfo
) {
1309 .sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
,
1310 .setLayoutCount
= 1,
1311 .pSetLayouts
= &device
->meta_state
.blit2d
.buf_ds_layout
,
1313 &device
->meta_state
.alloc
, &device
->meta_state
.blit2d
.buf_p_layout
);
1314 if (result
!= VK_SUCCESS
)
1317 for (unsigned src
= 0; src
< BLIT2D_NUM_SRC_TYPES
; src
++) {
1318 for (unsigned dst
= 0; dst
< BLIT2D_NUM_DST_TYPES
; dst
++) {
1319 result
= blit2d_init_pipeline(device
, src
, dst
);
1320 if (result
!= VK_SUCCESS
)
1328 anv_device_finish_meta_blit2d_state(device
);