8e63eee462df9706f97e1447af874c247166f783
[mesa.git] / src / intel / vulkan / anv_meta_blit2d.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_meta.h"
25 #include "nir/nir_builder.h"
26
/* How the source surface of a 2-D blit must be accessed by the shader.
 * Indexes the first dimension of meta_state.blit2d.pipelines.
 */
enum blit2d_src_type {
   /* We can make a "normal" image view of this source and just texture
    * from it like you would in any other shader.
    */
   BLIT2D_SRC_TYPE_NORMAL,

   /* The source is W-tiled and we need to detile manually in the shader.
    * This will work on any platform but is needed for all W-tiled sources
    * prior to Broadwell.
    */
   BLIT2D_SRC_TYPE_W_DETILE,

   /* Count of source types; used as an array bound, not a real type. */
   BLIT2D_NUM_SRC_TYPES,
};
41
/* How the destination surface of a 2-D blit must be written by the shader.
 * Indexes the second dimension of meta_state.blit2d.pipelines.
 */
enum blit2d_dst_type {
   /* We can bind this destination as a "normal" render target and render
    * to it just like you would anywhere else.
    */
   BLIT2D_DST_TYPE_NORMAL,

   /* The destination is W-tiled and we need to do the tiling manually in
    * the shader.  This is required for all W-tiled destinations.
    *
    * Sky Lake adds a feature for providing explicit stencil values in the
    * shader but mesa doesn't support that yet so neither do we.
    */
   BLIT2D_DST_TYPE_W_TILE,

   /* The destination has a 3-channel RGB format.  Since we can't render to
    * non-power-of-two textures, we have to bind it as a red texture and
    * select the correct component for the given red pixel in the shader.
    */
   BLIT2D_DST_TYPE_RGB,

   /* Count of destination types; used as an array bound, not a real type. */
   BLIT2D_NUM_DST_TYPES,
};
64
65 static VkFormat
66 vk_format_for_size(int bs)
67 {
68 /* The choice of UNORM and UINT formats is very intentional here. Most of
69 * the time, we want to use a UINT format to avoid any rounding error in
70 * the blit. For stencil blits, R8_UINT is required by the hardware.
71 * (It's the only format allowed in conjunction with W-tiling.) Also we
72 * intentionally use the 4-channel formats whenever we can. This is so
73 * that, when we do a RGB <-> RGBX copy, the two formats will line up even
74 * though one of them is 3/4 the size of the other. The choice of UNORM
75 * vs. UINT is also very intentional because Haswell doesn't handle 8 or
76 * 16-bit RGB UINT formats at all so we have to use UNORM there.
77 * Fortunately, the only time we should ever use two different formats in
78 * the table below is for RGB -> RGBA blits and so we will never have any
79 * UNORM/UINT mismatch.
80 */
81 switch (bs) {
82 case 1: return VK_FORMAT_R8_UINT;
83 case 2: return VK_FORMAT_R8G8_UINT;
84 case 3: return VK_FORMAT_R8G8B8_UNORM;
85 case 4: return VK_FORMAT_R8G8B8A8_UNORM;
86 case 6: return VK_FORMAT_R16G16B16_UNORM;
87 case 8: return VK_FORMAT_R16G16B16A16_UNORM;
88 case 12: return VK_FORMAT_R32G32B32_UINT;
89 case 16: return VK_FORMAT_R32G32B32A32_UINT;
90 default:
91 unreachable("Invalid format block size");
92 }
93 }
94
/* Create a temporary VkImage wrapping the surface's BO plus a VkImageView
 * onto it for use by one blit rect.
 *
 * The image is created just large enough to cover the rect, then bound
 * directly to the caller's BO at surf->base_offset (no VkDeviceMemory is
 * created).  The view is placed at the tile-aligned offset closest to the
 * rect's x/y, and the rect's coordinates are rewritten in place to be
 * relative to that new origin.
 *
 * Which pair of rect coordinates is used (and mutated) depends on usage:
 * SAMPLED -> src_x/src_y, anything else -> dst_x/dst_y.
 *
 * On return, *img holds the new image (caller destroys it) and *iview is
 * initialized for use with the given usage.
 */
static void
create_iview(struct anv_cmd_buffer *cmd_buffer,
             struct anv_meta_blit2d_surf *surf,
             struct anv_meta_blit2d_rect *rect,
             VkImageUsageFlags usage,
             VkImage *img,
             struct anv_image_view *iview)
{
   struct isl_tile_info tile_info;
   isl_tiling_get_info(&cmd_buffer->device->isl_dev,
                       surf->tiling, surf->bs, &tile_info);
   /* Tile width in pixels; tile_info.width is in bytes.  Clamp to 1 when a
    * tile is narrower than one pixel's worth of bytes.
    */
   const unsigned tile_width_px = tile_info.width > surf->bs ?
                                  tile_info.width / surf->bs : 1;
   /* Pick src or dst coordinates depending on how the view will be used. */
   uint32_t *rect_y = (usage == VK_IMAGE_USAGE_SAMPLED_BIT) ?
                      &rect->src_y : &rect->dst_y;
   uint32_t *rect_x = (usage == VK_IMAGE_USAGE_SAMPLED_BIT) ?
                      &rect->src_x : &rect->dst_x;

   /* Define the shared state among all created image views */
   const VkImageCreateInfo image_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = vk_format_for_size(surf->bs),
      .extent = {
         /* Pad the extent so the rect still fits after the origin is moved
          * back to the nearest tile boundary below.
          */
         .width = rect->width + (*rect_x) % tile_width_px,
         .height = rect->height + (*rect_y) % tile_info.height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = 1,
      .tiling = surf->tiling == ISL_TILING_LINEAR ?
                VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL,
      .usage = usage,
   };

   /* Create the VkImage that is bound to the surface's memory. */
   anv_image_create(anv_device_to_handle(cmd_buffer->device),
                    &(struct anv_image_create_info) {
                       .vk_info = &image_info,
                       .isl_tiling_flags = 1 << surf->tiling,
                       .stride = surf->pitch,
                    }, &cmd_buffer->pool->alloc, img);

   /* We could use a vk call to bind memory, but that would require
    * creating a dummy memory object etc. so there's really no point.
    */
   anv_image_from_handle(*img)->bo = surf->bo;
   anv_image_from_handle(*img)->offset = surf->base_offset;

   /* Create a VkImageView that starts at the tile aligned offset closest
    * to the provided x/y offset into the surface.  This also rewrites
    * *rect_x / *rect_y to be relative to the returned intratile offset.
    */
   uint32_t img_o = 0;
   isl_surf_get_image_intratile_offset_el_xy(&cmd_buffer->device->isl_dev,
                                             &anv_image_from_handle(*img)->
                                                color_surface.isl,
                                             *rect_x, *rect_y,
                                             &img_o, rect_x, rect_y);
   anv_image_view_init(iview, cmd_buffer->device,
                       &(VkImageViewCreateInfo) {
                          .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                          .image = *img,
                          .viewType = VK_IMAGE_VIEW_TYPE_2D,
                          .format = image_info.format,
                          .subresourceRange = {
                             .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                             .baseMipLevel = 0,
                             .levelCount = 1,
                             .baseArrayLayer = 0,
                             .layerCount = 1
                          },
                       }, cmd_buffer, img_o, usage);
}
169
/* Per-rect temporary objects created by blit2d_bind_src() and destroyed by
 * blit2d_unbind_src().
 */
struct blit2d_src_temps {
   VkImage image;               /* wrapper image over the source BO */
   struct anv_image_view iview; /* sampled-image view of 'image' */
   VkDescriptorPool desc_pool;  /* one-shot pool that owns 'set' */
   VkDescriptorSet set;         /* descriptor set pointing at 'iview' */
};
176
/* Create the source image view and descriptor set for one blit rect and
 * bind the set to the command buffer.
 *
 * Fills *tmp with the created objects; the caller must pass the same tmp
 * to blit2d_unbind_src() after the draw has been emitted.  May mutate
 * rect->src_x/src_y (see create_iview).  src_type is currently unused here;
 * only the NORMAL (sampled image) path is implemented.
 */
static void
blit2d_bind_src(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *src,
                enum blit2d_src_type src_type,
                struct anv_meta_blit2d_rect *rect,
                struct blit2d_src_temps *tmp)
{
   struct anv_device *device = cmd_buffer->device;
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);

   create_iview(cmd_buffer, src, rect, VK_IMAGE_USAGE_SAMPLED_BIT,
                &tmp->image, &tmp->iview);

   /* A throwaway pool with room for exactly the one set we need. */
   anv_CreateDescriptorPool(vk_device,
      &(const VkDescriptorPoolCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
         .pNext = NULL,
         .flags = 0,
         .maxSets = 1,
         .poolSizeCount = 1,
         .pPoolSizes = (VkDescriptorPoolSize[]) {
            {
               .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .descriptorCount = 1
            },
         }
      }, &cmd_buffer->pool->alloc, &tmp->desc_pool);

   anv_AllocateDescriptorSets(vk_device,
      &(VkDescriptorSetAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
         .descriptorPool = tmp->desc_pool,
         .descriptorSetCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.img_ds_layout
      }, &tmp->set);

   /* Point binding 0 of the set at the freshly-created source view. */
   anv_UpdateDescriptorSets(vk_device,
      1, /* writeCount */
      (VkWriteDescriptorSet[]) {
         {
            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            .dstSet = tmp->set,
            .dstBinding = 0,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
            .pImageInfo = (VkDescriptorImageInfo[]) {
               {
                  .sampler = NULL,
                  .imageView = anv_image_view_to_handle(&tmp->iview),
                  .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
               },
            }
         }
      }, 0, NULL);

   anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
                             VK_PIPELINE_BIND_POINT_GRAPHICS,
                             device->meta_state.blit2d.img_p_layout, 0, 1,
                             &tmp->set, 0, NULL);
}
238
239 static void
240 blit2d_unbind_src(struct anv_cmd_buffer *cmd_buffer,
241 enum blit2d_src_type src_type,
242 struct blit2d_src_temps *tmp)
243 {
244 anv_DestroyDescriptorPool(anv_device_to_handle(cmd_buffer->device),
245 tmp->desc_pool, &cmd_buffer->pool->alloc);
246 anv_DestroyImage(anv_device_to_handle(cmd_buffer->device),
247 tmp->image, &cmd_buffer->pool->alloc);
248 }
249
/* Restore the command-buffer state that anv_meta_begin_blit2d() saved. */
void
anv_meta_end_blit2d(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_meta_saved_state *save)
{
   anv_meta_restore(save, cmd_buffer);
}
256
/* Save the command-buffer state that the blit2d meta-ops will clobber.
 * Only the viewport dynamic state is saved in addition to the defaults,
 * since that is the only dynamic state the blit path sets.
 */
void
anv_meta_begin_blit2d(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_meta_saved_state *save)
{
   anv_meta_save(save, cmd_buffer,
                 (1 << VK_DYNAMIC_STATE_VIEWPORT));
}
264
265 static void
266 bind_pipeline(struct anv_cmd_buffer *cmd_buffer,
267 enum blit2d_src_type src_type,
268 enum blit2d_dst_type dst_type)
269 {
270 VkPipeline pipeline =
271 cmd_buffer->device->meta_state.blit2d.pipelines[src_type][dst_type];
272
273 if (cmd_buffer->state.pipeline != anv_pipeline_from_handle(pipeline)) {
274 anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer),
275 VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
276 }
277 }
278
/* Blit a list of rects from src to a destination that can be bound as a
 * normal color render target (BLIT2D_DST_TYPE_NORMAL).
 *
 * For each rect this: binds the source (view + descriptor set), creates a
 * destination view and framebuffer, uploads three vertices (a rectlist:
 * max/max, min/max, min/min in dst space with matching src texcoords),
 * renders one rectlist primitive, then destroys the per-rect temporaries.
 * Note rects[] coordinates are rewritten in place by create_iview().
 */
static void
anv_meta_blit2d_normal_dst(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_meta_blit2d_surf *src,
                           enum blit2d_src_type src_type,
                           struct anv_meta_blit2d_surf *dst,
                           unsigned num_rects,
                           struct anv_meta_blit2d_rect *rects)
{
   struct anv_device *device = cmd_buffer->device;
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
   VkImageUsageFlags dst_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   for (unsigned r = 0; r < num_rects; ++r) {
      struct blit2d_src_temps src_temps;
      blit2d_bind_src(cmd_buffer, src, src_type, &rects[r], &src_temps);

      VkImage dst_img;
      struct anv_image_view dst_iview;
      create_iview(cmd_buffer, dst, &rects[r], dst_usage, &dst_img, &dst_iview);

      /* Per-vertex layout for binding 1 (binding 0 is the VUE header). */
      struct blit_vb_data {
         float pos[2];       /* destination position in pixels */
         float tex_coord[3]; /* src x, src y, src pitch */
      } *vb_data;

      /* One VUE header (zeroed) followed by three vertices. */
      unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);

      struct anv_state vb_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, vb_size, 16);
      memset(vb_state.map, 0, sizeof(struct anv_vue_header));
      vb_data = vb_state.map + sizeof(struct anv_vue_header);

      vb_data[0] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x + rects[r].width,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x + rects[r].width,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[1] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[2] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y,
            src->pitch,
         },
      };

      /* Flush the CPU writes so the GPU sees the vertex data. */
      anv_state_clflush(vb_state);

      /* Wrap the dynamic-state allocation in a fake anv_buffer so it can
       * be bound with the normal vertex-buffer path.
       */
      struct anv_buffer vertex_buffer = {
         .device = device,
         .size = vb_size,
         .bo = &device->dynamic_state_block_pool.bo,
         .offset = vb_state.offset,
      };

      /* Binding 0: VUE header at offset 0.  Binding 1: the vertices. */
      anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer), 0, 2,
                               (VkBuffer[]) {
                                  anv_buffer_to_handle(&vertex_buffer),
                                  anv_buffer_to_handle(&vertex_buffer)
                               },
                               (VkDeviceSize[]) {
                                  0,
                                  sizeof(struct anv_vue_header),
                               });

      VkFramebuffer fb;
      anv_CreateFramebuffer(vk_device,
         &(VkFramebufferCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
            .attachmentCount = 1,
            .pAttachments = (VkImageView[]) {
               anv_image_view_to_handle(&dst_iview),
            },
            .width = dst_iview.extent.width,
            .height = dst_iview.extent.height,
            .layers = 1
         }, &cmd_buffer->pool->alloc, &fb);

      ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
         &(VkRenderPassBeginInfo) {
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
            .renderPass = device->meta_state.blit2d.render_pass,
            .framebuffer = fb,
            .renderArea = {
               .offset = { rects[r].dst_x, rects[r].dst_y, },
               .extent = { rects[r].width, rects[r].height },
            },
            .clearValueCount = 0,
            .pClearValues = NULL,
         }, VK_SUBPASS_CONTENTS_INLINE);

      bind_pipeline(cmd_buffer, src_type, BLIT2D_DST_TYPE_NORMAL);

      anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
                         &(VkViewport) {
                            .x = 0.0f,
                            .y = 0.0f,
                            .width = dst_iview.extent.width,
                            .height = dst_iview.extent.height,
                            .minDepth = 0.0f,
                            .maxDepth = 1.0f,
                         });

      /* Three vertices: the rectlist topology set up by the pipeline. */
      ANV_CALL(CmdDraw)(anv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

      ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));

      /* At the point where we emit the draw call, all data from the
       * descriptor sets, etc. has been used.  We are free to delete it.
       */
      blit2d_unbind_src(cmd_buffer, src_type, &src_temps);
      anv_DestroyFramebuffer(vk_device, fb, &cmd_buffer->pool->alloc);
      anv_DestroyImage(vk_device, dst_img, &cmd_buffer->pool->alloc);
   }
}
416
417 void
418 anv_meta_blit2d(struct anv_cmd_buffer *cmd_buffer,
419 struct anv_meta_blit2d_surf *src,
420 struct anv_meta_blit2d_surf *dst,
421 unsigned num_rects,
422 struct anv_meta_blit2d_rect *rects)
423 {
424 enum blit2d_src_type src_type;
425 if (src->tiling == ISL_TILING_W && cmd_buffer->device->info.gen < 8) {
426 src_type = BLIT2D_SRC_TYPE_W_DETILE;
427 } else {
428 src_type = BLIT2D_SRC_TYPE_NORMAL;
429 }
430
431 if (dst->tiling == ISL_TILING_W) {
432 assert(dst->bs == 1);
433 anv_finishme("Blitting to w-tiled destinations not yet supported");
434 return;
435 } else if (dst->bs % 3 == 0) {
436 anv_finishme("Blitting to RGB destinations not yet supported");
437 return;
438 } else {
439 assert(util_is_power_of_two(dst->bs));
440 anv_meta_blit2d_normal_dst(cmd_buffer, src, src_type, dst,
441 num_rects, rects);
442 }
443 }
444
/* Build the pass-through vertex shader used by all blit2d pipelines.
 *
 * It copies the position attribute (generic slot 0) to gl_Position and the
 * texture-coordinate attribute (generic slot 1) to varying slot VAR0.  The
 * pipeline is created with disable_vs, so this shader mainly exists to
 * declare the inputs so the compiler doesn't dead-code them.
 */
static nir_shader *
build_nir_vertex_shader(void)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "a_pos");
   pos_in->data.location = VERT_ATTRIB_GENERIC0;
   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;
   nir_copy_var(&b, pos_out, pos_in);

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec4, "a_tex_pos");
   tex_pos_in->data.location = VERT_ATTRIB_GENERIC1;
   nir_variable *tex_pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                   vec4, "v_tex_pos");
   tex_pos_out->data.location = VARYING_SLOT_VAR0;
   tex_pos_out->data.interpolation = INTERP_QUALIFIER_SMOOTH;
   nir_copy_var(&b, tex_pos_out, tex_pos_in);

   return b.shader;
}
473
/* Callback that emits the NIR to fetch one texel for a given src type.
 * Arguments: builder, device, integer texel position (x, y), and the
 * source pitch (used by detiling variants).
 */
typedef nir_ssa_def* (*texel_fetch_build_func)(struct nir_builder *,
                                               struct anv_device *,
                                               nir_ssa_def *, nir_ssa_def *);
477
/* Texel-fetch builder for BLIT2D_SRC_TYPE_NORMAL: a plain txf from the
 * sampled image at descriptor set 0, binding 0, LOD 0.
 *
 * tex_pitch is unused by this variant; it is part of the
 * texel_fetch_build_func signature for the benefit of detiling variants.
 * Returns the 4-component fetch result.
 */
static nir_ssa_def *
build_nir_texel_fetch(struct nir_builder *b, struct anv_device *device,
                      nir_ssa_def *tex_pos, nir_ssa_def *tex_pitch)
{
   const struct glsl_type *sampler_type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false, GLSL_TYPE_FLOAT);
   nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
                                               sampler_type, "s_tex");
   sampler->data.descriptor_set = 0;
   sampler->data.binding = 0;

   /* txf: fetch by integer texel coordinate, no sampler state needed. */
   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   tex->op = nir_texop_txf;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(tex_pos);
   tex->src[1].src_type = nir_tex_src_lod;
   tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0));
   tex->dest_type = nir_type_float; /* TODO */
   tex->is_array = false;
   tex->coord_components = 2;
   tex->texture = nir_deref_var_create(tex, sampler);
   tex->sampler = NULL;

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}
507
/* Build the fragment shader for a copy blit.
 *
 * Reads the interpolated vec3 texcoord (src x, src y, src pitch) from
 * varying slot VAR0, truncates it to integers, and uses txf_func to fetch
 * the texel, which is written unmodified to color output DATA0.
 */
static nir_shader *
build_nir_copy_fragment_shader(struct anv_device *device,
                               texel_fetch_build_func txf_func)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *vec3 = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit2d_fs");

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec3, "v_tex_pos");
   tex_pos_in->data.location = VARYING_SLOT_VAR0;

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_DATA0;

   /* Convert the float texcoord to integers: xy is the texel position,
    * z carries the source pitch for detiling fetch functions.
    */
   nir_ssa_def *pos_int = nir_f2i(&b, nir_load_var(&b, tex_pos_in));
   unsigned swiz[4] = { 0, 1 };
   nir_ssa_def *tex_pos = nir_swizzle(&b, pos_int, swiz, 2, false);
   nir_ssa_def *tex_pitch = nir_channel(&b, pos_int, 2);

   nir_ssa_def *color = txf_func(&b, device, tex_pos, tex_pitch);
   nir_store_var(&b, color_out, color, 0xf);

   return b.shader;
}
537
/* Tear down all blit2d meta state.  Each object is destroyed only if its
 * handle is non-zero, so this is safe to call on a partially-initialized
 * state (it is the failure path of anv_device_init_meta_blit2d_state).
 */
void
anv_device_finish_meta_blit2d_state(struct anv_device *device)
{
   if (device->meta_state.blit2d.render_pass) {
      anv_DestroyRenderPass(anv_device_to_handle(device),
                            device->meta_state.blit2d.render_pass,
                            &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.img_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.img_ds_layout,
                                     &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.buf_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.buf_ds_layout,
                                     &device->meta_state.alloc);
   }

   /* Destroy whichever src/dst pipeline combinations were built. */
   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         if (device->meta_state.blit2d.pipelines[src][dst]) {
            anv_DestroyPipeline(anv_device_to_handle(device),
                                device->meta_state.blit2d.pipelines[src][dst],
                                &device->meta_state.alloc);
         }
      }
   }
}
581
/* Build the graphics pipeline for one (src_type, dst_type) combination and
 * store it in meta_state.blit2d.pipelines[src_type][dst_type].
 *
 * Unsupported combinations return VK_SUCCESS without creating anything;
 * the corresponding pipeline handle stays zero and the finish path skips
 * it.  Returns the result of anv_graphics_pipeline_create otherwise.
 */
static VkResult
blit2d_init_pipeline(struct anv_device *device,
                     enum blit2d_src_type src_type,
                     enum blit2d_dst_type dst_type)
{
   VkResult result;

   /* Pick the texel-fetch builder for this source access strategy. */
   texel_fetch_build_func src_func;
   switch (src_type) {
   case BLIT2D_SRC_TYPE_NORMAL:
      src_func = build_nir_texel_fetch;
      break;
   case BLIT2D_SRC_TYPE_W_DETILE:
      /* Not yet supported */
   default:
      return VK_SUCCESS;
   }

   struct anv_shader_module fs = { .nir = NULL };
   switch (dst_type) {
   case BLIT2D_DST_TYPE_NORMAL:
      fs.nir = build_nir_copy_fragment_shader(device, src_func);
      break;
   case BLIT2D_DST_TYPE_W_TILE:
   case BLIT2D_DST_TYPE_RGB:
      /* Not yet supported */
   default:
      return VK_SUCCESS;
   }

   /* We don't use a vertex shader for blitting, but instead build and pass
    * the VUEs directly to the rasterization backend.  However, we do need
    * to provide a NIR vertex shader so that the compiler does not
    * dead-code our inputs.
    */
   struct anv_shader_module vs = {
      .nir = build_nir_vertex_shader(),
   };

   /* Binding 0: per-instance VUE header.  Binding 1: per-vertex position
    * (vec2) + texcoord (vec3), matching struct blit_vb_data above.
    */
   VkPipelineVertexInputStateCreateInfo vi_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
      .vertexBindingDescriptionCount = 2,
      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
         {
            .binding = 0,
            .stride = 0,
            .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE
         },
         {
            .binding = 1,
            .stride = 5 * sizeof(float),
            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
         },
      },
      .vertexAttributeDescriptionCount = 3,
      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
         {
            /* VUE Header */
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32A32_UINT,
            .offset = 0
         },
         {
            /* Position */
            .location = 1,
            .binding = 1,
            .format = VK_FORMAT_R32G32_SFLOAT,
            .offset = 0
         },
         {
            /* Texture Coordinate */
            .location = 2,
            .binding = 1,
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .offset = 8
         }
      }
   };

   VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
      {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_VERTEX_BIT,
         .module = anv_shader_module_to_handle(&vs),
         .pName = "main",
         .pSpecializationInfo = NULL
      }, {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
         .module = anv_shader_module_to_handle(&fs),
         .pName = "main",
         .pSpecializationInfo = NULL
      },
   };

   const VkGraphicsPipelineCreateInfo vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
      .stageCount = ARRAY_SIZE(pipeline_shader_stages),
      .pStages = pipeline_shader_stages,
      .pVertexInputState = &vi_create_info,
      .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
         .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
         .primitiveRestartEnable = false,
      },
      .pViewportState = &(VkPipelineViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
         .viewportCount = 1,
         .scissorCount = 1,
      },
      .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
         .rasterizerDiscardEnable = false,
         .polygonMode = VK_POLYGON_MODE_FILL,
         .cullMode = VK_CULL_MODE_NONE,
         .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE
      },
      .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
         .rasterizationSamples = 1,
         .sampleShadingEnable = false,
         .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
      },
      .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = (VkPipelineColorBlendAttachmentState []) {
            { .colorWriteMask =
                 VK_COLOR_COMPONENT_A_BIT |
                 VK_COLOR_COMPONENT_R_BIT |
                 VK_COLOR_COMPONENT_G_BIT |
                 VK_COLOR_COMPONENT_B_BIT },
         }
      },
      /* Declare everything dynamic so the meta path never has to bake
       * state it doesn't set into the pipeline.
       */
      .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
         .dynamicStateCount = 9,
         .pDynamicStates = (VkDynamicState[]) {
            VK_DYNAMIC_STATE_VIEWPORT,
            VK_DYNAMIC_STATE_SCISSOR,
            VK_DYNAMIC_STATE_LINE_WIDTH,
            VK_DYNAMIC_STATE_DEPTH_BIAS,
            VK_DYNAMIC_STATE_BLEND_CONSTANTS,
            VK_DYNAMIC_STATE_DEPTH_BOUNDS,
            VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
            VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
            VK_DYNAMIC_STATE_STENCIL_REFERENCE,
         },
      },
      .flags = 0,
      .layout = device->meta_state.blit2d.img_p_layout,
      .renderPass = device->meta_state.blit2d.render_pass,
      .subpass = 0,
   };

   const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
      .color_attachment_count = -1,
      .use_repclear = false,
      .disable_viewport = true,
      .disable_scissor = true,
      .disable_vs = true,
      .use_rectlist = true
   };

   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
      VK_NULL_HANDLE,
      &vk_pipeline_info, &anv_pipeline_info,
      &device->meta_state.alloc,
      &device->meta_state.blit2d.pipelines[src_type][dst_type]);

   /* The shader NIR is consumed at create time; free it either way. */
   ralloc_free(vs.nir);
   ralloc_free(fs.nir);

   return result;
}
758
/* Create all blit2d meta state: the shared render pass, the image and
 * buffer descriptor-set/pipeline layouts, and one pipeline per supported
 * (src_type, dst_type) combination.
 *
 * On any failure the partially-created state is torn down via
 * anv_device_finish_meta_blit2d_state (which skips zero handles) and the
 * failing VkResult is returned.
 */
VkResult
anv_device_init_meta_blit2d_state(struct anv_device *device)
{
   VkResult result;

   /* Start from all-zero handles so the failure path is safe. */
   zero(device->meta_state.blit2d);

   result = anv_CreateRenderPass(anv_device_to_handle(device),
      &(VkRenderPassCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = &(VkAttachmentDescription) {
            .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
            .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
            .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
         },
         .subpassCount = 1,
         .pSubpasses = &(VkSubpassDescription) {
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .inputAttachmentCount = 0,
            .colorAttachmentCount = 1,
            .pColorAttachments = &(VkAttachmentReference) {
               .attachment = 0,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .pResolveAttachments = NULL,
            .pDepthStencilAttachment = &(VkAttachmentReference) {
               .attachment = VK_ATTACHMENT_UNUSED,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .preserveAttachmentCount = 1,
            .pPreserveAttachments = (uint32_t[]) { 0 },
         },
         .dependencyCount = 0,
      }, &device->meta_state.alloc, &device->meta_state.blit2d.render_pass);
   if (result != VK_SUCCESS)
      goto fail;

   /* Set layout for sampled-image sources: one SAMPLED_IMAGE at binding 0. */
   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.img_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.img_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.img_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   /* Set layout for texel-buffer sources: one UNIFORM_TEXEL_BUFFER at
    * binding 0.
    */
   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.buf_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.buf_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.buf_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   /* Build every src/dst combination; unsupported ones are no-ops that
    * return VK_SUCCESS and leave the handle zero.
    */
   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         result = blit2d_init_pipeline(device, src, dst);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   return VK_SUCCESS;

fail:
   anv_device_finish_meta_blit2d_state(device);
   return result;
}