mesa.git: src/intel/vulkan/anv_meta_blit2d.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_meta.h"
#include "nir/nir_builder.h"

enum blit2d_src_type {
   /* We can make a "normal" image view of this source and just texture
    * from it like you would in any other shader.
    */
   BLIT2D_SRC_TYPE_NORMAL,

   /* The source is W-tiled and we need to detile manually in the shader.
    * This will work on any platform but is needed for all W-tiled sources
    * prior to Broadwell.
    */
   BLIT2D_SRC_TYPE_W_DETILE,

   BLIT2D_NUM_SRC_TYPES,
};

enum blit2d_dst_type {
   /* We can bind this destination as a "normal" render target and render
    * to it just like you would anywhere else.
    */
   BLIT2D_DST_TYPE_NORMAL,

   /* The destination is W-tiled and we need to do the tiling manually in
    * the shader. This is required for all W-tiled destinations.
    *
    * Sky Lake adds a feature for providing explicit stencil values in the
    * shader but mesa doesn't support that yet so neither do we.
    */
   BLIT2D_DST_TYPE_W_TILE,

   /* The destination has a 3-channel RGB format. Since we can't render to
    * formats with a non-power-of-two texel size, we have to bind it as a
    * single-channel red texture and select the correct component for the
    * given red pixel in the shader.
    */
   BLIT2D_DST_TYPE_RGB,

   BLIT2D_NUM_DST_TYPES,
};

static VkFormat
vk_format_for_size(int bs)
{
   /* The choice of UNORM and UINT formats is very intentional here. Most of
    * the time, we want to use a UINT format to avoid any rounding error in
    * the blit. For stencil blits, R8_UINT is required by the hardware.
    * (It's the only format allowed in conjunction with W-tiling.) Also we
    * intentionally use the 4-channel formats whenever we can. This is so
    * that, when we do a RGB <-> RGBX copy, the two formats will line up even
    * though one of them is 3/4 the size of the other. The choice of UNORM
    * vs. UINT is also very intentional because Haswell doesn't handle 8 or
    * 16-bit RGB UINT formats at all so we have to use UNORM there.
    * Fortunately, the only time we should ever use two different formats in
    * the table below is for RGB -> RGBA blits and so we will never have any
    * UNORM/UINT mismatch.
    */
   switch (bs) {
   case 1: return VK_FORMAT_R8_UINT;
   case 2: return VK_FORMAT_R8G8_UINT;
   case 3: return VK_FORMAT_R8G8B8_UNORM;
   case 4: return VK_FORMAT_R8G8B8A8_UNORM;
   case 6: return VK_FORMAT_R16G16B16_UNORM;
   case 8: return VK_FORMAT_R16G16B16A16_UNORM;
   case 12: return VK_FORMAT_R32G32B32_UINT;
   case 16: return VK_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Invalid format block size");
   }
}

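/* Wrap a range of a surface's BO in a temporary 2D VkImage and image view.
 * The image gets a format chosen purely by block size (vk_format_for_size),
 * the caller-provided width/height, and the surface's tiling, and is then
 * bound directly to the surface's BO at base_offset + offset instead of
 * going through a VkDeviceMemory object.
 */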
static void
create_iview(struct anv_cmd_buffer *cmd_buffer,
             struct anv_meta_blit2d_surf *surf,
             uint64_t offset,
             VkImageUsageFlags usage,
             uint32_t width,
             uint32_t height,
             VkImage *img,
             struct anv_image_view *iview)
{
   const VkImageCreateInfo image_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = vk_format_for_size(surf->bs),
      .extent = {
         .width = width,
         .height = height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = 1,
      .tiling = surf->tiling == ISL_TILING_LINEAR ?
                VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL,
      .usage = usage,
   };

   /* Create the VkImage that is bound to the surface's memory. */
   anv_image_create(anv_device_to_handle(cmd_buffer->device),
      &(struct anv_image_create_info) {
         .vk_info = &image_info,
         .isl_tiling_flags = 1 << surf->tiling,
         .stride = surf->pitch,
      }, &cmd_buffer->pool->alloc, img);

   /* We could use a vk call to bind memory, but that would require
    * creating a dummy memory object etc. so there's really no point.
    */
   anv_image_from_handle(*img)->bo = surf->bo;
   anv_image_from_handle(*img)->offset = surf->base_offset + offset;

   anv_image_view_init(iview, cmd_buffer->device,
      &(VkImageViewCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
         .image = *img,
         .viewType = VK_IMAGE_VIEW_TYPE_2D,
         .format = image_info.format,
         .subresourceRange = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1
         },
      }, cmd_buffer, usage);
}

struct blit2d_src_temps {
   VkImage image;
   struct anv_image_view iview;

   struct anv_buffer buffer;
   struct anv_buffer_view bview;

   VkDescriptorPool desc_pool;
   VkDescriptorSet set;
};

static void
blit2d_bind_src(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *src,
                enum blit2d_src_type src_type,
                struct anv_meta_blit2d_rect *rect,
                struct blit2d_src_temps *tmp)
{
   struct anv_device *device = cmd_buffer->device;
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);

   if (src_type == BLIT2D_SRC_TYPE_NORMAL) {
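      /* Instead of viewing the whole surface, move the base offset forward
       * to the tile containing (src_x, src_y). The helper returns that
       * byte offset and rewrites src_x/src_y relative to it, so the
       * temporary image only has to be large enough to cover the rectangle
       * being copied.
       */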
      uint32_t offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         src->tiling, src->bs, src->pitch,
                                         rect->src_x, rect->src_y,
                                         &offset, &rect->src_x, &rect->src_y);

      create_iview(cmd_buffer, src, offset, VK_IMAGE_USAGE_SAMPLED_BIT,
                   rect->src_x + rect->width, rect->src_y + rect->height,
                   &tmp->image, &tmp->iview);

      anv_CreateDescriptorPool(vk_device,
         &(const VkDescriptorPoolCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .maxSets = 1,
            .poolSizeCount = 1,
            .pPoolSizes = (VkDescriptorPoolSize[]) {
               {
                  .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                  .descriptorCount = 1
               },
            }
         }, &cmd_buffer->pool->alloc, &tmp->desc_pool);

      anv_AllocateDescriptorSets(vk_device,
         &(VkDescriptorSetAllocateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            .descriptorPool = tmp->desc_pool,
            .descriptorSetCount = 1,
            .pSetLayouts = &device->meta_state.blit2d.img_ds_layout
         }, &tmp->set);

      anv_UpdateDescriptorSets(vk_device,
         1, /* writeCount */
         (VkWriteDescriptorSet[]) {
            {
               .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
               .dstSet = tmp->set,
               .dstBinding = 0,
               .dstArrayElement = 0,
               .descriptorCount = 1,
               .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .pImageInfo = (VkDescriptorImageInfo[]) {
                  {
                     .sampler = NULL,
                     .imageView = anv_image_view_to_handle(&tmp->iview),
                     .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
                  },
               }
            }
         }, 0, NULL);

      anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
                                VK_PIPELINE_BIND_POINT_GRAPHICS,
                                device->meta_state.blit2d.img_p_layout, 0, 1,
                                &tmp->set, 0, NULL);
   } else {
      assert(src_type == BLIT2D_SRC_TYPE_W_DETILE);
      assert(src->tiling == ISL_TILING_W);
      assert(src->bs == 1);

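      /* Prior to Broadwell the sampler can't read W-tiled surfaces
       * directly, so expose the raw tiled bytes to the fragment shader as
       * an R8_UINT texel buffer and let the shader (build_nir_w_tiled_fetch)
       * compute the tiled byte offset itself. The row count is rounded up
       * to a multiple of 64 so the buffer always covers whole W tiles.
       */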
      uint32_t tile_offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         ISL_TILING_W, 1, src->pitch,
                                         rect->src_x, rect->src_y,
                                         &tile_offset,
                                         &rect->src_x, &rect->src_y);

      tmp->buffer = (struct anv_buffer) {
         .device = device,
         .size = align_u32(rect->src_y + rect->height, 64) * src->pitch,
         .usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
         .bo = src->bo,
         .offset = src->base_offset + tile_offset,
      };

      anv_buffer_view_init(&tmp->bview, device,
         &(VkBufferViewCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
            .buffer = anv_buffer_to_handle(&tmp->buffer),
            .format = VK_FORMAT_R8_UINT,
            .offset = 0,
            .range = VK_WHOLE_SIZE,
         }, cmd_buffer);

      anv_CreateDescriptorPool(vk_device,
         &(const VkDescriptorPoolCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .maxSets = 1,
            .poolSizeCount = 1,
            .pPoolSizes = (VkDescriptorPoolSize[]) {
               {
                  .type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                  .descriptorCount = 1
               },
            }
         }, &cmd_buffer->pool->alloc, &tmp->desc_pool);

      anv_AllocateDescriptorSets(vk_device,
         &(VkDescriptorSetAllocateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            .descriptorPool = tmp->desc_pool,
            .descriptorSetCount = 1,
            .pSetLayouts = &device->meta_state.blit2d.buf_ds_layout
         }, &tmp->set);

      anv_UpdateDescriptorSets(vk_device,
         1, /* writeCount */
         (VkWriteDescriptorSet[]) {
            {
               .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
               .dstSet = tmp->set,
               .dstBinding = 0,
               .dstArrayElement = 0,
               .descriptorCount = 1,
               .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
               .pTexelBufferView = (VkBufferView[]) {
                  anv_buffer_view_to_handle(&tmp->bview),
               },
            }
         }, 0, NULL);

      anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
                                VK_PIPELINE_BIND_POINT_GRAPHICS,
                                device->meta_state.blit2d.buf_p_layout, 0, 1,
                                &tmp->set, 0, NULL);
   }
}

static void
blit2d_unbind_src(struct anv_cmd_buffer *cmd_buffer,
                  enum blit2d_src_type src_type,
                  struct blit2d_src_temps *tmp)
{
   anv_DestroyDescriptorPool(anv_device_to_handle(cmd_buffer->device),
                             tmp->desc_pool, &cmd_buffer->pool->alloc);
   if (src_type == BLIT2D_SRC_TYPE_NORMAL) {
      anv_DestroyImage(anv_device_to_handle(cmd_buffer->device),
                       tmp->image, &cmd_buffer->pool->alloc);
   }
}

struct blit2d_dst_temps {
   VkImage image;
   struct anv_image_view iview;
   VkFramebuffer fb;
};

static void
blit2d_bind_dst(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *dst,
                uint64_t offset,
                uint32_t width,
                uint32_t height,
                struct blit2d_dst_temps *tmp)
{
   create_iview(cmd_buffer, dst, offset, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                width, height, &tmp->image, &tmp->iview);

   anv_CreateFramebuffer(anv_device_to_handle(cmd_buffer->device),
      &(VkFramebufferCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = (VkImageView[]) {
            anv_image_view_to_handle(&tmp->iview),
         },
         .width = width,
         .height = height,
         .layers = 1
      }, &cmd_buffer->pool->alloc, &tmp->fb);

   anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
      &(VkViewport) {
         .x = 0.0f,
         .y = 0.0f,
         .width = width,
         .height = height,
         .minDepth = 0.0f,
         .maxDepth = 1.0f,
      });
}

static void
blit2d_unbind_dst(struct anv_cmd_buffer *cmd_buffer,
                  struct blit2d_dst_temps *tmp)
{
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
   anv_DestroyFramebuffer(vk_device, tmp->fb, &cmd_buffer->pool->alloc);
   anv_DestroyImage(vk_device, tmp->image, &cmd_buffer->pool->alloc);
}

void
anv_meta_end_blit2d(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_meta_saved_state *save)
{
   anv_meta_restore(save, cmd_buffer);
}

void
anv_meta_begin_blit2d(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_meta_saved_state *save)
{
   anv_meta_save(save, cmd_buffer,
                 (1 << VK_DYNAMIC_STATE_VIEWPORT));
}

static void
bind_pipeline(struct anv_cmd_buffer *cmd_buffer,
              enum blit2d_src_type src_type,
              enum blit2d_dst_type dst_type)
{
   VkPipeline pipeline =
      cmd_buffer->device->meta_state.blit2d.pipelines[src_type][dst_type];

   if (cmd_buffer->state.pipeline != anv_pipeline_from_handle(pipeline)) {
      anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer),
                          VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   }
}

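/* Copy a list of rectangles between two surfaces when the destination can
 * be bound as an ordinary color attachment. For each rectangle we build a
 * throw-away source descriptor set, wrap the destination in a temporary
 * image and framebuffer, and emit a single rect-list draw covering the
 * destination rectangle.
 */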
static void
anv_meta_blit2d_normal_dst(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_meta_blit2d_surf *src,
                           enum blit2d_src_type src_type,
                           struct anv_meta_blit2d_surf *dst,
                           unsigned num_rects,
                           struct anv_meta_blit2d_rect *rects)
{
   struct anv_device *device = cmd_buffer->device;

   for (unsigned r = 0; r < num_rects; ++r) {
      struct blit2d_src_temps src_temps;
      blit2d_bind_src(cmd_buffer, src, src_type, &rects[r], &src_temps);

      uint32_t offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         dst->tiling, dst->bs, dst->pitch,
                                         rects[r].dst_x, rects[r].dst_y,
                                         &offset,
                                         &rects[r].dst_x, &rects[r].dst_y);

      struct blit2d_dst_temps dst_temps;
      blit2d_bind_dst(cmd_buffer, dst, offset, rects[r].dst_x + rects[r].width,
                      rects[r].dst_y + rects[r].height, &dst_temps);

      struct blit_vb_data {
         float pos[2];
         float tex_coord[3];
      } *vb_data;

      unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);

      struct anv_state vb_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, vb_size, 16);
      memset(vb_state.map, 0, sizeof(struct anv_vue_header));
      vb_data = vb_state.map + sizeof(struct anv_vue_header);

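      /* The pipeline is created with use_rectlist, so a rectangle is drawn
       * with just three vertices: (x1, y1), (x0, y1) and (x0, y0); the
       * hardware infers the fourth corner. The texture coordinate carries
       * the source position, with the source pitch tucked into the third
       * component for the W-tiled fetch path.
       */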
      vb_data[0] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x + rects[r].width,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x + rects[r].width,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[1] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[2] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y,
            src->pitch,
         },
      };

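      /* The vertex data was written through a CPU map of the dynamic state
       * pool; flush it out of the CPU caches so the GPU is guaranteed to
       * see it (this matters on platforms without a coherent LLC).
       */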
      anv_state_clflush(vb_state);

      struct anv_buffer vertex_buffer = {
         .device = device,
         .size = vb_size,
         .bo = &device->dynamic_state_block_pool.bo,
         .offset = vb_state.offset,
      };
      anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer), 0, 2,
         (VkBuffer[]) {
            anv_buffer_to_handle(&vertex_buffer),
            anv_buffer_to_handle(&vertex_buffer)
         },
         (VkDeviceSize[]) {
            0,
            sizeof(struct anv_vue_header),
         });

      ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
         &(VkRenderPassBeginInfo) {
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
            .renderPass = device->meta_state.blit2d.render_pass,
            .framebuffer = dst_temps.fb,
            .renderArea = {
               .offset = { rects[r].dst_x, rects[r].dst_y, },
               .extent = { rects[r].width, rects[r].height },
            },
            .clearValueCount = 0,
            .pClearValues = NULL,
         }, VK_SUBPASS_CONTENTS_INLINE);

      bind_pipeline(cmd_buffer, src_type, BLIT2D_DST_TYPE_NORMAL);

      ANV_CALL(CmdDraw)(anv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

      ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));

      /* At the point where we emit the draw call, all data from the
       * descriptor sets, etc. has been used. We are free to delete it.
       */
      blit2d_unbind_src(cmd_buffer, src_type, &src_temps);
      blit2d_unbind_dst(cmd_buffer, &dst_temps);
   }
}

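/* Main entry point for 2D blits in the meta framework. The source and
 * destination are described by raw surface parameters (BO, offset, pitch,
 * tiling and block size) rather than by VkImages. Sources that can't be
 * sampled directly (W-tiled prior to Broadwell) use the manual detiling
 * path, while W-tiled and 3-channel RGB destinations are not implemented
 * yet and simply bail with anv_finishme().
 */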
void
anv_meta_blit2d(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *src,
                struct anv_meta_blit2d_surf *dst,
                unsigned num_rects,
                struct anv_meta_blit2d_rect *rects)
{
   enum blit2d_src_type src_type;
   if (src->tiling == ISL_TILING_W && cmd_buffer->device->info.gen < 8) {
      src_type = BLIT2D_SRC_TYPE_W_DETILE;
   } else {
      src_type = BLIT2D_SRC_TYPE_NORMAL;
   }

   if (dst->tiling == ISL_TILING_W) {
      assert(dst->bs == 1);
      anv_finishme("Blitting to w-tiled destinations not yet supported");
      return;
   } else if (dst->bs % 3 == 0) {
      anv_finishme("Blitting to RGB destinations not yet supported");
      return;
   } else {
      assert(util_is_power_of_two(dst->bs));
      anv_meta_blit2d_normal_dst(cmd_buffer, src, src_type, dst,
                                 num_rects, rects);
   }
}

static nir_shader *
build_nir_vertex_shader(void)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "a_pos");
   pos_in->data.location = VERT_ATTRIB_GENERIC0;
   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;
   nir_copy_var(&b, pos_out, pos_in);

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec4, "a_tex_pos");
   tex_pos_in->data.location = VERT_ATTRIB_GENERIC1;
   nir_variable *tex_pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                   vec4, "v_tex_pos");
   tex_pos_out->data.location = VARYING_SLOT_VAR0;
   tex_pos_out->data.interpolation = INTERP_QUALIFIER_SMOOTH;
   nir_copy_var(&b, tex_pos_out, tex_pos_in);

   return b.shader;
}

typedef nir_ssa_def* (*texel_fetch_build_func)(struct nir_builder *,
                                               struct anv_device *,
                                               nir_ssa_def *, nir_ssa_def *);

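/* Emit NIR that copies num_bits bits of src, starting at bit src_offset,
 * into dst starting at bit dst_offset, and return the combined value.
 * Used below to interleave individual x/y coordinate bits into a W-tiled
 * byte offset.
 */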
static nir_ssa_def *
nir_copy_bits(struct nir_builder *b, nir_ssa_def *dst, unsigned dst_offset,
              nir_ssa_def *src, unsigned src_offset, unsigned num_bits)
{
   unsigned src_mask = (~0u >> (32 - num_bits)) << src_offset;
   nir_ssa_def *masked = nir_iand(b, src, nir_imm_int(b, src_mask));

   nir_ssa_def *shifted;
   if (dst_offset > src_offset) {
      shifted = nir_ishl(b, masked, nir_imm_int(b, dst_offset - src_offset));
   } else if (dst_offset < src_offset) {
      shifted = nir_ushr(b, masked, nir_imm_int(b, src_offset - dst_offset));
   } else {
      assert(dst_offset == src_offset);
      shifted = masked;
   }

   return nir_ior(b, dst, shifted);
}

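/* Texel fetch for the W-detiled source path. The source is bound as an
 * R8_UINT texel buffer (see blit2d_bind_src), so the shader has to turn the
 * (x, y) texel position into a byte offset itself. A W tile is 64 bytes
 * wide by 64 rows tall (4096 bytes); within a tile, the address is an
 * interleave of the low x/y bits:
 *
 *    offset bit:  11 10  9 |  8  7  6  5 |  4  3  2  1  0
 *    coord bit:   x5 x4 x3 | y5 y4 y3 y2 | x2 y1 x1 y0 x0
 *
 * which is exactly what the nir_copy_bits() calls below construct.
 */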
static nir_ssa_def *
build_nir_w_tiled_fetch(struct nir_builder *b, struct anv_device *device,
                        nir_ssa_def *tex_pos, nir_ssa_def *tex_pitch)
{
   nir_ssa_def *x = nir_channel(b, tex_pos, 0);
   nir_ssa_def *y = nir_channel(b, tex_pos, 1);

   /* First, compute the block-aligned offset */
   nir_ssa_def *x_major = nir_ushr(b, x, nir_imm_int(b, 6));
   nir_ssa_def *y_major = nir_ushr(b, y, nir_imm_int(b, 6));
   nir_ssa_def *offset =
      nir_iadd(b, nir_imul(b, y_major,
                           nir_imul(b, tex_pitch, nir_imm_int(b, 64))),
                  nir_imul(b, x_major, nir_imm_int(b, 4096)));

   /* Compute the bottom 12 bits of the offset */
   offset = nir_copy_bits(b, offset, 0, x, 0, 1);
   offset = nir_copy_bits(b, offset, 1, y, 0, 1);
   offset = nir_copy_bits(b, offset, 2, x, 1, 1);
   offset = nir_copy_bits(b, offset, 3, y, 1, 1);
   offset = nir_copy_bits(b, offset, 4, x, 2, 1);
   offset = nir_copy_bits(b, offset, 5, y, 2, 4);
   offset = nir_copy_bits(b, offset, 9, x, 3, 3);

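   /* On configurations with bit-6 address swizzling, the hardware XORs
    * address bit 6 with bit 9; since we address the tiled data through a
    * linear texel buffer, apply the matching bit-6 ^= bit-9 adjustment to
    * the computed offset.
    */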
   if (device->isl_dev.has_bit6_swizzling) {
      offset = nir_ixor(b, offset,
                        nir_ushr(b, nir_iand(b, offset, nir_imm_int(b, 0x0200)),
                                 nir_imm_int(b, 3)));
   }

   const struct glsl_type *sampler_type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_BUF, false, false, GLSL_TYPE_FLOAT);
   nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
                                               sampler_type, "s_tex");
   sampler->data.descriptor_set = 0;
   sampler->data.binding = 0;

   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1);
   tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
   tex->op = nir_texop_txf;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(offset);
   tex->dest_type = nir_type_float; /* TODO */
   tex->is_array = false;
   tex->coord_components = 1;
   tex->texture = nir_deref_var_create(tex, sampler);
   tex->sampler = NULL;

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}

static nir_ssa_def *
build_nir_texel_fetch(struct nir_builder *b, struct anv_device *device,
                      nir_ssa_def *tex_pos, nir_ssa_def *tex_pitch)
{
   const struct glsl_type *sampler_type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false, GLSL_TYPE_FLOAT);
   nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
                                               sampler_type, "s_tex");
   sampler->data.descriptor_set = 0;
   sampler->data.binding = 0;

   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   tex->op = nir_texop_txf;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(tex_pos);
   tex->src[1].src_type = nir_tex_src_lod;
   tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0));
   tex->dest_type = nir_type_float; /* TODO */
   tex->is_array = false;
   tex->coord_components = 2;
   tex->texture = nir_deref_var_create(tex, sampler);
   tex->sampler = NULL;

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}

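/* Build the fragment shader for a "normal" destination: it reads the
 * interpolated v_tex_pos varying (texel coordinates in x/y and the source
 * pitch in z), converts it to integers, and hands it to the
 * source-type-specific texel fetch function.
 */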
static nir_shader *
build_nir_copy_fragment_shader(struct anv_device *device,
                               texel_fetch_build_func txf_func)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *vec3 = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit2d_fs");

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec3, "v_tex_pos");
   tex_pos_in->data.location = VARYING_SLOT_VAR0;

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_DATA0;

   nir_ssa_def *pos_int = nir_f2i(&b, nir_load_var(&b, tex_pos_in));
   unsigned swiz[4] = { 0, 1 };
   nir_ssa_def *tex_pos = nir_swizzle(&b, pos_int, swiz, 2, false);
   nir_ssa_def *tex_pitch = nir_channel(&b, pos_int, 2);

   nir_ssa_def *color = txf_func(&b, device, tex_pos, tex_pitch);
   nir_store_var(&b, color_out, color, 0xf);

   return b.shader;
}

void
anv_device_finish_meta_blit2d_state(struct anv_device *device)
{
   if (device->meta_state.blit2d.render_pass) {
      anv_DestroyRenderPass(anv_device_to_handle(device),
                            device->meta_state.blit2d.render_pass,
                            &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.img_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.img_ds_layout,
                                     &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.buf_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.buf_ds_layout,
                                     &device->meta_state.alloc);
   }

   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         if (device->meta_state.blit2d.pipelines[src][dst]) {
            anv_DestroyPipeline(anv_device_to_handle(device),
                                device->meta_state.blit2d.pipelines[src][dst],
                                &device->meta_state.alloc);
         }
      }
   }
}

static VkResult
blit2d_init_pipeline(struct anv_device *device,
                     enum blit2d_src_type src_type,
                     enum blit2d_dst_type dst_type)
{
   VkResult result;

   texel_fetch_build_func src_func;
   switch (src_type) {
   case BLIT2D_SRC_TYPE_NORMAL:
      src_func = build_nir_texel_fetch;
      break;
   case BLIT2D_SRC_TYPE_W_DETILE:
      src_func = build_nir_w_tiled_fetch;
      break;
   default:
      unreachable("Invalid blit2d source type");
   }

   struct anv_shader_module fs = { .nir = NULL };
   switch (dst_type) {
   case BLIT2D_DST_TYPE_NORMAL:
      fs.nir = build_nir_copy_fragment_shader(device, src_func);
      break;
   case BLIT2D_DST_TYPE_W_TILE:
   case BLIT2D_DST_TYPE_RGB:
      /* Not yet supported */
   default:
      return VK_SUCCESS;
   }

   /* We don't use a vertex shader for blitting, but instead build and pass
    * the VUEs directly to the rasterization backend. However, we do need
    * to provide a vertex shader (built directly in NIR) so that the
    * compiler does not dead-code our inputs.
    */
   struct anv_shader_module vs = {
      .nir = build_nir_vertex_shader(),
   };

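   /* Two vertex bindings, matching the buffers bound in
    * anv_meta_blit2d_normal_dst(): binding 0 has a stride of zero and
    * instance-rate stepping so the VUE header is read just once, while
    * binding 1 supplies the per-vertex position and texture coordinate.
    */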
   VkPipelineVertexInputStateCreateInfo vi_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
      .vertexBindingDescriptionCount = 2,
      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
         {
            .binding = 0,
            .stride = 0,
            .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE
         },
         {
            .binding = 1,
            .stride = 5 * sizeof(float),
            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
         },
      },
      .vertexAttributeDescriptionCount = 3,
      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
         {
            /* VUE Header */
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32A32_UINT,
            .offset = 0
         },
         {
            /* Position */
            .location = 1,
            .binding = 1,
            .format = VK_FORMAT_R32G32_SFLOAT,
            .offset = 0
         },
         {
            /* Texture Coordinate */
            .location = 2,
            .binding = 1,
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .offset = 8
         }
      }
   };

   VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
      {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_VERTEX_BIT,
         .module = anv_shader_module_to_handle(&vs),
         .pName = "main",
         .pSpecializationInfo = NULL
      }, {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
         .module = anv_shader_module_to_handle(&fs),
         .pName = "main",
         .pSpecializationInfo = NULL
      },
   };

   const VkGraphicsPipelineCreateInfo vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
      .stageCount = ARRAY_SIZE(pipeline_shader_stages),
      .pStages = pipeline_shader_stages,
      .pVertexInputState = &vi_create_info,
      .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
         .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
         .primitiveRestartEnable = false,
      },
      .pViewportState = &(VkPipelineViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
         .viewportCount = 1,
         .scissorCount = 1,
      },
      .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
         .rasterizerDiscardEnable = false,
         .polygonMode = VK_POLYGON_MODE_FILL,
         .cullMode = VK_CULL_MODE_NONE,
         .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE
      },
      .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
         .rasterizationSamples = 1,
         .sampleShadingEnable = false,
         .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
      },
      .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = (VkPipelineColorBlendAttachmentState []) {
            { .colorWriteMask =
                 VK_COLOR_COMPONENT_A_BIT |
                 VK_COLOR_COMPONENT_R_BIT |
                 VK_COLOR_COMPONENT_G_BIT |
                 VK_COLOR_COMPONENT_B_BIT },
         }
      },
      .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
         .dynamicStateCount = 9,
         .pDynamicStates = (VkDynamicState[]) {
            VK_DYNAMIC_STATE_VIEWPORT,
            VK_DYNAMIC_STATE_SCISSOR,
            VK_DYNAMIC_STATE_LINE_WIDTH,
            VK_DYNAMIC_STATE_DEPTH_BIAS,
            VK_DYNAMIC_STATE_BLEND_CONSTANTS,
            VK_DYNAMIC_STATE_DEPTH_BOUNDS,
            VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
            VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
            VK_DYNAMIC_STATE_STENCIL_REFERENCE,
         },
      },
      .flags = 0,
      .layout = device->meta_state.blit2d.img_p_layout,
      .renderPass = device->meta_state.blit2d.render_pass,
      .subpass = 0,
   };

   const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
      .color_attachment_count = -1,
      .use_repclear = false,
      .disable_viewport = true,
      .disable_scissor = true,
      .disable_vs = true,
      .use_rectlist = true
   };

   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
      VK_NULL_HANDLE,
      &vk_pipeline_info, &anv_pipeline_info,
      &device->meta_state.alloc,
      &device->meta_state.blit2d.pipelines[src_type][dst_type]);

   ralloc_free(vs.nir);
   ralloc_free(fs.nir);

   return result;
}

VkResult
anv_device_init_meta_blit2d_state(struct anv_device *device)
{
   VkResult result;

   zero(device->meta_state.blit2d);

   result = anv_CreateRenderPass(anv_device_to_handle(device),
      &(VkRenderPassCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = &(VkAttachmentDescription) {
            .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
            .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
            .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
         },
         .subpassCount = 1,
         .pSubpasses = &(VkSubpassDescription) {
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .inputAttachmentCount = 0,
            .colorAttachmentCount = 1,
            .pColorAttachments = &(VkAttachmentReference) {
               .attachment = 0,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .pResolveAttachments = NULL,
            .pDepthStencilAttachment = &(VkAttachmentReference) {
               .attachment = VK_ATTACHMENT_UNUSED,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .preserveAttachmentCount = 1,
            .pPreserveAttachments = (uint32_t[]) { 0 },
         },
         .dependencyCount = 0,
      }, &device->meta_state.alloc, &device->meta_state.blit2d.render_pass);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.img_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.img_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.img_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.buf_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.buf_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.buf_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         result = blit2d_init_pipeline(device, src, dst);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   return VK_SUCCESS;

fail:
   anv_device_finish_meta_blit2d_state(device);
   return result;
}