/* mesa.git: src/intel/vulkan/anv_meta_blit2d.c
 * commit b6e33c84fdd56801e90d190c717eda5e9226c4d2
 */

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_meta.h"
#include "nir/nir_builder.h"

enum blit2d_src_type {
   /* We can make a "normal" image view of this source and just texture
    * from it like you would in any other shader.
    */
   BLIT2D_SRC_TYPE_NORMAL,

   /* The source is W-tiled and we need to detile manually in the shader.
    * This will work on any platform but is needed for all W-tiled sources
    * prior to Broadwell.
    */
   BLIT2D_SRC_TYPE_W_DETILE,

   BLIT2D_NUM_SRC_TYPES,
};

enum blit2d_dst_type {
   /* We can bind this destination as a "normal" render target and render
    * to it just like you would anywhere else.
    */
   BLIT2D_DST_TYPE_NORMAL,

   /* The destination is W-tiled and we need to do the tiling manually in
    * the shader. This is required for all W-tiled destinations.
    *
    * Sky Lake adds a feature for providing explicit stencil values in the
    * shader but mesa doesn't support that yet so neither do we.
    */
   BLIT2D_DST_TYPE_W_TILE,

   /* The destination has a 3-channel RGB format. Since we can't render to
    * non-power-of-two textures, we have to bind it as a red texture and
    * select the correct component for the given red pixel in the shader.
    */
   BLIT2D_DST_TYPE_RGB,

   BLIT2D_NUM_DST_TYPES,
};

static VkFormat
vk_format_for_size(int bs)
{
   /* The choice of UNORM and UINT formats is very intentional here. Most of
    * the time, we want to use a UINT format to avoid any rounding error in
    * the blit. For stencil blits, R8_UINT is required by the hardware.
    * (It's the only format allowed in conjunction with W-tiling.) Also we
    * intentionally use the 4-channel formats whenever we can. This is so
    * that, when we do a RGB <-> RGBX copy, the two formats will line up even
    * though one of them is 3/4 the size of the other. The choice of UNORM
    * vs. UINT is also very intentional because Haswell doesn't handle 8 or
    * 16-bit RGB UINT formats at all so we have to use UNORM there.
    * Fortunately, the only time we should ever use two different formats in
    * the table below is for RGB -> RGBA blits and so we will never have any
    * UNORM/UINT mismatch.
    */
   switch (bs) {
   case 1: return VK_FORMAT_R8_UINT;
   case 2: return VK_FORMAT_R8G8_UINT;
   case 3: return VK_FORMAT_R8G8B8_UNORM;
   case 4: return VK_FORMAT_R8G8B8A8_UNORM;
   case 6: return VK_FORMAT_R16G16B16_UNORM;
   case 8: return VK_FORMAT_R16G16B16A16_UNORM;
   case 12: return VK_FORMAT_R32G32B32_UINT;
   case 16: return VK_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Invalid format block size");
   }
}

static void
create_iview(struct anv_cmd_buffer *cmd_buffer,
             struct anv_meta_blit2d_surf *surf,
             uint64_t offset,
             VkImageUsageFlags usage,
             uint32_t width,
             uint32_t height,
             VkImage *img,
             struct anv_image_view *iview)
{
   const VkImageCreateInfo image_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = vk_format_for_size(surf->bs),
      .extent = {
         .width = width,
         .height = height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = 1,
      .tiling = surf->tiling == ISL_TILING_LINEAR ?
                VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL,
      .usage = usage,
   };

   /* Create the VkImage that is bound to the surface's memory. */
   anv_image_create(anv_device_to_handle(cmd_buffer->device),
                    &(struct anv_image_create_info) {
                       .vk_info = &image_info,
                       .isl_tiling_flags = 1 << surf->tiling,
                       .stride = surf->pitch,
                    }, &cmd_buffer->pool->alloc, img);

   /* We could use a vk call to bind memory, but that would require
    * creating a dummy memory object etc. so there's really no point.
    */
   anv_image_from_handle(*img)->bo = surf->bo;
   anv_image_from_handle(*img)->offset = surf->base_offset + offset;

   anv_image_view_init(iview, cmd_buffer->device,
                       &(VkImageViewCreateInfo) {
                          .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                          .image = *img,
                          .viewType = VK_IMAGE_VIEW_TYPE_2D,
                          .format = image_info.format,
                          .subresourceRange = {
                             .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                             .baseMipLevel = 0,
                             .levelCount = 1,
                             .baseArrayLayer = 0,
                             .layerCount = 1
                          },
                       }, cmd_buffer, usage);
}

struct blit2d_src_temps {
   VkImage image;
   struct anv_image_view iview;

   struct anv_buffer buffer;
   struct anv_buffer_view bview;

   VkDescriptorPool desc_pool;
   VkDescriptorSet set;
};

static void
blit2d_bind_src(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *src,
                enum blit2d_src_type src_type,
                struct anv_meta_blit2d_rect *rect,
                struct blit2d_src_temps *tmp)
{
   struct anv_device *device = cmd_buffer->device;
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);

   if (src_type == BLIT2D_SRC_TYPE_NORMAL) {
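      /* Split the source coordinates into a tile-aligned byte offset into
       * the surface plus a small residual (x, y) within that tile, so the
       * temporary image view created below starts right at the data we
       * need.
       */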
      uint32_t offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         src->tiling, src->bs, src->pitch,
                                         rect->src_x, rect->src_y,
                                         &offset, &rect->src_x, &rect->src_y);

      create_iview(cmd_buffer, src, offset, VK_IMAGE_USAGE_SAMPLED_BIT,
                   rect->src_x + rect->width, rect->src_y + rect->height,
                   &tmp->image, &tmp->iview);

      anv_CreateDescriptorPool(vk_device,
         &(const VkDescriptorPoolCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .maxSets = 1,
            .poolSizeCount = 1,
            .pPoolSizes = (VkDescriptorPoolSize[]) {
               {
                  .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                  .descriptorCount = 1
               },
            }
         }, &cmd_buffer->pool->alloc, &tmp->desc_pool);

      anv_AllocateDescriptorSets(vk_device,
         &(VkDescriptorSetAllocateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            .descriptorPool = tmp->desc_pool,
            .descriptorSetCount = 1,
            .pSetLayouts = &device->meta_state.blit2d.img_ds_layout
         }, &tmp->set);

      anv_UpdateDescriptorSets(vk_device,
         1, /* writeCount */
         (VkWriteDescriptorSet[]) {
            {
               .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
               .dstSet = tmp->set,
               .dstBinding = 0,
               .dstArrayElement = 0,
               .descriptorCount = 1,
               .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .pImageInfo = (VkDescriptorImageInfo[]) {
                  {
                     .sampler = NULL,
                     .imageView = anv_image_view_to_handle(&tmp->iview),
                     .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
                  },
               }
            }
         }, 0, NULL);

      anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
                                VK_PIPELINE_BIND_POINT_GRAPHICS,
                                device->meta_state.blit2d.img_p_layout, 0, 1,
                                &tmp->set, 0, NULL);
   } else {
      assert(src_type == BLIT2D_SRC_TYPE_W_DETILE);
      assert(src->tiling == ISL_TILING_W);
      assert(src->bs == 1);

      uint32_t tile_offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         ISL_TILING_W, 1, src->pitch,
                                         rect->src_x, rect->src_y,
                                         &tile_offset,
                                         &rect->src_x, &rect->src_y);

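      /* A W tile is 64 rows tall, so size the buffer to a whole number of
       * tiles worth of rows at the surface pitch.
       */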
      tmp->buffer = (struct anv_buffer) {
         .device = device,
         .size = align_u32(rect->src_y + rect->height, 64) * src->pitch,
         .usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT,
         .bo = src->bo,
         .offset = src->base_offset + tile_offset,
      };

      anv_buffer_view_init(&tmp->bview, device,
         &(VkBufferViewCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
            .buffer = anv_buffer_to_handle(&tmp->buffer),
            .format = VK_FORMAT_R8_UINT,
            .offset = 0,
            .range = VK_WHOLE_SIZE,
         }, cmd_buffer);

      anv_CreateDescriptorPool(vk_device,
         &(const VkDescriptorPoolCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .maxSets = 1,
            .poolSizeCount = 1,
            .pPoolSizes = (VkDescriptorPoolSize[]) {
               {
                  .type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                  .descriptorCount = 1
               },
            }
         }, &cmd_buffer->pool->alloc, &tmp->desc_pool);

      anv_AllocateDescriptorSets(vk_device,
         &(VkDescriptorSetAllocateInfo) {
            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            .descriptorPool = tmp->desc_pool,
            .descriptorSetCount = 1,
            .pSetLayouts = &device->meta_state.blit2d.buf_ds_layout
         }, &tmp->set);

      anv_UpdateDescriptorSets(vk_device,
         1, /* writeCount */
         (VkWriteDescriptorSet[]) {
            {
               .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
               .dstSet = tmp->set,
               .dstBinding = 0,
               .dstArrayElement = 0,
               .descriptorCount = 1,
               .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
               .pTexelBufferView = (VkBufferView[]) {
                  anv_buffer_view_to_handle(&tmp->bview),
               },
            }
         }, 0, NULL);

      anv_CmdBindDescriptorSets(anv_cmd_buffer_to_handle(cmd_buffer),
                                VK_PIPELINE_BIND_POINT_GRAPHICS,
                                device->meta_state.blit2d.buf_p_layout, 0, 1,
                                &tmp->set, 0, NULL);
   }
}

static void
blit2d_unbind_src(struct anv_cmd_buffer *cmd_buffer,
                  enum blit2d_src_type src_type,
                  struct blit2d_src_temps *tmp)
{
   anv_DestroyDescriptorPool(anv_device_to_handle(cmd_buffer->device),
                             tmp->desc_pool, &cmd_buffer->pool->alloc);
   if (src_type == BLIT2D_SRC_TYPE_NORMAL) {
      anv_DestroyImage(anv_device_to_handle(cmd_buffer->device),
                       tmp->image, &cmd_buffer->pool->alloc);
   }
}

struct blit2d_dst_temps {
   VkImage image;
   struct anv_image_view iview;
   VkFramebuffer fb;
};

static void
blit2d_bind_dst(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *dst,
                uint64_t offset,
                uint32_t width,
                uint32_t height,
                struct blit2d_dst_temps *tmp)
{
   create_iview(cmd_buffer, dst, offset, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                width, height, &tmp->image, &tmp->iview);

   anv_CreateFramebuffer(anv_device_to_handle(cmd_buffer->device),
      &(VkFramebufferCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = (VkImageView[]) {
            anv_image_view_to_handle(&tmp->iview),
         },
         .width = width,
         .height = height,
         .layers = 1
      }, &cmd_buffer->pool->alloc, &tmp->fb);

   anv_CmdSetViewport(anv_cmd_buffer_to_handle(cmd_buffer), 0, 1,
      &(VkViewport) {
         .x = 0.0f,
         .y = 0.0f,
         .width = width,
         .height = height,
         .minDepth = 0.0f,
         .maxDepth = 1.0f,
      });
}

static void
blit2d_unbind_dst(struct anv_cmd_buffer *cmd_buffer,
                  struct blit2d_dst_temps *tmp)
{
   VkDevice vk_device = anv_device_to_handle(cmd_buffer->device);
   anv_DestroyFramebuffer(vk_device, tmp->fb, &cmd_buffer->pool->alloc);
   anv_DestroyImage(vk_device, tmp->image, &cmd_buffer->pool->alloc);
}

void
anv_meta_end_blit2d(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_meta_saved_state *save)
{
   anv_meta_restore(save, cmd_buffer);
}

void
anv_meta_begin_blit2d(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_meta_saved_state *save)
{
   anv_meta_save(save, cmd_buffer,
                 (1 << VK_DYNAMIC_STATE_VIEWPORT));
}

static void
bind_pipeline(struct anv_cmd_buffer *cmd_buffer,
              enum blit2d_src_type src_type,
              enum blit2d_dst_type dst_type)
{
   VkPipeline pipeline =
      cmd_buffer->device->meta_state.blit2d.pipelines[src_type][dst_type];

   if (cmd_buffer->state.pipeline != anv_pipeline_from_handle(pipeline)) {
      anv_CmdBindPipeline(anv_cmd_buffer_to_handle(cmd_buffer),
                          VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   }
}

static void
anv_meta_blit2d_normal_dst(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_meta_blit2d_surf *src,
                           enum blit2d_src_type src_type,
                           struct anv_meta_blit2d_surf *dst,
                           unsigned num_rects,
                           struct anv_meta_blit2d_rect *rects)
{
   struct anv_device *device = cmd_buffer->device;

   for (unsigned r = 0; r < num_rects; ++r) {
      struct blit2d_src_temps src_temps;
      blit2d_bind_src(cmd_buffer, src, src_type, &rects[r], &src_temps);

      uint32_t offset = 0;
      isl_tiling_get_intratile_offset_el(&cmd_buffer->device->isl_dev,
                                         dst->tiling, dst->bs, dst->pitch,
                                         rects[r].dst_x, rects[r].dst_y,
                                         &offset,
                                         &rects[r].dst_x, &rects[r].dst_y);

      struct blit2d_dst_temps dst_temps;
      blit2d_bind_dst(cmd_buffer, dst, offset, rects[r].dst_x + rects[r].width,
                      rects[r].dst_y + rects[r].height, &dst_temps);

      struct blit_vb_data {
         float pos[2];
         float tex_coord[3];
      } *vb_data;

      unsigned vb_size = sizeof(struct anv_vue_header) + 3 * sizeof(*vb_data);

      struct anv_state vb_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, vb_size, 16);
      memset(vb_state.map, 0, sizeof(struct anv_vue_header));
      vb_data = vb_state.map + sizeof(struct anv_vue_header);

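      /* Three vertices describe the rectangle to blit: (max, max),
       * (min, max), (min, min). The pipeline is created with use_rectlist,
       * so the hardware draws them as a RECTLIST and infers the fourth
       * corner. The third texture-coordinate component carries the source
       * row pitch for the W-detile fetch path.
       */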
      vb_data[0] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x + rects[r].width,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x + rects[r].width,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[1] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y + rects[r].height,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y + rects[r].height,
            src->pitch,
         },
      };

      vb_data[2] = (struct blit_vb_data) {
         .pos = {
            rects[r].dst_x,
            rects[r].dst_y,
         },
         .tex_coord = {
            rects[r].src_x,
            rects[r].src_y,
            src->pitch,
         },
      };

      anv_state_clflush(vb_state);

      struct anv_buffer vertex_buffer = {
         .device = device,
         .size = vb_size,
         .bo = &device->dynamic_state_block_pool.bo,
         .offset = vb_state.offset,
      };

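      /* Binding 0 supplies the zeroed VUE header (instance rate, stride 0);
       * binding 1 supplies the per-vertex position and texture coordinate
       * starting just past that header.
       */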
      anv_CmdBindVertexBuffers(anv_cmd_buffer_to_handle(cmd_buffer), 0, 2,
         (VkBuffer[]) {
            anv_buffer_to_handle(&vertex_buffer),
            anv_buffer_to_handle(&vertex_buffer)
         },
         (VkDeviceSize[]) {
            0,
            sizeof(struct anv_vue_header),
         });

      ANV_CALL(CmdBeginRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer),
         &(VkRenderPassBeginInfo) {
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
            .renderPass = device->meta_state.blit2d.render_pass,
            .framebuffer = dst_temps.fb,
            .renderArea = {
               .offset = { rects[r].dst_x, rects[r].dst_y, },
               .extent = { rects[r].width, rects[r].height },
            },
            .clearValueCount = 0,
            .pClearValues = NULL,
         }, VK_SUBPASS_CONTENTS_INLINE);

      bind_pipeline(cmd_buffer, src_type, BLIT2D_DST_TYPE_NORMAL);

      ANV_CALL(CmdDraw)(anv_cmd_buffer_to_handle(cmd_buffer), 3, 1, 0, 0);

      ANV_CALL(CmdEndRenderPass)(anv_cmd_buffer_to_handle(cmd_buffer));

      /* At the point where we emit the draw call, all data from the
       * descriptor sets, etc. has been used. We are free to delete it.
       */
      blit2d_unbind_src(cmd_buffer, src_type, &src_temps);
      blit2d_unbind_dst(cmd_buffer, &dst_temps);
   }
}

void
anv_meta_blit2d(struct anv_cmd_buffer *cmd_buffer,
                struct anv_meta_blit2d_surf *src,
                struct anv_meta_blit2d_surf *dst,
                unsigned num_rects,
                struct anv_meta_blit2d_rect *rects)
{
   enum blit2d_src_type src_type;
   if (src->tiling == ISL_TILING_W && cmd_buffer->device->info.gen < 8) {
      src_type = BLIT2D_SRC_TYPE_W_DETILE;
   } else {
      src_type = BLIT2D_SRC_TYPE_NORMAL;
   }

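   /* Dispatch on the destination layout: W-tiled (stencil) destinations and
    * 3-channel RGB destinations (block size 3, 6, or 12) need the special
    * handling described in blit2d_dst_type and are not implemented yet.
    */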
   if (dst->tiling == ISL_TILING_W) {
      assert(dst->bs == 1);
      anv_finishme("Blitting to w-tiled destinations not yet supported");
      return;
   } else if (dst->bs % 3 == 0) {
      anv_finishme("Blitting to RGB destinations not yet supported");
      return;
   } else {
      assert(util_is_power_of_two(dst->bs));
      anv_meta_blit2d_normal_dst(cmd_buffer, src, src_type, dst,
                                 num_rects, rects);
   }
}

static nir_shader *
build_nir_vertex_shader(void)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit_vs");

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "a_pos");
   pos_in->data.location = VERT_ATTRIB_GENERIC0;
   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;
   nir_copy_var(&b, pos_out, pos_in);

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec4, "a_tex_pos");
   tex_pos_in->data.location = VERT_ATTRIB_GENERIC1;
   nir_variable *tex_pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                   vec4, "v_tex_pos");
   tex_pos_out->data.location = VARYING_SLOT_VAR0;
   tex_pos_out->data.interpolation = INTERP_QUALIFIER_SMOOTH;
   nir_copy_var(&b, tex_pos_out, tex_pos_in);

   nir_variable *other_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                vec4, "a_other");
   other_in->data.location = VERT_ATTRIB_GENERIC2;
   nir_variable *other_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "v_other");
   other_out->data.location = VARYING_SLOT_VAR1;
   other_out->data.interpolation = INTERP_QUALIFIER_FLAT;
   nir_copy_var(&b, other_out, other_in);

   return b.shader;
}

typedef nir_ssa_def* (*texel_fetch_build_func)(struct nir_builder *,
                                               struct anv_device *,
                                               nir_ssa_def *, nir_ssa_def *);

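/* OR num_bits bits taken from src starting at bit src_offset into dst
 * starting at bit dst_offset. Used below to build up the bit-interleaved
 * address of a texel within a W tile.
 */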
static nir_ssa_def *
nir_copy_bits(struct nir_builder *b, nir_ssa_def *dst, unsigned dst_offset,
              nir_ssa_def *src, unsigned src_offset, unsigned num_bits)
{
   unsigned src_mask = (~1u >> (32 - num_bits)) << src_offset;
   nir_ssa_def *masked = nir_iand(b, src, nir_imm_int(b, src_mask));

   nir_ssa_def *shifted;
   if (dst_offset > src_offset) {
      shifted = nir_ishl(b, masked, nir_imm_int(b, dst_offset - src_offset));
   } else if (dst_offset < src_offset) {
      shifted = nir_ushr(b, masked, nir_imm_int(b, src_offset - dst_offset));
   } else {
      assert(dst_offset == src_offset);
      shifted = masked;
   }

   return nir_ior(b, dst, shifted);
}

static nir_ssa_def *
build_nir_w_tiled_fetch(struct nir_builder *b, struct anv_device *device,
                        nir_ssa_def *tex_pos, nir_ssa_def *tex_pitch)
{
   nir_ssa_def *x = nir_channel(b, tex_pos, 0);
   nir_ssa_def *y = nir_channel(b, tex_pos, 1);

   /* First, compute the block-aligned offset */
   nir_ssa_def *x_major = nir_ushr(b, x, nir_imm_int(b, 6));
   nir_ssa_def *y_major = nir_ushr(b, y, nir_imm_int(b, 6));
   nir_ssa_def *offset =
      nir_iadd(b, nir_imul(b, y_major,
                              nir_imul(b, tex_pitch, nir_imm_int(b, 64))),
                  nir_imul(b, x_major, nir_imm_int(b, 4096)));

   /* Compute the bottom 12 bits of the offset */
   offset = nir_copy_bits(b, offset, 0, x, 0, 1);
   offset = nir_copy_bits(b, offset, 1, y, 0, 1);
   offset = nir_copy_bits(b, offset, 2, x, 1, 1);
   offset = nir_copy_bits(b, offset, 3, y, 1, 1);
   offset = nir_copy_bits(b, offset, 4, x, 2, 1);
   offset = nir_copy_bits(b, offset, 5, y, 2, 4);
   offset = nir_copy_bits(b, offset, 9, x, 3, 3);

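   /* Bit-6 address swizzling XORs bit 6 of the byte address with bit 9.
    * When it is enabled, apply the same XOR so we read the byte the
    * hardware actually stored.
    */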
   if (device->isl_dev.has_bit6_swizzling) {
      offset = nir_ixor(b, offset,
                        nir_ushr(b, nir_iand(b, offset, nir_imm_int(b, 0x0200)),
                                 nir_imm_int(b, 3)));
   }

   const struct glsl_type *sampler_type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_BUF, false, false, GLSL_TYPE_FLOAT);
   nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
                                               sampler_type, "s_tex");
   sampler->data.descriptor_set = 0;
   sampler->data.binding = 0;

   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1);
   tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
   tex->op = nir_texop_txf;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(offset);
   tex->dest_type = nir_type_float; /* TODO */
   tex->is_array = false;
   tex->coord_components = 1;
   tex->texture = nir_deref_var_create(tex, sampler);
   tex->sampler = NULL;

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}

static nir_ssa_def *
build_nir_texel_fetch(struct nir_builder *b, struct anv_device *device,
                      nir_ssa_def *tex_pos, nir_ssa_def *tex_pitch)
{
   const struct glsl_type *sampler_type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false, GLSL_TYPE_FLOAT);
   nir_variable *sampler = nir_variable_create(b->shader, nir_var_uniform,
                                               sampler_type, "s_tex");
   sampler->data.descriptor_set = 0;
   sampler->data.binding = 0;

   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   tex->op = nir_texop_txf;
   tex->src[0].src_type = nir_tex_src_coord;
   tex->src[0].src = nir_src_for_ssa(tex_pos);
   tex->src[1].src_type = nir_tex_src_lod;
   tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0));
   tex->dest_type = nir_type_float; /* TODO */
   tex->is_array = false;
   tex->coord_components = 2;
   tex->texture = nir_deref_var_create(tex, sampler);
   tex->sampler = NULL;

   nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}

static nir_shader *
build_nir_copy_fragment_shader(struct anv_device *device,
                               texel_fetch_build_func txf_func)
{
   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *vec3 = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_builder b;

   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
   b.shader->info.name = ralloc_strdup(b.shader, "meta_blit2d_fs");

   nir_variable *tex_pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                                  vec3, "v_tex_pos");
   tex_pos_in->data.location = VARYING_SLOT_VAR0;

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_DATA0;

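   /* v_tex_pos carries (u, v, pitch). Split it into the integer texel
    * position and the source row pitch, which the W-detile fetch needs to
    * compute its byte offset.
    */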
   nir_ssa_def *pos_int = nir_f2i(&b, nir_load_var(&b, tex_pos_in));
   unsigned swiz[4] = { 0, 1 };
   nir_ssa_def *tex_pos = nir_swizzle(&b, pos_int, swiz, 2, false);
   nir_ssa_def *tex_pitch = nir_channel(&b, pos_int, 2);

   nir_ssa_def *color = txf_func(&b, device, tex_pos, tex_pitch);
   nir_store_var(&b, color_out, color, 0xf);

   return b.shader;
}

void
anv_device_finish_meta_blit2d_state(struct anv_device *device)
{
   if (device->meta_state.blit2d.render_pass) {
      anv_DestroyRenderPass(anv_device_to_handle(device),
                            device->meta_state.blit2d.render_pass,
                            &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.img_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.img_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.img_ds_layout,
                                     &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_p_layout) {
      anv_DestroyPipelineLayout(anv_device_to_handle(device),
                                device->meta_state.blit2d.buf_p_layout,
                                &device->meta_state.alloc);
   }

   if (device->meta_state.blit2d.buf_ds_layout) {
      anv_DestroyDescriptorSetLayout(anv_device_to_handle(device),
                                     device->meta_state.blit2d.buf_ds_layout,
                                     &device->meta_state.alloc);
   }

   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         if (device->meta_state.blit2d.pipelines[src][dst]) {
            anv_DestroyPipeline(anv_device_to_handle(device),
                                device->meta_state.blit2d.pipelines[src][dst],
                                &device->meta_state.alloc);
         }
      }
   }
}

static VkResult
blit2d_init_pipeline(struct anv_device *device,
                     enum blit2d_src_type src_type,
                     enum blit2d_dst_type dst_type)
{
   VkResult result;

   texel_fetch_build_func src_func;
   switch (src_type) {
   case BLIT2D_SRC_TYPE_NORMAL:
      src_func = build_nir_texel_fetch;
      break;
   case BLIT2D_SRC_TYPE_W_DETILE:
      src_func = build_nir_w_tiled_fetch;
      break;
   default:
      unreachable("Invalid blit2d source type");
   }

   struct anv_shader_module fs = { .nir = NULL };
   switch (dst_type) {
   case BLIT2D_DST_TYPE_NORMAL:
      fs.nir = build_nir_copy_fragment_shader(device, src_func);
      break;
   case BLIT2D_DST_TYPE_W_TILE:
   case BLIT2D_DST_TYPE_RGB:
      /* Not yet supported */
   default:
      return VK_SUCCESS;
   }

   /* We don't use a vertex shader for blitting, but instead build and pass
    * the VUEs directly to the rasterization backend. However, we do need
    * to provide a vertex shader so that the compiler does not dead-code
    * our inputs.
    */
   struct anv_shader_module vs = {
      .nir = build_nir_vertex_shader(),
   };

   VkPipelineVertexInputStateCreateInfo vi_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
      .vertexBindingDescriptionCount = 2,
      .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) {
         {
            .binding = 0,
            .stride = 0,
            .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE
         },
         {
            .binding = 1,
            .stride = 5 * sizeof(float),
            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX
         },
      },
      .vertexAttributeDescriptionCount = 3,
      .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) {
         {
            /* VUE Header */
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32A32_UINT,
            .offset = 0
         },
         {
            /* Position */
            .location = 1,
            .binding = 1,
            .format = VK_FORMAT_R32G32_SFLOAT,
            .offset = 0
         },
         {
            /* Texture Coordinate */
            .location = 2,
            .binding = 1,
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .offset = 8
         }
      }
   };

   VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
      {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_VERTEX_BIT,
         .module = anv_shader_module_to_handle(&vs),
         .pName = "main",
         .pSpecializationInfo = NULL
      }, {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
         .module = anv_shader_module_to_handle(&fs),
         .pName = "main",
         .pSpecializationInfo = NULL
      },
   };

   const VkGraphicsPipelineCreateInfo vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
      .stageCount = ARRAY_SIZE(pipeline_shader_stages),
      .pStages = pipeline_shader_stages,
      .pVertexInputState = &vi_create_info,
      .pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
         .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
         .primitiveRestartEnable = false,
      },
      .pViewportState = &(VkPipelineViewportStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
         .viewportCount = 1,
         .scissorCount = 1,
      },
      .pRasterizationState = &(VkPipelineRasterizationStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
         .rasterizerDiscardEnable = false,
         .polygonMode = VK_POLYGON_MODE_FILL,
         .cullMode = VK_CULL_MODE_NONE,
         .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE
      },
      .pMultisampleState = &(VkPipelineMultisampleStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
         .rasterizationSamples = 1,
         .sampleShadingEnable = false,
         .pSampleMask = (VkSampleMask[]) { UINT32_MAX },
      },
      .pColorBlendState = &(VkPipelineColorBlendStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = (VkPipelineColorBlendAttachmentState []) {
            { .colorWriteMask =
                 VK_COLOR_COMPONENT_A_BIT |
                 VK_COLOR_COMPONENT_R_BIT |
                 VK_COLOR_COMPONENT_G_BIT |
                 VK_COLOR_COMPONENT_B_BIT },
         }
      },
      .pDynamicState = &(VkPipelineDynamicStateCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
         .dynamicStateCount = 9,
         .pDynamicStates = (VkDynamicState[]) {
            VK_DYNAMIC_STATE_VIEWPORT,
            VK_DYNAMIC_STATE_SCISSOR,
            VK_DYNAMIC_STATE_LINE_WIDTH,
            VK_DYNAMIC_STATE_DEPTH_BIAS,
            VK_DYNAMIC_STATE_BLEND_CONSTANTS,
            VK_DYNAMIC_STATE_DEPTH_BOUNDS,
            VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
            VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
            VK_DYNAMIC_STATE_STENCIL_REFERENCE,
         },
      },
      .flags = 0,
      .layout = device->meta_state.blit2d.img_p_layout,
      .renderPass = device->meta_state.blit2d.render_pass,
      .subpass = 0,
   };

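   /* use_rectlist draws the three vertices emitted for each blit as a
    * screen-aligned rectangle (the fourth corner is inferred), and
    * disable_vs skips the vertex shader stage since we hand complete VUEs
    * straight to the rasterizer.
    */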
   const struct anv_graphics_pipeline_create_info anv_pipeline_info = {
      .color_attachment_count = -1,
      .use_repclear = false,
      .disable_viewport = true,
      .disable_scissor = true,
      .disable_vs = true,
      .use_rectlist = true
   };

   result = anv_graphics_pipeline_create(anv_device_to_handle(device),
      VK_NULL_HANDLE,
      &vk_pipeline_info, &anv_pipeline_info,
      &device->meta_state.alloc,
      &device->meta_state.blit2d.pipelines[src_type][dst_type]);

   ralloc_free(vs.nir);
   ralloc_free(fs.nir);

   return result;
}

VkResult
anv_device_init_meta_blit2d_state(struct anv_device *device)
{
   VkResult result;

   zero(device->meta_state.blit2d);

   result = anv_CreateRenderPass(anv_device_to_handle(device),
      &(VkRenderPassCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
         .attachmentCount = 1,
         .pAttachments = &(VkAttachmentDescription) {
            .format = VK_FORMAT_UNDEFINED, /* Our shaders don't care */
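            /* LOAD + STORE preserve whatever is already in the destination
             * outside the rectangle being blitted.
             */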
            .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
            .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
            .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
         },
         .subpassCount = 1,
         .pSubpasses = &(VkSubpassDescription) {
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .inputAttachmentCount = 0,
            .colorAttachmentCount = 1,
            .pColorAttachments = &(VkAttachmentReference) {
               .attachment = 0,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .pResolveAttachments = NULL,
            .pDepthStencilAttachment = &(VkAttachmentReference) {
               .attachment = VK_ATTACHMENT_UNUSED,
               .layout = VK_IMAGE_LAYOUT_GENERAL,
            },
            .preserveAttachmentCount = 1,
            .pPreserveAttachments = (uint32_t[]) { 0 },
         },
         .dependencyCount = 0,
      }, &device->meta_state.alloc, &device->meta_state.blit2d.render_pass);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.img_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.img_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.img_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreateDescriptorSetLayout(anv_device_to_handle(device),
      &(VkDescriptorSetLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
         .bindingCount = 1,
         .pBindings = (VkDescriptorSetLayoutBinding[]) {
            {
               .binding = 0,
               .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
               .descriptorCount = 1,
               .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
               .pImmutableSamplers = NULL
            },
         }
      }, &device->meta_state.alloc, &device->meta_state.blit2d.buf_ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_CreatePipelineLayout(anv_device_to_handle(device),
      &(VkPipelineLayoutCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
         .setLayoutCount = 1,
         .pSetLayouts = &device->meta_state.blit2d.buf_ds_layout,
      },
      &device->meta_state.alloc, &device->meta_state.blit2d.buf_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   for (unsigned src = 0; src < BLIT2D_NUM_SRC_TYPES; src++) {
      for (unsigned dst = 0; dst < BLIT2D_NUM_DST_TYPES; dst++) {
         result = blit2d_init_pipeline(device, src, dst);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   return VK_SUCCESS;

fail:
   anv_device_finish_meta_blit2d_state(device);
   return result;
}