anv: Use blorp for ClearAttachments
src/intel/vulkan/anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

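/* This file implements anv's copy, blit, clear, and resolve entrypoints on
 * top of BLORP, the blit-and-resolve engine shared with Intel's GL driver.
 * BLORP compiles internal shaders on demand and calls back into the driver
 * to cache them; the two callbacks below back those shaders with a
 * dedicated anv pipeline cache.
 */
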
static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out =
      anv_shader_bin_get_prog_data(bin);

   return true;
}

static void
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const void *prog_data, uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out =
      anv_shader_bin_get_prog_data(bin);
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
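   /* Use the device's default MOCS (memory object control state) for all
    * of BLORP's texture, render-target, and vertex-buffer accesses.
    */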
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   isl_surf_init(&device->isl_dev, isl_surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .min_pitch = row_pitch,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(isl_surf->row_pitch == row_pitch);
}
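
/* As a hypothetical example: a tightly packed 64x64 R8G8B8A8 staging buffer
 * would be wrapped as a linear 2D surface with min_pitch = 64 * 4 = 256
 * bytes, and the assert above checks that isl didn't pad the pitch.
 */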

static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             struct blorp_surf *blorp_surf)
{
   const struct anv_surface *surface =
      anv_image_get_surface_for_aspect_mask(image, aspect);

   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->bo,
         .offset = image->offset + surface->offset,
      },
   };
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count = pRegions[r].dstSubresource.layerCount;
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(pRegions[r].srcSubresource.layerCount == layer_count);
      }
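
      /* For 3D images, BLORP addresses depth slices as if they were array
       * layers, so the z offset and depth extent above take the place of
       * the base layer and layer count.
       */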

      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);

      uint32_t a;
      for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
         VkImageAspectFlagBits aspect = (1 << a);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, aspect, &src_surf);
         get_blorp_surf_for_anv_image(dst_image, aspect, &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect, &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth = pRegions[r].imageSubresource.layerCount;
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;
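
      /* The pitch math above works in units of compression blocks: bw x bh
       * texel blocks of bpb bits each.  As a hypothetical example, a
       * 127-texel-wide row of a 4x4, 128-bit block-compressed format takes
       * DIV_ROUND_UP(127, 4) * 16 = 512 bytes; for uncompressed formats
       * bw == bh == 1 and this reduces to width times bytes-per-pixel.
       */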

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}
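
/* flip_coords() normalizes both ranges to be increasing and reports whether
 * the blit should mirror along that axis.  For example, src (10, 2) and
 * dst (0, 8) become src (2, 10), dst (0, 8) with a return value of true,
 * since exactly one of the two ranges was reversed.
 */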

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask, &dst);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
                        src_res->aspectMask, src_image->tiling);
      struct anv_format dst_format =
         anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
                        dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + dst_res->layerCount;
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + src_res->layerCount;
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end - src_start) /
                         (float)(dst_end - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format, dst_format.swizzle,
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 1:  return ISL_FORMAT_R8_UINT;
   case 2:  return ISL_FORMAT_R8G8_UINT;
   case 4:  return ISL_FORMAT_R8G8B8A8_UINT;
   case 8:  return ISL_FORMAT_R16G16B16A16_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

static void
do_buffer_copy(struct blorp_batch *batch,
               struct anv_bo *src, uint64_t src_offset,
               struct anv_bo *dst, uint64_t dst_offset,
               int width, int height, int block_size)
{
   struct anv_device *device = batch->blorp->driver_ctx;

   /* The actual format we pick doesn't matter as blorp will throw it away.
    * The only thing that actually matters is the size.
    */
   enum isl_format format = isl_format_for_size(block_size);

   struct isl_surf surf;
   isl_surf_init(&device->isl_dev, &surf,
                 .dim = ISL_SURF_DIM_2D,
                 .format = format,
                 .width = width,
                 .height = height,
                 .depth = 1,
                 .levels = 1,
                 .array_len = 1,
                 .samples = 1,
                 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                          ISL_SURF_USAGE_RENDER_TARGET_BIT,
                 .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(surf.row_pitch == width * block_size);

   struct blorp_surf src_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = src,
         .offset = src_offset,
      },
   };

   struct blorp_surf dst_blorp_surf = {
      .surf = &surf,
      .addr = {
         .buffer = dst,
         .offset = dst_offset,
      },
   };

   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
              0, 0, 0, 0, width, height);
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static inline uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one. If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
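
/* For example, gcd_pow2_u64(48, 32) == 16: the lowest set bit of 48 is bit 4
 * and of 32 is bit 5, so the largest power of two dividing both is 1 << 4.
 */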

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
      uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
      uint64_t copy_size = pRegions[r].size;

      /* First, we compute the biggest format that can be used with the
       * given offsets and size.
       */
      int bs = 16;
      bs = gcd_pow2_u64(bs, src_offset);
      bs = gcd_pow2_u64(bs, dst_offset);
      bs = gcd_pow2_u64(bs, pRegions[r].size);
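
      /* As a worked example: a 100-byte copy with both offsets 16-byte
       * aligned yields bs = 4 (100 = 4 * 25), so the region ends up being
       * copied as a single 25x1 rectangle of R8G8B8A8_UINT pixels by the
       * final small-copy case below.
       */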

      /* Next, we make a bunch of max-sized copies */
      uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
      while (copy_size >= max_copy_size) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
         copy_size -= max_copy_size;
         src_offset += max_copy_size;
         dst_offset += max_copy_size;
      }

      /* Now make a max-width copy */
      uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
      assert(height < MAX_SURFACE_DIM);
      if (height != 0) {
         uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        MAX_SURFACE_DIM, height, bs);
         copy_size -= rect_copy_size;
         src_offset += rect_copy_size;
         dst_offset += rect_copy_size;
      }

      /* Finally, make a small copy to finish it off */
      if (copy_size != 0) {
         do_buffer_copy(&batch, src_buffer->bo, src_offset,
                        dst_buffer->bo, dst_offset,
                        copy_size / bs, 1, bs);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const uint32_t*                             pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_block_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

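   /* Each pass through this loop stages up to max_update_size bytes of the
    * caller's data in the dynamic state stream, then does a GPU-side copy
    * from there into the destination buffer.
    */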
   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      int bs = 16;
      bs = gcd_pow2_u64(bs, dstOffset);
      bs = gcd_pow2_u64(bs, copy_size);

      do_buffer_copy(&batch,
                     &cmd_buffer->device->dynamic_state_block_pool.bo,
                     tmp_data.offset,
                     dst_buffer->bo, dst_buffer->offset + dstOffset,
                     copy_size / bs, 1, bs);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   if (fillSize == VK_WHOLE_SIZE) {
      fillSize = dst_buffer->size - dstOffset;
      /* Make sure fillSize is a multiple of 4 */
      fillSize &= ~3ull;
   }

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

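   /* Same splitting strategy as anv_CmdCopyBuffer: max-sized rectangles
    * first, then one max-width rectangle, then a final small strip.
    */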
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   union isl_color_value clear_color;
   memcpy(clear_color.u32, pColor->uint32, sizeof(pColor->uint32));

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT, &surf);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_format src_format =
         anv_get_format(&cmd_buffer->device->info, image->vk_format,
                        VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < pRanges[r].levelCount; i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     clear_color, color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = pRanges[r].layerCount;

      for (unsigned i = 0; i < pRanges[r].levelCount; i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att = attachment->colorAttachment;
   const struct anv_image_view *iview =
      fb->attachments[subpass->color_attachments[att]];
   const struct anv_image *image = iview->image;

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT, &surf);

   union isl_color_value clear_color;
   memcpy(clear_color.u32, attachment->clearValue.color.uint32,
          sizeof(clear_color.u32));

   static const bool color_write_disable[4] = { false, false, false, false };

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear(batch, &surf, iview->isl.format, iview->isl.swizzle,
                  iview->isl.base_level,
                  iview->isl.base_array_layer + pRects[r].baseArrayLayer,
                  pRects[r].layerCount,
                  offset.x, offset.y,
                  offset.x + extent.width, offset.y + extent.height,
                  clear_color, color_write_disable);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];
   const struct anv_image *image = iview->image;

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   struct blorp_surf depth, stencil;
   if (clear_depth) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (clear_stencil) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_depth_stencil(batch, &depth, &stencil,
                                iview->isl.base_level,
                                iview->isl.base_array_layer +
                                   pRects[r].baseArrayLayer,
                                pRects[r].layerCount,
                                offset.x, offset.y,
                                offset.x + extent.width,
                                offset.y + extent.height,
                                clear_depth, value.depth,
                                clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
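
   /* A resolve is just a 1:1 blit from a multisampled surface to a
    * single-sampled one.  Passing ISL_FORMAT_UNSUPPORTED below tells BLORP
    * to use each surface's own format for the blit.
    */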

   uint32_t a;
   for_each_bit(a, aspect_mask) {
      VkImageAspectFlagBits aspect = 1 << a;

      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, aspect, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, aspect, &dst_surf);

      blorp_blit(batch,
                 &src_surf, src_level, src_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 &dst_surf, dst_level, dst_layer,
                 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
                 src_x, src_y, src_x + width, src_y + height,
                 dst_x, dst_y, dst_x + width, dst_y + height,
                 0x2600 /* GL_NEAREST */, false, false);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(pRegions[r].srcSubresource.layerCount ==
             pRegions[r].dstSubresource.layerCount);

      const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image, pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   /* FINISHME(perf): Skip clears for resolve attachments.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If the first use of an attachment in a render pass is as a resolve
    *    attachment, then the loadOp is effectively ignored as the resolve is
    *    guaranteed to overwrite all pixels in the render area.
    */

   if (!subpass->has_resolve)
      return;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t i = 0; i < subpass->color_count; ++i) {
      uint32_t src_att = subpass->color_attachments[i];
      uint32_t dst_att = subpass->resolve_attachments[i];

      if (dst_att == VK_ATTACHMENT_UNUSED)
         continue;

      struct anv_image_view *src_iview = fb->attachments[src_att];
      struct anv_image_view *dst_iview = fb->attachments[dst_att];

      const VkRect2D render_area = cmd_buffer->state.render_area;

      assert(src_iview->aspect_mask == dst_iview->aspect_mask);
      resolve_image(&batch, src_iview->image,
                    src_iview->isl.base_level, src_iview->isl.base_array_layer,
                    dst_iview->image,
                    dst_iview->isl.base_level, dst_iview->isl.base_array_layer,
                    src_iview->aspect_mask,
                    render_area.offset.x, render_area.offset.y,
                    render_area.offset.x, render_area.offset.y,
                    render_area.extent.width, render_area.extent.height);
   }

   blorp_batch_finish(&batch);
}