intel/blorp: make upload_shader() return a bool indicating success or failure
[mesa.git] / src / intel / vulkan / anv_blorp.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 static bool
27 lookup_blorp_shader(struct blorp_context *blorp,
28 const void *key, uint32_t key_size,
29 uint32_t *kernel_out, void *prog_data_out)
30 {
31 struct anv_device *device = blorp->driver_ctx;
32
33 /* The blorp cache must be a real cache */
34 assert(device->blorp_shader_cache.cache);
35
36 struct anv_shader_bin *bin =
37 anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
38 if (!bin)
39 return false;
40
41 /* The cache already has a reference and it's not going anywhere so there
42 * is no need to hold a second reference.
43 */
44 anv_shader_bin_unref(device, bin);
45
46 *kernel_out = bin->kernel.offset;
47 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
48
49 return true;
50 }
51
52 static bool
53 upload_blorp_shader(struct blorp_context *blorp,
54 const void *key, uint32_t key_size,
55 const void *kernel, uint32_t kernel_size,
56 const struct brw_stage_prog_data *prog_data,
57 uint32_t prog_data_size,
58 uint32_t *kernel_out, void *prog_data_out)
59 {
60 struct anv_device *device = blorp->driver_ctx;
61
62 /* The blorp cache must be a real cache */
63 assert(device->blorp_shader_cache.cache);
64
65 struct anv_pipeline_bind_map bind_map = {
66 .surface_count = 0,
67 .sampler_count = 0,
68 };
69
70 struct anv_shader_bin *bin =
71 anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
72 key, key_size, kernel, kernel_size,
73 prog_data, prog_data_size, &bind_map);
74
if (bin == NULL)
return false;

75 /* The cache already has a reference and it's not going anywhere so there
76 * is no need to hold a second reference.
77 */
78 anv_shader_bin_unref(device, bin);
79
80 *kernel_out = bin->kernel.offset;
81 *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
82
83 return true;
84 }
85
86 void
87 anv_device_init_blorp(struct anv_device *device)
88 {
89 anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
90 blorp_init(&device->blorp, device, &device->isl_dev);
91 device->blorp.compiler = device->instance->physicalDevice.compiler;
92 device->blorp.mocs.tex = device->default_mocs;
93 device->blorp.mocs.rb = device->default_mocs;
94 device->blorp.mocs.vb = device->default_mocs;
95 device->blorp.lookup_shader = lookup_blorp_shader;
96 device->blorp.upload_shader = upload_blorp_shader;
97 switch (device->info.gen) {
98 case 7:
99 if (device->info.is_haswell) {
100 device->blorp.exec = gen75_blorp_exec;
101 } else {
102 device->blorp.exec = gen7_blorp_exec;
103 }
104 break;
105 case 8:
106 device->blorp.exec = gen8_blorp_exec;
107 break;
108 case 9:
109 device->blorp.exec = gen9_blorp_exec;
110 break;
111 default:
112 unreachable("Unknown hardware generation");
113 }
114 }
115
116 void
117 anv_device_finish_blorp(struct anv_device *device)
118 {
119 blorp_finish(&device->blorp);
120 anv_pipeline_cache_finish(&device->blorp_shader_cache);
121 }
122
123 static void
124 get_blorp_surf_for_anv_buffer(struct anv_device *device,
125 struct anv_buffer *buffer, uint64_t offset,
126 uint32_t width, uint32_t height,
127 uint32_t row_pitch, enum isl_format format,
128 struct blorp_surf *blorp_surf,
129 struct isl_surf *isl_surf)
130 {
131 const struct isl_format_layout *fmtl =
132 isl_format_get_layout(format);
133
134 /* ASTC is the only format which doesn't support linear layouts.
135 * Create an equivalently sized surface with ISL to get around this.
136 */
137 if (fmtl->txc == ISL_TXC_ASTC) {
138 /* Use an equivalently sized format */
139 format = ISL_FORMAT_R32G32B32A32_UINT;
140 assert(fmtl->bpb == isl_format_get_layout(format)->bpb);
141
142 /* Shrink the dimensions for the new format */
143 width = DIV_ROUND_UP(width, fmtl->bw);
144 height = DIV_ROUND_UP(height, fmtl->bh);
145 }
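
/* Illustrative note (not part of the original source): every ASTC block
 * encodes 128 bits, the same as one R32G32B32A32_UINT texel, so e.g. a
 * 17x10 texel region of a 4x4 ASTC format shrinks to
 * DIV_ROUND_UP(17, 4) x DIV_ROUND_UP(10, 4) = 5 x 3 elements of the
 * substitute linear surface built below.
 */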
146
147 *blorp_surf = (struct blorp_surf) {
148 .surf = isl_surf,
149 .addr = {
150 .buffer = buffer->bo,
151 .offset = buffer->offset + offset,
152 },
153 };
154
155 isl_surf_init(&device->isl_dev, isl_surf,
156 .dim = ISL_SURF_DIM_2D,
157 .format = format,
158 .width = width,
159 .height = height,
160 .depth = 1,
161 .levels = 1,
162 .array_len = 1,
163 .samples = 1,
164 .min_pitch = row_pitch,
165 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
166 ISL_SURF_USAGE_RENDER_TARGET_BIT,
167 .tiling_flags = ISL_TILING_LINEAR_BIT);
168 assert(isl_surf->row_pitch == row_pitch);
169 }
170
171 static void
172 get_blorp_surf_for_anv_image(const struct anv_image *image,
173 VkImageAspectFlags aspect,
174 enum isl_aux_usage aux_usage,
175 struct blorp_surf *blorp_surf)
176 {
177 if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
178 aux_usage == ISL_AUX_USAGE_HIZ)
179 aux_usage = ISL_AUX_USAGE_NONE;
180
181 const struct anv_surface *surface =
182 anv_image_get_surface_for_aspect_mask(image, aspect);
183
184 *blorp_surf = (struct blorp_surf) {
185 .surf = &surface->isl,
186 .addr = {
187 .buffer = image->bo,
188 .offset = image->offset + surface->offset,
189 },
190 };
191
192 if (aux_usage != ISL_AUX_USAGE_NONE) {
193 blorp_surf->aux_surf = &image->aux_surface.isl;
194 blorp_surf->aux_addr = (struct blorp_address) {
195 .buffer = image->bo,
196 .offset = image->offset + image->aux_surface.offset,
197 };
198 blorp_surf->aux_usage = aux_usage;
199 }
200 }
201
202 void anv_CmdCopyImage(
203 VkCommandBuffer commandBuffer,
204 VkImage srcImage,
205 VkImageLayout srcImageLayout,
206 VkImage dstImage,
207 VkImageLayout dstImageLayout,
208 uint32_t regionCount,
209 const VkImageCopy* pRegions)
210 {
211 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
212 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
213 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
214
215 struct blorp_batch batch;
216 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
217
218 for (unsigned r = 0; r < regionCount; r++) {
219 VkOffset3D srcOffset =
220 anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
221 VkOffset3D dstOffset =
222 anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
223 VkExtent3D extent =
224 anv_sanitize_image_extent(src_image->type, pRegions[r].extent);
225
226 unsigned dst_base_layer, layer_count;
227 if (dst_image->type == VK_IMAGE_TYPE_3D) {
228 dst_base_layer = pRegions[r].dstOffset.z;
229 layer_count = pRegions[r].extent.depth;
230 } else {
231 dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
232 layer_count = pRegions[r].dstSubresource.layerCount;
233 }
234
235 unsigned src_base_layer;
236 if (src_image->type == VK_IMAGE_TYPE_3D) {
237 src_base_layer = pRegions[r].srcOffset.z;
238 } else {
239 src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
240 assert(pRegions[r].srcSubresource.layerCount == layer_count);
241 }
242
243 assert(pRegions[r].srcSubresource.aspectMask ==
244 pRegions[r].dstSubresource.aspectMask);
245
246 uint32_t a;
247 for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
248 VkImageAspectFlagBits aspect = (1 << a);
249
250 struct blorp_surf src_surf, dst_surf;
251 get_blorp_surf_for_anv_image(src_image, aspect, src_image->aux_usage,
252 &src_surf);
253 get_blorp_surf_for_anv_image(dst_image, aspect, dst_image->aux_usage,
254 &dst_surf);
255
256 for (unsigned i = 0; i < layer_count; i++) {
257 blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
258 src_base_layer + i,
259 &dst_surf, pRegions[r].dstSubresource.mipLevel,
260 dst_base_layer + i,
261 srcOffset.x, srcOffset.y,
262 dstOffset.x, dstOffset.y,
263 extent.width, extent.height);
264 }
265 }
266 }
267
268 blorp_batch_finish(&batch);
269 }
270
271 static void
272 copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
273 struct anv_buffer *anv_buffer,
274 struct anv_image *anv_image,
275 uint32_t regionCount,
276 const VkBufferImageCopy* pRegions,
277 bool buffer_to_image)
278 {
279 struct blorp_batch batch;
280 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
281
282 struct {
283 struct blorp_surf surf;
284 uint32_t level;
285 VkOffset3D offset;
286 } image, buffer, *src, *dst;
287
288 buffer.level = 0;
289 buffer.offset = (VkOffset3D) { 0, 0, 0 };
290
291 if (buffer_to_image) {
292 src = &buffer;
293 dst = &image;
294 } else {
295 src = &image;
296 dst = &buffer;
297 }
298
299 for (unsigned r = 0; r < regionCount; r++) {
300 const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;
301
302 get_blorp_surf_for_anv_image(anv_image, aspect, anv_image->aux_usage,
303 &image.surf);
304 image.offset =
305 anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
306 image.level = pRegions[r].imageSubresource.mipLevel;
307
308 VkExtent3D extent =
309 anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
310 if (anv_image->type != VK_IMAGE_TYPE_3D) {
311 image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
312 extent.depth = pRegions[r].imageSubresource.layerCount;
313 }
314
315 const enum isl_format buffer_format =
316 anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
317 aspect, VK_IMAGE_TILING_LINEAR);
318
319 const VkExtent3D bufferImageExtent = {
320 .width = pRegions[r].bufferRowLength ?
321 pRegions[r].bufferRowLength : extent.width,
322 .height = pRegions[r].bufferImageHeight ?
323 pRegions[r].bufferImageHeight : extent.height,
324 };
325
326 const struct isl_format_layout *buffer_fmtl =
327 isl_format_get_layout(buffer_format);
328
329 const uint32_t buffer_row_pitch =
330 DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
331 (buffer_fmtl->bpb / 8);
332
333 const uint32_t buffer_layer_stride =
334 DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
335 buffer_row_pitch;
336
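/* Illustrative example (numbers are hypothetical): copying a 100x60 texel
 * region of an R8G8B8A8 image with bufferRowLength = 128 and
 * bufferImageHeight = 0 gives buffer_row_pitch = 128 * 4 = 512 bytes and
 * buffer_layer_stride = 60 * 512 = 30720 bytes per layer/slice.
 */
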
337 struct isl_surf buffer_isl_surf;
338 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
339 anv_buffer, pRegions[r].bufferOffset,
340 extent.width, extent.height,
341 buffer_row_pitch, buffer_format,
342 &buffer.surf, &buffer_isl_surf);
343
344 for (unsigned z = 0; z < extent.depth; z++) {
345 blorp_copy(&batch, &src->surf, src->level, src->offset.z,
346 &dst->surf, dst->level, dst->offset.z,
347 src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
348 extent.width, extent.height);
349
350 image.offset.z++;
351 buffer.surf.addr.offset += buffer_layer_stride;
352 }
353 }
354
355 blorp_batch_finish(&batch);
356 }
357
358 void anv_CmdCopyBufferToImage(
359 VkCommandBuffer commandBuffer,
360 VkBuffer srcBuffer,
361 VkImage dstImage,
362 VkImageLayout dstImageLayout,
363 uint32_t regionCount,
364 const VkBufferImageCopy* pRegions)
365 {
366 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
367 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
368 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
369
370 copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
371 regionCount, pRegions, true);
372 }
373
374 void anv_CmdCopyImageToBuffer(
375 VkCommandBuffer commandBuffer,
376 VkImage srcImage,
377 VkImageLayout srcImageLayout,
378 VkBuffer dstBuffer,
379 uint32_t regionCount,
380 const VkBufferImageCopy* pRegions)
381 {
382 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
383 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
384 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
385
386 copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
387 regionCount, pRegions, false);
388 }
389
390 static bool
391 flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
392 {
393 bool flip = false;
394 if (*src0 > *src1) {
395 unsigned tmp = *src0;
396 *src0 = *src1;
397 *src1 = tmp;
398 flip = !flip;
399 }
400
401 if (*dst0 > *dst1) {
402 unsigned tmp = *dst0;
403 *dst0 = *dst1;
404 *dst1 = tmp;
405 flip = !flip;
406 }
407
408 return flip;
409 }
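
/* Illustrative example (not part of the original source): srcOffsets spanning
 * x0 = 0, x1 = 32 with dstOffsets spanning x0 = 32, x1 = 0 describe a
 * mirrored blit; the dst pair gets swapped and flip_coords() returns true so
 * the blit is flipped on that axis. Reversing both pairs cancels out and
 * returns false.
 */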
410
411 void anv_CmdBlitImage(
412 VkCommandBuffer commandBuffer,
413 VkImage srcImage,
414 VkImageLayout srcImageLayout,
415 VkImage dstImage,
416 VkImageLayout dstImageLayout,
417 uint32_t regionCount,
418 const VkImageBlit* pRegions,
419 VkFilter filter)
420
421 {
422 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
423 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
424 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
425
426 struct blorp_surf src, dst;
427
428 uint32_t gl_filter;
429 switch (filter) {
430 case VK_FILTER_NEAREST:
431 gl_filter = 0x2600; /* GL_NEAREST */
432 break;
433 case VK_FILTER_LINEAR:
434 gl_filter = 0x2601; /* GL_LINEAR */
435 break;
436 default:
437 unreachable("Invalid filter");
438 }
439
440 struct blorp_batch batch;
441 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
442
443 for (unsigned r = 0; r < regionCount; r++) {
444 const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
445 const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;
446
447 get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
448 src_image->aux_usage, &src);
449 get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
450 dst_image->aux_usage, &dst);
451
452 struct anv_format src_format =
453 anv_get_format(&cmd_buffer->device->info, src_image->vk_format,
454 src_res->aspectMask, src_image->tiling);
455 struct anv_format dst_format =
456 anv_get_format(&cmd_buffer->device->info, dst_image->vk_format,
457 dst_res->aspectMask, dst_image->tiling);
458
459 unsigned dst_start, dst_end;
460 if (dst_image->type == VK_IMAGE_TYPE_3D) {
461 assert(dst_res->baseArrayLayer == 0);
462 dst_start = pRegions[r].dstOffsets[0].z;
463 dst_end = pRegions[r].dstOffsets[1].z;
464 } else {
465 dst_start = dst_res->baseArrayLayer;
466 dst_end = dst_start + dst_res->layerCount;
467 }
468
469 unsigned src_start, src_end;
470 if (src_image->type == VK_IMAGE_TYPE_3D) {
471 assert(src_res->baseArrayLayer == 0);
472 src_start = pRegions[r].srcOffsets[0].z;
473 src_end = pRegions[r].srcOffsets[1].z;
474 } else {
475 src_start = src_res->baseArrayLayer;
476 src_end = src_start + src_res->layerCount;
477 }
478
479 bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
480 float src_z_step = (float)(src_end + 1 - src_start) /
481 (float)(dst_end + 1 - dst_start);
482
483 if (flip_z) {
484 src_start = src_end;
485 src_z_step *= -1;
486 }
487
488 unsigned src_x0 = pRegions[r].srcOffsets[0].x;
489 unsigned src_x1 = pRegions[r].srcOffsets[1].x;
490 unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
491 unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
492 bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);
493
494 unsigned src_y0 = pRegions[r].srcOffsets[0].y;
495 unsigned src_y1 = pRegions[r].srcOffsets[1].y;
496 unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
497 unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
498 bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);
499
500 const unsigned num_layers = dst_end - dst_start;
501 for (unsigned i = 0; i < num_layers; i++) {
502 unsigned dst_z = dst_start + i;
503 unsigned src_z = src_start + i * src_z_step;
504
505 blorp_blit(&batch, &src, src_res->mipLevel, src_z,
506 src_format.isl_format, src_format.swizzle,
507 &dst, dst_res->mipLevel, dst_z,
508 dst_format.isl_format,
509 anv_swizzle_for_render(dst_format.swizzle),
510 src_x0, src_y0, src_x1, src_y1,
511 dst_x0, dst_y0, dst_x1, dst_y1,
512 gl_filter, flip_x, flip_y);
513 }
514
515 }
516
517 blorp_batch_finish(&batch);
518 }
519
520 static enum isl_format
521 isl_format_for_size(unsigned size_B)
522 {
523 switch (size_B) {
524 case 1: return ISL_FORMAT_R8_UINT;
525 case 2: return ISL_FORMAT_R8G8_UINT;
526 case 4: return ISL_FORMAT_R8G8B8A8_UINT;
527 case 8: return ISL_FORMAT_R16G16B16A16_UINT;
528 case 16: return ISL_FORMAT_R32G32B32A32_UINT;
529 default:
530 unreachable("Not a power-of-two format size");
531 }
532 }
533
534 static void
535 do_buffer_copy(struct blorp_batch *batch,
536 struct anv_bo *src, uint64_t src_offset,
537 struct anv_bo *dst, uint64_t dst_offset,
538 int width, int height, int block_size)
539 {
540 struct anv_device *device = batch->blorp->driver_ctx;
541
542 /* The actual format we pick doesn't matter as blorp will throw it away.
543 * The only thing that actually matters is the size.
544 */
545 enum isl_format format = isl_format_for_size(block_size);
546
547 struct isl_surf surf;
548 isl_surf_init(&device->isl_dev, &surf,
549 .dim = ISL_SURF_DIM_2D,
550 .format = format,
551 .width = width,
552 .height = height,
553 .depth = 1,
554 .levels = 1,
555 .array_len = 1,
556 .samples = 1,
557 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
558 ISL_SURF_USAGE_RENDER_TARGET_BIT,
559 .tiling_flags = ISL_TILING_LINEAR_BIT);
560 assert(surf.row_pitch == width * block_size);
561
562 struct blorp_surf src_blorp_surf = {
563 .surf = &surf,
564 .addr = {
565 .buffer = src,
566 .offset = src_offset,
567 },
568 };
569
570 struct blorp_surf dst_blorp_surf = {
571 .surf = &surf,
572 .addr = {
573 .buffer = dst,
574 .offset = dst_offset,
575 },
576 };
577
578 blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
579 0, 0, 0, 0, width, height);
580 }
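
/* Illustrative usage (src_bo/dst_bo are hypothetical anv_bo pointers):
 * do_buffer_copy(&batch, src_bo, 0, dst_bo, 0, 4096, 1, 16) would move
 * 4096 * 1 * 16 = 65536 bytes as a single linear R32G32B32A32_UINT copy.
 */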
581
582 /**
583 * Returns the greatest common divisor of a and b that is a power of two.
584 */
585 static inline uint64_t
586 gcd_pow2_u64(uint64_t a, uint64_t b)
587 {
588 assert(a > 0 || b > 0);
589
590 unsigned a_log2 = ffsll(a) - 1;
591 unsigned b_log2 = ffsll(b) - 1;
592
593 /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
594 * case the MIN2() will take the other one. If both are 0 then we will
595 * hit the assert above.
596 */
597 return 1 << MIN2(a_log2, b_log2);
598 }
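
/* Illustrative example: gcd_pow2_u64(16, 24) = 8 and gcd_pow2_u64(16, 6) = 2,
 * so a copy whose offsets and size are 24-byte aligned can use the 8-byte
 * R16G16B16A16_UINT block from isl_format_for_size() above, while 6-byte
 * alignment falls back to the 2-byte R8G8_UINT block.
 */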
599
600 /* This is the maximum possible width/height our HW can handle */
601 #define MAX_SURFACE_DIM (1ull << 14)
602
603 void anv_CmdCopyBuffer(
604 VkCommandBuffer commandBuffer,
605 VkBuffer srcBuffer,
606 VkBuffer dstBuffer,
607 uint32_t regionCount,
608 const VkBufferCopy* pRegions)
609 {
610 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
611 ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
612 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
613
614 struct blorp_batch batch;
615 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
616
617 for (unsigned r = 0; r < regionCount; r++) {
618 uint64_t src_offset = src_buffer->offset + pRegions[r].srcOffset;
619 uint64_t dst_offset = dst_buffer->offset + pRegions[r].dstOffset;
620 uint64_t copy_size = pRegions[r].size;
621
622 /* First, we compute the biggest format that can be used with the
623 * given offsets and size.
624 */
625 int bs = 16;
626 bs = gcd_pow2_u64(bs, src_offset);
627 bs = gcd_pow2_u64(bs, dst_offset);
628 bs = gcd_pow2_u64(bs, pRegions[r].size);
629
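/* Worked example (hypothetical sizes): with bs = 16, a region of
 * (2 * MAX_SURFACE_DIM * MAX_SURFACE_DIM + 3) * 16 bytes is split below into
 * two full MAX_SURFACE_DIM x MAX_SURFACE_DIM copies, no max-width rectangle
 * (height == 0), and a final 3 x 1 copy of the trailing 48 bytes.
 */
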
630 /* First, we make a bunch of max-sized copies */
631 uint64_t max_copy_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
632 while (copy_size >= max_copy_size) {
633 do_buffer_copy(&batch, src_buffer->bo, src_offset,
634 dst_buffer->bo, dst_offset,
635 MAX_SURFACE_DIM, MAX_SURFACE_DIM, bs);
636 copy_size -= max_copy_size;
637 src_offset += max_copy_size;
638 dst_offset += max_copy_size;
639 }
640
641 /* Now make a max-width copy */
642 uint64_t height = copy_size / (MAX_SURFACE_DIM * bs);
643 assert(height < MAX_SURFACE_DIM);
644 if (height != 0) {
645 uint64_t rect_copy_size = height * MAX_SURFACE_DIM * bs;
646 do_buffer_copy(&batch, src_buffer->bo, src_offset,
647 dst_buffer->bo, dst_offset,
648 MAX_SURFACE_DIM, height, bs);
649 copy_size -= rect_copy_size;
650 src_offset += rect_copy_size;
651 dst_offset += rect_copy_size;
652 }
653
654 /* Finally, make a small copy to finish it off */
655 if (copy_size != 0) {
656 do_buffer_copy(&batch, src_buffer->bo, src_offset,
657 dst_buffer->bo, dst_offset,
658 copy_size / bs, 1, bs);
659 }
660 }
661
662 blorp_batch_finish(&batch);
663 }
664
665 void anv_CmdUpdateBuffer(
666 VkCommandBuffer commandBuffer,
667 VkBuffer dstBuffer,
668 VkDeviceSize dstOffset,
669 VkDeviceSize dataSize,
670 const void* pData)
671 {
672 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
673 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
674
675 struct blorp_batch batch;
676 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
677
678 /* We can't quite grab a full block because the state stream needs a
679 * little data at the top to build its linked list.
680 */
681 const uint32_t max_update_size =
682 cmd_buffer->device->dynamic_state_block_pool.block_size - 64;
683
684 assert(max_update_size < MAX_SURFACE_DIM * 4);
685
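/* Illustrative example (the block size is an assumption, not taken from this
 * source): with an 8192-byte dynamic state block, max_update_size is 8128,
 * so a 20480-byte vkCmdUpdateBuffer is staged and copied in three chunks of
 * 8128, 8128 and 4224 bytes.
 */
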
686 while (dataSize) {
687 const uint32_t copy_size = MIN2(dataSize, max_update_size);
688
689 struct anv_state tmp_data =
690 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);
691
692 memcpy(tmp_data.map, pData, copy_size);
693
694 int bs = 16;
695 bs = gcd_pow2_u64(bs, dstOffset);
696 bs = gcd_pow2_u64(bs, copy_size);
697
698 do_buffer_copy(&batch,
699 &cmd_buffer->device->dynamic_state_block_pool.bo,
700 tmp_data.offset,
701 dst_buffer->bo, dst_buffer->offset + dstOffset,
702 copy_size / bs, 1, bs);
703
704 dataSize -= copy_size;
705 dstOffset += copy_size;
706 pData = (void *)pData + copy_size;
707 }
708
709 blorp_batch_finish(&batch);
710 }
711
712 void anv_CmdFillBuffer(
713 VkCommandBuffer commandBuffer,
714 VkBuffer dstBuffer,
715 VkDeviceSize dstOffset,
716 VkDeviceSize fillSize,
717 uint32_t data)
718 {
719 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
720 ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
721 struct blorp_surf surf;
722 struct isl_surf isl_surf;
723
724 struct blorp_batch batch;
725 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
726
727 fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);
728
729 /* From the Vulkan spec:
730 *
731 * "size is the number of bytes to fill, and must be either a multiple
732 * of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
733 * the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
734 * buffer is not a multiple of 4, then the nearest smaller multiple is
735 * used."
736 */
737 fillSize &= ~3ull;
738
739 /* First, we compute the biggest format that can be used with the
740 * given offsets and size.
741 */
742 int bs = 16;
743 bs = gcd_pow2_u64(bs, dstOffset);
744 bs = gcd_pow2_u64(bs, fillSize);
745 enum isl_format isl_format = isl_format_for_size(bs);
746
747 union isl_color_value color = {
748 .u32 = { data, data, data, data },
749 };
750
751 const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
752 while (fillSize >= max_fill_size) {
753 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
754 dst_buffer, dstOffset,
755 MAX_SURFACE_DIM, MAX_SURFACE_DIM,
756 MAX_SURFACE_DIM * bs, isl_format,
757 &surf, &isl_surf);
758
759 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
760 0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
761 color, NULL);
762 fillSize -= max_fill_size;
763 dstOffset += max_fill_size;
764 }
765
766 uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
767 assert(height < MAX_SURFACE_DIM);
768 if (height != 0) {
769 const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
770 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
771 dst_buffer, dstOffset,
772 MAX_SURFACE_DIM, height,
773 MAX_SURFACE_DIM * bs, isl_format,
774 &surf, &isl_surf);
775
776 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
777 0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
778 color, NULL);
779 fillSize -= rect_fill_size;
780 dstOffset += rect_fill_size;
781 }
782
783 if (fillSize != 0) {
784 const uint32_t width = fillSize / bs;
785 get_blorp_surf_for_anv_buffer(cmd_buffer->device,
786 dst_buffer, dstOffset,
787 width, 1,
788 width * bs, isl_format,
789 &surf, &isl_surf);
790
791 blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
792 0, 0, 1, 0, 0, width, 1,
793 color, NULL);
794 }
795
796 blorp_batch_finish(&batch);
797 }
798
799 void anv_CmdClearColorImage(
800 VkCommandBuffer commandBuffer,
801 VkImage _image,
802 VkImageLayout imageLayout,
803 const VkClearColorValue* pColor,
804 uint32_t rangeCount,
805 const VkImageSubresourceRange* pRanges)
806 {
807 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
808 ANV_FROM_HANDLE(anv_image, image, _image);
809
810 static const bool color_write_disable[4] = { false, false, false, false };
811
812 struct blorp_batch batch;
813 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
814
815 struct blorp_surf surf;
816 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
817 image->aux_usage, &surf);
818
819 for (unsigned r = 0; r < rangeCount; r++) {
820 if (pRanges[r].aspectMask == 0)
821 continue;
822
823 assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
824
825 struct anv_format src_format =
826 anv_get_format(&cmd_buffer->device->info, image->vk_format,
827 VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);
828
829 unsigned base_layer = pRanges[r].baseArrayLayer;
830 unsigned layer_count = pRanges[r].layerCount;
831
832 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
833 const unsigned level = pRanges[r].baseMipLevel + i;
834 const unsigned level_width = anv_minify(image->extent.width, level);
835 const unsigned level_height = anv_minify(image->extent.height, level);
836
837 if (image->type == VK_IMAGE_TYPE_3D) {
838 base_layer = 0;
839 layer_count = anv_minify(image->extent.depth, level);
840 }
841
842 blorp_clear(&batch, &surf,
843 src_format.isl_format, src_format.swizzle,
844 level, base_layer, layer_count,
845 0, 0, level_width, level_height,
846 vk_to_isl_color(*pColor), color_write_disable);
847 }
848 }
849
850 blorp_batch_finish(&batch);
851 }
852
853 void anv_CmdClearDepthStencilImage(
854 VkCommandBuffer commandBuffer,
855 VkImage image_h,
856 VkImageLayout imageLayout,
857 const VkClearDepthStencilValue* pDepthStencil,
858 uint32_t rangeCount,
859 const VkImageSubresourceRange* pRanges)
860 {
861 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
862 ANV_FROM_HANDLE(anv_image, image, image_h);
863
864 struct blorp_batch batch;
865 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
866
867 struct blorp_surf depth, stencil;
868 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
869 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
870 ISL_AUX_USAGE_NONE, &depth);
871 } else {
872 memset(&depth, 0, sizeof(depth));
873 }
874
875 if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
876 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
877 ISL_AUX_USAGE_NONE, &stencil);
878 } else {
879 memset(&stencil, 0, sizeof(stencil));
880 }
881
882 for (unsigned r = 0; r < rangeCount; r++) {
883 if (pRanges[r].aspectMask == 0)
884 continue;
885
886 bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
887 bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
888
889 unsigned base_layer = pRanges[r].baseArrayLayer;
890 unsigned layer_count = pRanges[r].layerCount;
891
892 for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
893 const unsigned level = pRanges[r].baseMipLevel + i;
894 const unsigned level_width = anv_minify(image->extent.width, level);
895 const unsigned level_height = anv_minify(image->extent.height, level);
896
897 if (image->type == VK_IMAGE_TYPE_3D)
898 layer_count = anv_minify(image->extent.depth, level);
899
900 blorp_clear_depth_stencil(&batch, &depth, &stencil,
901 level, base_layer, layer_count,
902 0, 0, level_width, level_height,
903 clear_depth, pDepthStencil->depth,
904 clear_stencil ? 0xff : 0,
905 pDepthStencil->stencil);
906 }
907 }
908
909 blorp_batch_finish(&batch);
910 }
911
912 struct anv_state
913 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
914 uint32_t num_entries,
915 uint32_t *state_offset)
916 {
917 struct anv_state bt_state =
918 anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
919 state_offset);
920 if (bt_state.map == NULL) {
921 /* We ran out of space. Grab a new binding table block. */
922 MAYBE_UNUSED VkResult result =
923 anv_cmd_buffer_new_binding_table_block(cmd_buffer);
924 assert(result == VK_SUCCESS);
925
926 /* Re-emit state base addresses so we get the new surface state base
927 * address before we start emitting binding tables etc.
928 */
929 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
930
931 bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
932 state_offset);
933 assert(bt_state.map != NULL);
934 }
935
936 return bt_state;
937 }
938
939 static uint32_t
940 binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
941 struct anv_state surface_state)
942 {
943 uint32_t state_offset;
944 struct anv_state bt_state =
945 anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset);
946
947 uint32_t *bt_map = bt_state.map;
948 bt_map[0] = surface_state.offset + state_offset;
949
950 return bt_state.offset;
951 }
952
953 static void
954 clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
955 struct blorp_batch *batch,
956 const VkClearAttachment *attachment,
957 uint32_t rectCount, const VkClearRect *pRects)
958 {
959 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
960 const uint32_t color_att = attachment->colorAttachment;
961 const uint32_t att_idx = subpass->color_attachments[color_att].attachment;
962
963 if (att_idx == VK_ATTACHMENT_UNUSED)
964 return;
965
966 struct anv_render_pass_attachment *pass_att =
967 &cmd_buffer->state.pass->attachments[att_idx];
968 struct anv_attachment_state *att_state =
969 &cmd_buffer->state.attachments[att_idx];
970
971 uint32_t binding_table =
972 binding_table_for_surface_state(cmd_buffer, att_state->color_rt_state);
973
974 union isl_color_value clear_color =
975 vk_to_isl_color(attachment->clearValue.color);
976
977 for (uint32_t r = 0; r < rectCount; ++r) {
978 const VkOffset2D offset = pRects[r].rect.offset;
979 const VkExtent2D extent = pRects[r].rect.extent;
980 blorp_clear_attachments(batch, binding_table,
981 ISL_FORMAT_UNSUPPORTED, pass_att->samples,
982 pRects[r].baseArrayLayer,
983 pRects[r].layerCount,
984 offset.x, offset.y,
985 offset.x + extent.width, offset.y + extent.height,
986 true, clear_color, false, 0.0f, 0, 0);
987 }
988 }
989
990 static void
991 clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
992 struct blorp_batch *batch,
993 const VkClearAttachment *attachment,
994 uint32_t rectCount, const VkClearRect *pRects)
995 {
996 static const union isl_color_value color_value = { .u32 = { 0, } };
997 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
998 const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;
999
1000 if (att_idx == VK_ATTACHMENT_UNUSED)
1001 return;
1002
1003 struct anv_render_pass_attachment *pass_att =
1004 &cmd_buffer->state.pass->attachments[att_idx];
1005
1006 bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
1007 bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;
1008
1009 enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
1010 if (clear_depth) {
1011 depth_format = anv_get_isl_format(&cmd_buffer->device->info,
1012 pass_att->format,
1013 VK_IMAGE_ASPECT_DEPTH_BIT,
1014 VK_IMAGE_TILING_OPTIMAL);
1015 }
1016
1017 uint32_t binding_table =
1018 binding_table_for_surface_state(cmd_buffer,
1019 cmd_buffer->state.null_surface_state);
1020
1021 for (uint32_t r = 0; r < rectCount; ++r) {
1022 const VkOffset2D offset = pRects[r].rect.offset;
1023 const VkExtent2D extent = pRects[r].rect.extent;
1024 VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
1025 blorp_clear_attachments(batch, binding_table,
1026 depth_format, pass_att->samples,
1027 pRects[r].baseArrayLayer,
1028 pRects[r].layerCount,
1029 offset.x, offset.y,
1030 offset.x + extent.width, offset.y + extent.height,
1031 false, color_value,
1032 clear_depth, value.depth,
1033 clear_stencil ? 0xff : 0, value.stencil);
1034 }
1035 }
1036
1037 void anv_CmdClearAttachments(
1038 VkCommandBuffer commandBuffer,
1039 uint32_t attachmentCount,
1040 const VkClearAttachment* pAttachments,
1041 uint32_t rectCount,
1042 const VkClearRect* pRects)
1043 {
1044 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1045
1046 /* Because this gets called within a render pass, we tell blorp not to
1047 * trash our depth and stencil buffers.
1048 */
1049 struct blorp_batch batch;
1050 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1051 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1052
1053 for (uint32_t a = 0; a < attachmentCount; ++a) {
1054 if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
1055 clear_color_attachment(cmd_buffer, &batch,
1056 &pAttachments[a],
1057 rectCount, pRects);
1058 } else {
1059 clear_depth_stencil_attachment(cmd_buffer, &batch,
1060 &pAttachments[a],
1061 rectCount, pRects);
1062 }
1063 }
1064
1065 blorp_batch_finish(&batch);
1066 }
1067
1068 enum subpass_stage {
1069 SUBPASS_STAGE_LOAD,
1070 SUBPASS_STAGE_DRAW,
1071 SUBPASS_STAGE_RESOLVE,
1072 };
1073
1074 static bool
1075 attachment_needs_flush(struct anv_cmd_buffer *cmd_buffer,
1076 struct anv_render_pass_attachment *att,
1077 enum subpass_stage stage)
1078 {
1079 struct anv_render_pass *pass = cmd_buffer->state.pass;
1080 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1081
1082 /* We handle this subpass specially based on the current stage */
1083 enum anv_subpass_usage usage = att->subpass_usage[subpass_idx];
1084 switch (stage) {
1085 case SUBPASS_STAGE_LOAD:
1086 if (usage & (ANV_SUBPASS_USAGE_INPUT | ANV_SUBPASS_USAGE_RESOLVE_SRC))
1087 return true;
1088 break;
1089
1090 case SUBPASS_STAGE_DRAW:
1091 if (usage & ANV_SUBPASS_USAGE_RESOLVE_SRC)
1092 return true;
1093 break;
1094
1095 default:
1096 break;
1097 }
1098
1099 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1100 usage = att->subpass_usage[s];
1101
1102 /* If this attachment is going to be used as an input in this or any
1103 * future subpass, then we need to flush its cache and invalidate the
1104 * texture cache.
1105 */
1106 if (att->subpass_usage[s] & ANV_SUBPASS_USAGE_INPUT)
1107 return true;
1108
1109 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1110 /* We found another subpass that draws to this attachment. We'll
1111 * wait to resolve until then.
1112 */
1113 return false;
1114 }
1115 }
1116
1117 return false;
1118 }
1119
1120 static void
1121 anv_cmd_buffer_flush_attachments(struct anv_cmd_buffer *cmd_buffer,
1122 enum subpass_stage stage)
1123 {
1124 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1125 struct anv_render_pass *pass = cmd_buffer->state.pass;
1126
1127 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1128 uint32_t att = subpass->color_attachments[i].attachment;
1129 assert(att < pass->attachment_count);
1130 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1131 cmd_buffer->state.pending_pipe_bits |=
1132 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1133 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1134 }
1135 }
1136
1137 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1138 uint32_t att = subpass->depth_stencil_attachment.attachment;
1139 assert(att < pass->attachment_count);
1140 if (attachment_needs_flush(cmd_buffer, &pass->attachments[att], stage)) {
1141 cmd_buffer->state.pending_pipe_bits |=
1142 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1143 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
1144 }
1145 }
1146 }
1147
1148 static bool
1149 subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
1150 {
1151 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1152 uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1153
1154 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1155 uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1156 if (cmd_state->attachments[a].pending_clear_aspects) {
1157 return true;
1158 }
1159 }
1160
1161 if (ds != VK_ATTACHMENT_UNUSED &&
1162 cmd_state->attachments[ds].pending_clear_aspects) {
1163 return true;
1164 }
1165
1166 return false;
1167 }
1168
1169 void
1170 anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
1171 {
1172 const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
1173 const VkRect2D render_area = cmd_buffer->state.render_area;
1174
1175
1176 if (!subpass_needs_clear(cmd_buffer))
1177 return;
1178
1179 /* Because this gets called within a render pass, we tell blorp not to
1180 * trash our depth and stencil buffers.
1181 */
1182 struct blorp_batch batch;
1183 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
1184 BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
1185
1186 VkClearRect clear_rect = {
1187 .rect = cmd_buffer->state.render_area,
1188 .baseArrayLayer = 0,
1189 .layerCount = cmd_buffer->state.framebuffer->layers,
1190 };
1191
1192 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1193 for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
1194 const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
1195 struct anv_attachment_state *att_state = &cmd_state->attachments[a];
1196
1197 if (!att_state->pending_clear_aspects)
1198 continue;
1199
1200 assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1201
1202 struct anv_image_view *iview = fb->attachments[a];
1203 const struct anv_image *image = iview->image;
1204 struct blorp_surf surf;
1205 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1206 att_state->aux_usage, &surf);
1207
1208 if (att_state->fast_clear) {
1209 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1210
1211 /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
1212 *
1213 * "After Render target fast clear, pipe-control with color cache
1214 * write-flush must be issued before sending any DRAW commands on
1215 * that render target."
1216 *
1217 * This comment is a bit cryptic and doesn't really tell you what's
1218 * going or what's really needed. It appears that fast clear ops are
1219 * not properly synchronized with other drawing. This means that we
1220 * cannot have a fast clear operation in the pipe at the same time as
1221 * other regular drawing operations. We need to use a PIPE_CONTROL
1222 * to ensure that the contents of the previous draw hit the render
1223 * target before we resolve and then use a second PIPE_CONTROL after
1224 * the resolve to ensure that it is completed before any additional
1225 * drawing occurs.
1226 */
1227 cmd_buffer->state.pending_pipe_bits |=
1228 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1229
1230 blorp_fast_clear(&batch, &surf, iview->isl.format,
1231 iview->isl.base_level,
1232 iview->isl.base_array_layer, fb->layers,
1233 render_area.offset.x, render_area.offset.y,
1234 render_area.offset.x + render_area.extent.width,
1235 render_area.offset.y + render_area.extent.height);
1236
1237 cmd_buffer->state.pending_pipe_bits |=
1238 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1239 } else {
1240 blorp_clear(&batch, &surf, iview->isl.format,
1241 anv_swizzle_for_render(iview->isl.swizzle),
1242 iview->isl.base_level,
1243 iview->isl.base_array_layer, fb->layers,
1244 render_area.offset.x, render_area.offset.y,
1245 render_area.offset.x + render_area.extent.width,
1246 render_area.offset.y + render_area.extent.height,
1247 vk_to_isl_color(att_state->clear_value.color), NULL);
1248 }
1249
1250 att_state->pending_clear_aspects = 0;
1251 }
1252
1253 const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
1254
1255 if (ds != VK_ATTACHMENT_UNUSED &&
1256 cmd_state->attachments[ds].pending_clear_aspects) {
1257
1258 VkClearAttachment clear_att = {
1259 .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
1260 .clearValue = cmd_state->attachments[ds].clear_value,
1261 };
1262
1263
1264 const uint8_t gen = cmd_buffer->device->info.gen;
1265 bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
1266 ISL_AUX_USAGE_HIZ;
1267 const struct anv_image_view *iview = fb->attachments[ds];
1268
1269 if (clear_with_hiz) {
1270 const bool clear_depth = clear_att.aspectMask &
1271 VK_IMAGE_ASPECT_DEPTH_BIT;
1272 const bool clear_stencil = clear_att.aspectMask &
1273 VK_IMAGE_ASPECT_STENCIL_BIT;
1274
1275 /* Check against restrictions for depth buffer clearing. A great GPU
1276 * performance benefit isn't expected when using the HZ sequence for
1277 * stencil-only clears. Therefore, we don't emit a HZ op sequence for
1278 * a stencil clear in addition to using the BLORP-fallback for depth.
1279 */
1280 if (clear_depth) {
1281 if (!blorp_can_hiz_clear_depth(gen, iview->isl.format,
1282 iview->image->samples,
1283 render_area.offset.x,
1284 render_area.offset.y,
1285 render_area.offset.x +
1286 render_area.extent.width,
1287 render_area.offset.y +
1288 render_area.extent.height)) {
1289 clear_with_hiz = false;
1290 } else if (clear_att.clearValue.depthStencil.depth !=
1291 ANV_HZ_FC_VAL) {
1292 /* Don't enable fast depth clears for any clear value not equal to
1293 * ANV_HZ_FC_VAL.
1294 */
1295 clear_with_hiz = false;
1296 } else if (gen == 8 &&
1297 anv_can_sample_with_hiz(&cmd_buffer->device->info,
1298 iview->aspect_mask,
1299 iview->image->samples)) {
1300 /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
1301 * fast-cleared portion of a HiZ buffer. Testing has revealed
1302 * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
1303 * not support this feature at all.
1304 */
1305 clear_with_hiz = false;
1306 }
1307 }
1308
1309 if (clear_with_hiz) {
1310 blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
1311 render_area.offset.x,
1312 render_area.offset.y,
1313 render_area.offset.x +
1314 render_area.extent.width,
1315 render_area.offset.y +
1316 render_area.extent.height,
1317 clear_depth, clear_stencil,
1318 clear_att.clearValue.
1319 depthStencil.stencil);
1320 }
1321 }
1322
1323 if (!clear_with_hiz) {
1324 clear_depth_stencil_attachment(cmd_buffer, &batch,
1325 &clear_att, 1, &clear_rect);
1326 }
1327
1328 cmd_state->attachments[ds].pending_clear_aspects = 0;
1329 }
1330
1331 blorp_batch_finish(&batch);
1332
1333 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_LOAD);
1334 }
1335
1336 static void
1337 resolve_image(struct blorp_batch *batch,
1338 const struct anv_image *src_image,
1339 uint32_t src_level, uint32_t src_layer,
1340 const struct anv_image *dst_image,
1341 uint32_t dst_level, uint32_t dst_layer,
1342 VkImageAspectFlags aspect_mask,
1343 uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
1344 uint32_t width, uint32_t height)
1345 {
1346 assert(src_image->type == VK_IMAGE_TYPE_2D);
1347 assert(src_image->samples > 1);
1348 assert(dst_image->type == VK_IMAGE_TYPE_2D);
1349 assert(dst_image->samples == 1);
1350
1351 uint32_t a;
1352 for_each_bit(a, aspect_mask) {
1353 VkImageAspectFlagBits aspect = 1 << a;
1354
1355 struct blorp_surf src_surf, dst_surf;
1356 get_blorp_surf_for_anv_image(src_image, aspect,
1357 src_image->aux_usage, &src_surf);
1358 get_blorp_surf_for_anv_image(dst_image, aspect,
1359 dst_image->aux_usage, &dst_surf);
1360
1361 blorp_blit(batch,
1362 &src_surf, src_level, src_layer,
1363 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1364 &dst_surf, dst_level, dst_layer,
1365 ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
1366 src_x, src_y, src_x + width, src_y + height,
1367 dst_x, dst_y, dst_x + width, dst_y + height,
1368 0x2600 /* GL_NEAREST */, false, false);
1369 }
1370 }
1371
1372 void anv_CmdResolveImage(
1373 VkCommandBuffer commandBuffer,
1374 VkImage srcImage,
1375 VkImageLayout srcImageLayout,
1376 VkImage dstImage,
1377 VkImageLayout dstImageLayout,
1378 uint32_t regionCount,
1379 const VkImageResolve* pRegions)
1380 {
1381 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1382 ANV_FROM_HANDLE(anv_image, src_image, srcImage);
1383 ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
1384
1385 struct blorp_batch batch;
1386 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1387
1388 for (uint32_t r = 0; r < regionCount; r++) {
1389 assert(pRegions[r].srcSubresource.aspectMask ==
1390 pRegions[r].dstSubresource.aspectMask);
1391 assert(pRegions[r].srcSubresource.layerCount ==
1392 pRegions[r].dstSubresource.layerCount);
1393
1394 const uint32_t layer_count = pRegions[r].dstSubresource.layerCount;
1395
1396 for (uint32_t layer = 0; layer < layer_count; layer++) {
1397 resolve_image(&batch,
1398 src_image, pRegions[r].srcSubresource.mipLevel,
1399 pRegions[r].srcSubresource.baseArrayLayer + layer,
1400 dst_image, pRegions[r].dstSubresource.mipLevel,
1401 pRegions[r].dstSubresource.baseArrayLayer + layer,
1402 pRegions[r].dstSubresource.aspectMask,
1403 pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
1404 pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
1405 pRegions[r].extent.width, pRegions[r].extent.height);
1406 }
1407 }
1408
1409 blorp_batch_finish(&batch);
1410 }
1411
1412 static void
1413 ccs_resolve_attachment(struct anv_cmd_buffer *cmd_buffer,
1414 struct blorp_batch *batch,
1415 uint32_t att)
1416 {
1417 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1418 struct anv_attachment_state *att_state =
1419 &cmd_buffer->state.attachments[att];
1420
1421 if (att_state->aux_usage == ISL_AUX_USAGE_NONE ||
1422 att_state->aux_usage == ISL_AUX_USAGE_MCS)
1423 return; /* Nothing to resolve */
1424
1425 assert(att_state->aux_usage == ISL_AUX_USAGE_CCS_E ||
1426 att_state->aux_usage == ISL_AUX_USAGE_CCS_D);
1427
1428 struct anv_render_pass *pass = cmd_buffer->state.pass;
1429 const uint32_t subpass_idx = anv_get_subpass_id(&cmd_buffer->state);
1430
1431 /* Scan forward to see all the ways this attachment will be used.
1432 * Ideally, we would like to resolve in the same subpass as the last write
1433 * of a particular attachment. That way we only resolve once but it's
1434 * still hot in the cache.
1435 */
1436 bool found_draw = false;
1437 enum anv_subpass_usage usage = 0;
1438 for (uint32_t s = subpass_idx + 1; s < pass->subpass_count; s++) {
1439 usage |= pass->attachments[att].subpass_usage[s];
1440
1441 if (usage & (ANV_SUBPASS_USAGE_DRAW | ANV_SUBPASS_USAGE_RESOLVE_DST)) {
1442 /* We found another subpass that draws to this attachment. We'll
1443 * wait to resolve until then.
1444 */
1445 found_draw = true;
1446 break;
1447 }
1448 }
1449
1450 struct anv_image_view *iview = fb->attachments[att];
1451 const struct anv_image *image = iview->image;
1452 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1453
1454 enum blorp_fast_clear_op resolve_op = BLORP_FAST_CLEAR_OP_NONE;
1455 if (!found_draw) {
1456 /* This is the last subpass that writes to this attachment so we need to
1457 * resolve here. Ideally, we would like to only resolve if the storeOp
1458 * is set to VK_ATTACHMENT_STORE_OP_STORE. However, we need to ensure
1459 * that the CCS bits are set to "resolved" because there may be copy or
1460 * blit operations (which may ignore CCS) between now and the next time
1461 * we render and we need to ensure that anything they write will be
1462 * respected in the next render. Unfortunately, the hardware does not
1463 * provide us with any sort of "invalidate" pass that sets the CCS to
1464 * "resolved" without writing to the render target.
1465 */
1466 if (iview->image->aux_usage != ISL_AUX_USAGE_CCS_E) {
1467 /* The image destination surface doesn't support compression outside
1468 * the render pass. We need a full resolve.
1469 */
1470 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1471 } else if (att_state->fast_clear) {
1472 /* We don't know what to do with clear colors outside the render
1473 * pass. We need a partial resolve. Only transparent black is
1474 * built into the surface state object and thus no resolve is
1475 * required for this case.
1476 */
1477 if (att_state->clear_value.color.uint32[0] ||
1478 att_state->clear_value.color.uint32[1] ||
1479 att_state->clear_value.color.uint32[2] ||
1480 att_state->clear_value.color.uint32[3])
1481 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1482 } else {
1483 /* The image "natively" supports all the compression we care about
1484 * and we don't need to resolve at all. If this is the case, we also
1485 * don't need to resolve for any of the input attachment cases below.
1486 */
1487 }
1488 } else if (usage & ANV_SUBPASS_USAGE_INPUT) {
1489 /* Input attachments are clear-color aware so, at least on Sky Lake, we
1490 * can frequently sample from them with no resolves at all.
1491 */
1492 if (att_state->aux_usage != att_state->input_aux_usage) {
1493 assert(att_state->input_aux_usage == ISL_AUX_USAGE_NONE);
1494 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_FULL;
1495 } else if (!att_state->clear_color_is_zero_one) {
1496 /* Sky Lake PRM, Vol. 2d, RENDER_SURFACE_STATE::Red Clear Color:
1497 *
1498 * "If Number of Multisamples is MULTISAMPLECOUNT_1 AND if this RT
1499 * is fast cleared with non-0/1 clear value, this RT must be
1500 * partially resolved (refer to Partial Resolve operation) before
1501 * binding this surface to Sampler."
1502 */
1503 resolve_op = BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL;
1504 }
1505 }
1506
1507 if (resolve_op == BLORP_FAST_CLEAR_OP_NONE)
1508 return;
1509
1510 struct blorp_surf surf;
1511 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
1512 att_state->aux_usage, &surf);
1513 if (att_state->fast_clear)
1514 surf.clear_color = vk_to_isl_color(att_state->clear_value.color);
1515
1516 /* From the Sky Lake PRM Vol. 7, "Render Target Resolve":
1517 *
1518 * "When performing a render target resolve, PIPE_CONTROL with end of
1519 * pipe sync must be delivered."
1520 *
1521 * This comment is a bit cryptic and doesn't really tell you what's going
1522 * on or what's really needed. It appears that fast clear ops are not
1523 * properly synchronized with other drawing. We need to use a PIPE_CONTROL
1524 * to ensure that the contents of the previous draw hit the render target
1525 * before we resolve and then use a second PIPE_CONTROL after the resolve
1526 * to ensure that it is completed before any additional drawing occurs.
1527 */
1528 cmd_buffer->state.pending_pipe_bits |=
1529 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1530
1531 for (uint32_t layer = 0; layer < fb->layers; layer++) {
1532 blorp_ccs_resolve(batch, &surf,
1533 iview->isl.base_level,
1534 iview->isl.base_array_layer + layer,
1535 iview->isl.format, resolve_op);
1536 }
1537
1538 cmd_buffer->state.pending_pipe_bits |=
1539 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
1540
1541 /* Once we've done any sort of resolve, we're no longer fast-cleared */
1542 att_state->fast_clear = false;
1543 if (att_state->aux_usage == ISL_AUX_USAGE_CCS_D)
1544 att_state->aux_usage = ISL_AUX_USAGE_NONE;
1545 }
1546
1547 void
1548 anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
1549 {
1550 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1551 struct anv_subpass *subpass = cmd_buffer->state.subpass;
1552
1553
1554 struct blorp_batch batch;
1555 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1556
1557 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1558 ccs_resolve_attachment(cmd_buffer, &batch,
1559 subpass->color_attachments[i].attachment);
1560 }
1561
1562 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_DRAW);
1563
1564 if (subpass->has_resolve) {
1565 for (uint32_t i = 0; i < subpass->color_count; ++i) {
1566 uint32_t src_att = subpass->color_attachments[i].attachment;
1567 uint32_t dst_att = subpass->resolve_attachments[i].attachment;
1568
1569 if (dst_att == VK_ATTACHMENT_UNUSED)
1570 continue;
1571
1572 if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
1573 /* From the Vulkan 1.0 spec:
1574 *
1575 * If the first use of an attachment in a render pass is as a
1576 * resolve attachment, then the loadOp is effectively ignored
1577 * as the resolve is guaranteed to overwrite all pixels in the
1578 * render area.
1579 */
1580 cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
1581 }
1582
1583 struct anv_image_view *src_iview = fb->attachments[src_att];
1584 struct anv_image_view *dst_iview = fb->attachments[dst_att];
1585
1586 const VkRect2D render_area = cmd_buffer->state.render_area;
1587
1588 assert(src_iview->aspect_mask == dst_iview->aspect_mask);
1589 resolve_image(&batch, src_iview->image,
1590 src_iview->isl.base_level,
1591 src_iview->isl.base_array_layer,
1592 dst_iview->image,
1593 dst_iview->isl.base_level,
1594 dst_iview->isl.base_array_layer,
1595 src_iview->aspect_mask,
1596 render_area.offset.x, render_area.offset.y,
1597 render_area.offset.x, render_area.offset.y,
1598 render_area.extent.width, render_area.extent.height);
1599
1600 ccs_resolve_attachment(cmd_buffer, &batch, dst_att);
1601 }
1602
1603 anv_cmd_buffer_flush_attachments(cmd_buffer, SUBPASS_STAGE_RESOLVE);
1604 }
1605
1606 blorp_batch_finish(&batch);
1607 }
1608
1609 void
1610 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1611 const struct anv_image *image,
1612 enum blorp_hiz_op op)
1613 {
1614 assert(image);
1615
1616 /* Don't resolve depth buffers without an auxiliary HiZ buffer and
1617 * don't perform such a resolve on gens that don't support it.
1618 */
1619 if (cmd_buffer->device->info.gen < 8 ||
1620 image->aux_usage != ISL_AUX_USAGE_HIZ)
1621 return;
1622
1623 assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
1624 op == BLORP_HIZ_OP_DEPTH_RESOLVE);
1625
1626 struct blorp_batch batch;
1627 blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
1628
1629 struct blorp_surf surf;
1630 get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
1631 ISL_AUX_USAGE_NONE, &surf);
1632
1633 /* Manually add the aux HiZ surf */
1634 surf.aux_surf = &image->aux_surface.isl;
1635 surf.aux_addr = (struct blorp_address) {
1636 .buffer = image->bo,
1637 .offset = image->offset + image->aux_surface.offset,
1638 };
1639 surf.aux_usage = ISL_AUX_USAGE_HIZ;
1640
1641 surf.clear_color.u32[0] = (uint32_t) ANV_HZ_FC_VAL;
1642
1643 blorp_gen6_hiz_op(&batch, &surf, 0, 0, op);
1644 blorp_batch_finish(&batch);
1645 }