anv: enable multiple planes per image/imageView
src/intel/vulkan/anv_blorp.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

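/* Shader cache hooks wired into the blorp_context in anv_device_init_blorp()
 * below.  BLORP compiles its blit/clear kernels lazily; these callbacks back
 * that compilation with the device's dedicated blorp pipeline cache so each
 * kernel is only compiled once per device.
 */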
static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.mocs.tex = device->default_mocs;
   device->blorp.mocs.rb = device->default_mocs;
   device->blorp.mocs.vb = device->default_mocs;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

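/* Wrap a range of a VkBuffer in a linear 2D isl_surf so BLORP, which only
 * understands surfaces, can copy to and from raw buffer memory.
 */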
static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

#define ANV_AUX_USAGE_DEFAULT ((enum isl_aux_usage)0xff)

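/* Fill out a blorp_surf for one plane of an anv_image.  Passing
 * ANV_AUX_USAGE_DEFAULT selects the plane's regular aux usage; stencil
 * access and HiZ usage are downgraded to ISL_AUX_USAGE_NONE since the
 * BLORP paths in this file do not use them.
 */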
static void
get_blorp_surf_for_anv_image(const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (aux_usage == ANV_AUX_USAGE_DEFAULT)
      aux_usage = image->planes[plane].aux_usage;

   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface = &image->planes[plane].surface;
   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + surface->offset,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      blorp_surf->aux_surf = &aux_surface->isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + aux_surface->offset,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (_mesa_bitcount(src_mask) > 1) {
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(src_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &src_surf);
            get_blorp_surf_for_anv_image(dst_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &dst_surf);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                          src_base_layer + i,
                          &dst_surf, pRegions[r].dstSubresource.mipLevel,
                          dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_image, src_mask,
                                      ANV_AUX_USAGE_DEFAULT, &src_surf);
         get_blorp_surf_for_anv_image(dst_image, dst_mask,
                                      ANV_AUX_USAGE_DEFAULT, &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

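/* Shared implementation of vkCmdCopyBufferToImage and
 * vkCmdCopyImageToBuffer.  The buffer is wrapped in a linear surface that is
 * re-offset for each layer, and the copy direction is chosen by pointing
 * src/dst at either the image or the buffer description.
 */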
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(anv_image, aspect,
                                   ANV_AUX_USAGE_DEFAULT, &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width = pRegions[r].bufferRowLength ?
                  pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferImageCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

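/* Sort each of (src0, src1) and (dst0, dst1) into increasing order and
 * return whether the blit should be mirrored along this axis as a result.
 */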
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageBlit* pRegions,
    VkFilter filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(src_image, src_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &src);
      get_blorp_surf_for_anv_image(dst_image, dst_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

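/* Map a power-of-two byte size onto an R32-based UINT format of the same
 * size, for raw buffer copies and fills.
 */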
static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1ull << MIN2(a_log2, b_log2);
}

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer srcBuffer,
    VkBuffer dstBuffer,
    uint32_t regionCount,
    const VkBufferCopy* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      struct blorp_address src = {
         .buffer = src_buffer->bo,
         .offset = src_buffer->offset + pRegions[r].srcOffset,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + pRegions[r].dstOffset,
      };

      blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize dataSize,
    const void* pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      anv_state_flush(cmd_buffer->device, tmp_data);

      struct blorp_address src = {
         .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + dstOffset,
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

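/* Fill proceeds in up to three passes: full MAX_SURFACE_DIM x
 * MAX_SURFACE_DIM rectangles, then one MAX_SURFACE_DIM-wide rectangle of
 * whole rows, then a single row for whatever remains.
 */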
void anv_CmdFillBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer dstBuffer,
    VkDeviceSize dstOffset,
    VkDeviceSize fillSize,
    uint32_t data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer commandBuffer,
    VkImage _image,
    VkImageLayout imageLayout,
    const VkClearColorValue* pColor,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, pRanges[r].aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer commandBuffer,
    VkImage image_h,
    VkImageLayout imageLayout,
    const VkClearDepthStencilValue* pDepthStencil,
    uint32_t rangeCount,
    const VkImageSubresourceRange* pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

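/* Allocate a binding table for BLORP's use, grabbing a fresh binding table
 * block (and re-emitting state base addresses) if the current one is full.
 */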
VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

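/* Emit a one-entry binding table pointing at the given surface state. */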
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

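/* vkCmdClearAttachments helper for color.  The clear goes through a binding
 * table entry so it can run inside the current render pass; with multiview
 * enabled, one clear is emitted per view rather than per layer range.
 */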
static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer commandBuffer,
    uint32_t attachmentCount,
    const VkClearAttachment* pAttachments,
    uint32_t rectCount,
    const VkClearRect* pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
         assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

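/* Check whether any attachment used by the current subpass still has a
 * pending clear.
 */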
static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED) {
      assert(ds < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[ds].pending_clear_aspects)
         return true;
   }

   return false;
}

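/* Perform the pending loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR operations for
 * the current subpass, fast-clearing where the attachment state allows it
 * and falling back to slow (BLORP) clears otherwise.  For depth, a HiZ fast
 * clear is attempted on gen8+ subject to the restrictions checked below.
 */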
void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a PIPE_CONTROL
          * to ensure that the contents of the previous draw hit the render
          * target before we resolve and then use a second PIPE_CONTROL after
          * the resolve to ensure that it is completed before any additional
          * drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         assert(image->n_planes == 1);
         blorp_fast_clear(&batch, &surf, iview->planes[0].isl.format,
                          iview->planes[0].isl.base_level,
                          iview->planes[0].isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         assert(image->n_planes == 1);
         blorp_clear(&batch, &surf, iview->planes[0].isl.format,
                     anv_swizzle_for_render(iview->planes[0].isl.swizzle),
                     iview->planes[0].isl.base_level,
                     iview->planes[0].isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing. A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears. Therefore, we don't emit a HZ op sequence for
          * a stencil clear in addition to using the BLORP-fallback for depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->planes[0].isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any color not equal to
                * ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->image)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer. Testing has revealed
                * that Gen8 only supports returning 0.0f. Gens prior to gen8 do
                * not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.
                                                depthStencil.stencil);

            /* From the SKL PRM, Depth Buffer Clear:
             *
             * Depth Buffer Clear Workaround
             * Depth buffer clear pass using any of the methods (WM_STATE,
             * 3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
             * PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
             * “set” before starting to render. DepthStall and DepthFlush are
             * not needed between consecutive depth clear passes nor is it
             * required if the depth-clear pass was done with “full_surf_clear”
             * bit set in the 3DSTATE_WM_HZ_OP.
             */
            if (clear_depth) {
               cmd_buffer->state.pending_pipe_bits |=
                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
            }
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);
}

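/* MSAA resolve expressed as a 1:1 blit; BLORP treats a blit from a
 * multisampled surface to a single-sampled one as a resolve.
 */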
static void
resolve_surface(struct blorp_batch *batch,
                struct blorp_surf *src_surf,
                uint32_t src_level, uint32_t src_layer,
                struct blorp_surf *dst_surf,
                uint32_t dst_level, uint32_t dst_layer,
                uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
                uint32_t width, uint32_t height)
{
   blorp_blit(batch,
              src_surf, src_level, src_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              dst_surf, dst_level, dst_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              src_x, src_y, src_x + width, src_y + height,
              dst_x, dst_y, dst_x + width, dst_y + height,
              0x2600 /* GL_NEAREST */, false, false);
}

static void
resolve_image(struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(src_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &src_surf);
      get_blorp_surf_for_anv_image(dst_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &dst_surf);

      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer commandBuffer,
    VkImage srcImage,
    VkImageLayout srcImageLayout,
    VkImage dstImage,
    VkImageLayout dstImageLayout,
    uint32_t regionCount,
    const VkImageResolve* pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(&batch,
                       src_image,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

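/* Aux usage to assume when fast-clearing.  A plane whose regular aux usage
 * is ISL_AUX_USAGE_NONE may still have a CCS buffer that is only used for
 * fast clears, hence the CCS_D fallback.
 */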
static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return ISL_AUX_USAGE_CCS_D;
   else
      return image->planes[plane].aux_usage;
}

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count)
{
   assert(image->type == VK_IMAGE_TYPE_3D || image->extent.depth == 1);

   if (image->type == VK_IMAGE_TYPE_3D) {
      assert(base_layer == 0);
      assert(layer_count == anv_minify(image->extent.depth, base_level));
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      assert(level < anv_image_aux_levels(image, aspect));
      assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, level));
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0,
                       extent.width / width_div,
                       extent.height / height_div);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

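/* Emit the MSAA resolves requested by the current subpass's resolve
 * attachments.
 */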
void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves.  We need to flush so that the
       * result of writes to the MSAA color attachments show up in the sampler
       * when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
                dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(src_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      src_aux_usage, &src_surf);
         get_blorp_surf_for_anv_image(dst_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      dst_aux_usage, &dst_surf);

         assert(!src_iview->image->format->can_ycbcr);
         assert(!dst_iview->image->format->can_ycbcr);

         resolve_surface(&batch,
                         &src_surf,
                         src_iview->planes[0].isl.base_level,
                         src_iview->planes[0].isl.base_array_layer,
                         &dst_surf,
                         dst_iview->planes[0].isl.base_level,
                         dst_iview->planes[0].isl.base_array_layer,
                         render_area.offset.x, render_area.offset.y,
                         render_area.offset.x, render_area.offset.y,
                         render_area.extent.width, render_area.extent.height);
      }

      blorp_batch_finish(&batch);
   }
}

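/* Keep an image's shadow surface (a second copy of the data in a different
 * layout) up to date by copying each requested level/layer from the main
 * color surface.
 */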
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].bo,
         .offset = image->planes[0].bo_offset +
                   image->planes[0].shadow_surface.offset,
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

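/* Perform a HiZ or depth resolve of the whole depth plane.  This is a no-op
 * on gen7 and earlier and for depth buffers without a HiZ aux surface.
 */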
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->planes[0].aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->planes[0].bo,
      .offset = image->planes[0].bo_offset +
                image->planes[0].aux_surface.offset,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, 0, 0, 1, op);
   blorp_batch_finish(&batch);
}

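/* Perform a predicated CCS resolve (partial or full, per op) of the given
 * level, reusing the render-pass surface state through a one-entry binding
 * table.
 */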
void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op)
{
   assert(cmd_buffer && image);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The resolved subresource range must have a CCS buffer. */
   assert(level < anv_image_aux_levels(image, aspect));
   assert(layer_count <= anv_image_aux_layers(image, aspect, level));
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT && image->samples == 1);

   /* Create a binding table for this surface state. */
   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_PREDICATE_ENABLE);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   blorp_ccs_resolve_attachment(&batch, binding_table, &surf, level,
                                layer_count,
                                image->planes[plane].surface.isl.format,
                                op);

   blorp_batch_finish(&batch);
}