intel/blorp: Make the MOCS setting part of blorp_address
[mesa.git] src/intel/vulkan/anv_blorp.c
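With this change, every blorp_address built in this file carries an explicit MOCS value; anv always passes the device's default MOCS.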
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
         .mocs = device->default_mocs,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

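/* Sentinel aux usage: get_blorp_surf_for_anv_image() replaces it with
 * whatever aux usage the image itself was created with.
 */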
#define ANV_AUX_USAGE_DEFAULT ((enum isl_aux_usage)0xff)

static void
get_blorp_surf_for_anv_image(const struct anv_device *device,
                             const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (aux_usage == ANV_AUX_USAGE_DEFAULT)
      aux_usage = image->planes[plane].aux_usage;

   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface = &image->planes[plane].surface;
   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + surface->offset,
         .mocs = device->default_mocs,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      blorp_surf->aux_surf = &aux_surface->isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + aux_surface->offset,
         .mocs = device->default_mocs,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

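      /* Multi-planar images are copied one plane at a time: each set bit in
       * the aspect mask names a plane, and each plane's surface gets its own
       * blorp_copy below.
       */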
      if (_mesa_bitcount(src_mask) > 1) {
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &dst_surf);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                          src_base_layer + i,
                          &dst_surf, pRegions[r].dstSubresource.mipLevel,
                          dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      ANV_AUX_USAGE_DEFAULT, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      ANV_AUX_USAGE_DEFAULT, &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

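   /* Describe both halves of the copy with one struct so the loop below can
    * treat "src" and "dst" uniformly regardless of the copy direction.
    */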
   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   ANV_AUX_USAGE_DEFAULT, &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

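/* Sort a source/destination coordinate pair into increasing order and report
 * whether the swaps left the blit mirrored.  For example, src0=8, src1=0 with
 * dst0=0, dst1=8 swaps only the source pair, so the blit flips on that axis.
 */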
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   src_image, src_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &src);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   dst_image, dst_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

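      /* When the blit is mirrored along Z, start from the last source slice
       * and step backwards; src_z_step also rescales Z for 3D blits whose
       * source and destination depth ranges differ.
       */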
      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
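/* For example, gcd_pow2_u64(24, 16) == 8: the lowest set bit of 24 is 1 << 3
 * and of 16 is 1 << 4, so the largest power of two dividing both is 1 << 3.
 */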
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
    * case, the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1ULL << MIN2(a_log2, b_log2);
}

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      struct blorp_address src = {
         .buffer = src_buffer->bo,
         .offset = src_buffer->offset + pRegions[r].srcOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + pRegions[r].dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      anv_state_flush(cmd_buffer->device, tmp_data);

      struct blorp_address src = {
         .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer.  If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);
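   /* For example, dstOffset = 36 and fillSize = 120 share 4 as their largest
    * power-of-two divisor, so the fill uses ISL_FORMAT_R32_UINT; a 16-byte
    * aligned offset and size would get the full RGBA32 format instead.
    */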

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

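   /* Split the fill into at most three clears: full MAX_SURFACE_DIM x
    * MAX_SURFACE_DIM chunks, then one rectangle of complete rows, then a
    * final partial row for whatever remains.
    */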
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

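/* Build a one-entry binding table pointing at the given surface state, for
 * the blorp entrypoints (blorp_clear_attachments,
 * blorp_ccs_resolve_attachment) that consume a render target through a
 * binding table rather than a blorp_surf.
 */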
static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
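   /* Dummy color value: blorp_clear_attachments() takes a color even for
    * depth/stencil-only clears, where it is passed with the clear-color
    * flag set to false.
    */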
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED) {
      assert(ds < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[ds].pending_clear_aspects)
         return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a PIPE_CONTROL
          * to ensure that the contents of the previous draw hit the render
          * target before we resolve and then use a second PIPE_CONTROL after
          * the resolve to ensure that it is completed before any additional
          * drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         assert(image->n_planes == 1);
         blorp_fast_clear(&batch, &surf, iview->planes[0].isl.format,
                          iview->planes[0].isl.base_level,
                          iview->planes[0].isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         assert(image->n_planes == 1);
         blorp_clear(&batch, &surf, iview->planes[0].isl.format,
                     anv_swizzle_for_render(iview->planes[0].isl.swizzle),
                     iview->planes[0].isl.base_level,
                     iview->planes[0].isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {

      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 && cmd_state->attachments[ds].aux_usage ==
                            ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing.  A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears.  Therefore, we don't emit a HZ op sequence
          * for a stencil clear in addition to using the BLORP-fallback for
          * depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->planes[0].isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any clear value other
                * than ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->image)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer.  Testing has revealed
                * that Gen8 only supports returning 0.0f.  Gens prior to gen8
                * do not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.depthStencil.stencil);

            /* From the SKL PRM, Depth Buffer Clear:
             *
             *    Depth Buffer Clear Workaround
             *    Depth buffer clear pass using any of the methods (WM_STATE,
             *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
             *    PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH
             *    bits “set” before starting to render.  DepthStall and
             *    DepthFlush are not needed between consecutive depth clear
             *    passes nor is it required if the depth-clear pass was done
             *    with “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
             */
            if (clear_depth) {
               cmd_buffer->state.pending_pipe_bits |=
                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
            }
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);
}

static void
resolve_surface(struct blorp_batch *batch,
                struct blorp_surf *src_surf,
                uint32_t src_level, uint32_t src_layer,
                struct blorp_surf *dst_surf,
                uint32_t dst_level, uint32_t dst_layer,
                uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
                uint32_t width, uint32_t height)
{
   blorp_blit(batch,
              src_surf, src_level, src_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              dst_surf, dst_level, dst_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              src_x, src_y, src_x + width, src_y + height,
              dst_x, dst_y, dst_x + width, dst_y + height,
              0x2600 /* GL_NEAREST */, false, false);
}

static void
resolve_image(struct anv_device *device,
              struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &src_surf);
      get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &dst_surf);

      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(cmd_buffer->device, &batch,
                       src_image,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

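/* Choose the aux usage to perform a fast clear with: images whose regular
 * aux usage is NONE still get their fast clears done through CCS_D.
 */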
static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return ISL_AUX_USAGE_CCS_D;
   else
      return image->planes[plane].aux_usage;
}

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count)
{
   assert(image->type == VK_IMAGE_TYPE_3D || image->extent.depth == 1);

   if (image->type == VK_IMAGE_TYPE_3D) {
      assert(base_layer == 0);
      assert(layer_count == anv_minify(image->extent.depth, base_level));
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      assert(level < anv_image_aux_levels(image, aspect));
      assert(base_layer + layer_count <=
             anv_image_aux_layers(image, aspect, level));
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0,
                       extent.width / width_div,
                       extent.height / height_div);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves.  We need to flush so that the
       * result of writes to the MSAA color attachments show up in the sampler
       * when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
                dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      src_aux_usage, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      dst_aux_usage, &dst_surf);

         assert(!src_iview->image->format->can_ycbcr);
         assert(!dst_iview->image->format->can_ycbcr);

         resolve_surface(&batch,
                         &src_surf,
                         src_iview->planes[0].isl.base_level,
                         src_iview->planes[0].isl.base_array_layer,
                         &dst_surf,
                         dst_iview->planes[0].isl.base_level,
                         dst_iview->planes[0].isl.base_array_layer,
                         render_area.offset.x, render_area.offset.y,
                         render_area.offset.x, render_area.offset.y,
                         render_area.extent.width, render_area.extent.height);
      }

      blorp_batch_finish(&batch);
   }
}

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_COLOR_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].bo,
         .offset = image->planes[0].bo_offset +
                   image->planes[0].shadow_surface.offset,
         .mocs = cmd_buffer->device->default_mocs,
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

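/* Perform a HiZ or depth resolve over the whole depth surface.  The HiZ aux
 * data is attached by hand below because get_blorp_surf_for_anv_image()
 * deliberately strips ISL_AUX_USAGE_HIZ.
 */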
void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->planes[0].aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->planes[0].bo,
      .offset = image->planes[0].bo_offset +
                image->planes[0].aux_surface.offset,
      .mocs = cmd_buffer->device->default_mocs,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, 0, 0, 1, op);
   blorp_batch_finish(&batch);
}

void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_state surface_state,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level, const uint32_t layer_count,
                const enum blorp_fast_clear_op op)
{
   assert(cmd_buffer && image);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The resolved subresource range must have a CCS buffer. */
   assert(level < anv_image_aux_levels(image, aspect));
   assert(layer_count <= anv_image_aux_layers(image, aspect, level));
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV &&
          image->samples == 1);

   /* Create a binding table for this surface state. */
   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

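   /* The batch is predicated: with BLORP_BATCH_PREDICATE_ENABLE, the resolve
    * only executes when the MI predicate indicates the subresource actually
    * needs a resolve.
    */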
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_PREDICATE_ENABLE);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   blorp_ccs_resolve_attachment(&batch, binding_table, &surf, level,
                                layer_count,
                                image->planes[plane].surface.isl.format,
                                op);

   blorp_batch_finish(&batch);
}