/* mesa.git: src/amd/vulkan/radv_image.c */
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_debug.h"
#include "radv_private.h"
#include "vk_format.h"
#include "vk_util.h"
#include "radv_radeon_winsys.h"
#include "sid.h"
#include "util/debug.h"
#include "util/u_atomic.h"
#include "vulkan/util/vk_format.h"

#include "gfx10_format_table.h"

static unsigned
radv_choose_tiling(struct radv_device *device,
		   const VkImageCreateInfo *pCreateInfo,
		   VkFormat format)
{
	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) {
		assert(pCreateInfo->samples <= 1);
		return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	if (!vk_format_is_compressed(format) &&
	    !vk_format_is_depth_or_stencil(format) &&
	    device->physical_device->rad_info.chip_class <= GFX8) {
		/* this causes hangs in some VK CTS tests on GFX9. */
		/* Textures with a very small height are recommended to be linear. */
		if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
		    /* Only very thin and long 2D textures should benefit from
		     * linear_aligned. */
		    (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* MSAA resources must be 2D tiled. */
	if (pCreateInfo->samples > 1)
		return RADEON_SURF_MODE_2D;

	return RADEON_SURF_MODE_2D;
}
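
/* Illustrative sketch (not part of the driver): how the rules above play
 * out for a few create infos on a GFX8 device:
 *
 *   tiling = LINEAR                          -> RADEON_SURF_MODE_LINEAR_ALIGNED
 *   OPTIMAL, 1D, R8G8B8A8_UNORM              -> RADEON_SURF_MODE_LINEAR_ALIGNED
 *   OPTIMAL, 2D, 1024x2, R8G8B8A8_UNORM      -> RADEON_SURF_MODE_LINEAR_ALIGNED
 *   OPTIMAL, 2D, 1024x1024, any sample count -> RADEON_SURF_MODE_2D
 *
 * On GFX9+ the linear shortcut above is skipped entirely, so OPTIMAL
 * images always end up 2D tiled here.
 */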

static bool
radv_use_tc_compat_htile_for_image(struct radv_device *device,
				   const VkImageCreateInfo *pCreateInfo,
				   VkFormat format)
{
	/* TC-compat HTILE is only available for GFX8+. */
	if (device->physical_device->rad_info.chip_class < GFX8)
		return false;

	if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT))
		return false;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
		return false;

	if (pCreateInfo->mipLevels > 1)
		return false;

	/* Do not enable TC-compatible HTILE if the image isn't readable by a
	 * shader because no texture fetches will happen.
	 */
	if (!(pCreateInfo->usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
				    VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
				    VK_IMAGE_USAGE_TRANSFER_SRC_BIT)))
		return false;

	/* FIXME: for some reason TC compat with 2/4/8 samples breaks some cts
	 * tests - disable for now. On GFX10 D32_SFLOAT is affected as well.
	 */
	if (pCreateInfo->samples >= 2 &&
	    (format == VK_FORMAT_D32_SFLOAT_S8_UINT ||
	     (format == VK_FORMAT_D32_SFLOAT &&
	      device->physical_device->rad_info.chip_class >= GFX10)))
		return false;

	/* GFX9 supports both 32-bit and 16-bit depth surfaces, while GFX8 only
	 * supports 32-bit, though it's possible to enable TC-compat for
	 * 16-bit depth surfaces if no Z planes are compressed.
	 */
	if (format != VK_FORMAT_D32_SFLOAT_S8_UINT &&
	    format != VK_FORMAT_D32_SFLOAT &&
	    format != VK_FORMAT_D16_UNORM)
		return false;

	if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
		const struct VkImageFormatListCreateInfo *format_list =
			(const struct VkImageFormatListCreateInfo *)
				vk_find_struct_const(pCreateInfo->pNext,
						     IMAGE_FORMAT_LIST_CREATE_INFO);

		/* We have to ignore the existence of the list if viewFormatCount = 0 */
		if (format_list && format_list->viewFormatCount) {
			/* compatibility is transitive, so we only need to check
			 * one format with everything else.
			 */
			for (unsigned i = 0; i < format_list->viewFormatCount; ++i) {
				if (format_list->pViewFormats[i] == VK_FORMAT_UNDEFINED)
					continue;

				if (format != format_list->pViewFormats[i])
					return false;
			}
		} else {
			return false;
		}
	}

	return true;
}

static bool
radv_surface_has_scanout(struct radv_device *device, const struct radv_image_create_info *info)
{
	if (info->bo_metadata) {
		if (device->physical_device->rad_info.chip_class >= GFX9)
			return info->bo_metadata->u.gfx9.scanout;
		else
			return info->bo_metadata->u.legacy.scanout;
	}

	return info->scanout;
}

static bool
radv_image_use_fast_clear_for_image(const struct radv_device *device,
				    const struct radv_image *image)
{
	if (device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS)
		return true;

	if (image->info.samples <= 1 &&
	    image->info.width * image->info.height <= 512 * 512) {
		/* Do not enable CMASK or DCC for small surfaces where the cost
		 * of the eliminate pass can be higher than the benefit of fast
		 * clear. RadeonSI does this, but the image threshold is
		 * different.
		 */
		return false;
	}

	return image->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT &&
	       (image->exclusive || image->queue_family_mask == 1);
}

static bool
radv_use_dcc_for_image(struct radv_device *device,
		       const struct radv_image *image,
		       const VkImageCreateInfo *pCreateInfo,
		       VkFormat format)
{
	bool dcc_compatible_formats;
	bool blendable;

	/* DCC (Delta Color Compression) is only available for GFX8+. */
	if (device->physical_device->rad_info.chip_class < GFX8)
		return false;

	if (device->instance->debug_flags & RADV_DEBUG_NO_DCC)
		return false;

	if (image->shareable)
		return false;

	/* TODO: Enable DCC for storage images. */
	if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT))
		return false;

	if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR)
		return false;

	if (vk_format_is_subsampled(format) ||
	    vk_format_get_plane_count(format) > 1)
		return false;

	if (!radv_image_use_fast_clear_for_image(device, image))
		return false;

	/* TODO: Enable DCC for mipmaps on GFX9+. */
	if ((pCreateInfo->arrayLayers > 1 || pCreateInfo->mipLevels > 1) &&
	    device->physical_device->rad_info.chip_class >= GFX9)
		return false;

	/* Do not enable DCC for mipmapped arrays because performance is worse. */
	if (pCreateInfo->arrayLayers > 1 && pCreateInfo->mipLevels > 1)
		return false;

	/* FIXME: DCC for MSAA with 4x and 8x samples doesn't work yet, while
	 * 2x can be enabled with an option.
	 */
	if (pCreateInfo->samples > 2 ||
	    (pCreateInfo->samples == 2 &&
	     !device->physical_device->dcc_msaa_allowed))
		return false;

	/* Determine if the formats are DCC compatible. */
	dcc_compatible_formats =
		radv_is_colorbuffer_format_supported(format,
						     &blendable);

	if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
		const struct VkImageFormatListCreateInfo *format_list =
			(const struct VkImageFormatListCreateInfo *)
				vk_find_struct_const(pCreateInfo->pNext,
						     IMAGE_FORMAT_LIST_CREATE_INFO);

		/* We have to ignore the existence of the list if viewFormatCount = 0 */
		if (format_list && format_list->viewFormatCount) {
			/* compatibility is transitive, so we only need to check
			 * one format with everything else. */
			for (unsigned i = 0; i < format_list->viewFormatCount; ++i) {
				if (format_list->pViewFormats[i] == VK_FORMAT_UNDEFINED)
					continue;

				if (!radv_dcc_formats_compatible(format,
								 format_list->pViewFormats[i]))
					dcc_compatible_formats = false;
			}
		} else {
			dcc_compatible_formats = false;
		}
	}

	if (!dcc_compatible_formats)
		return false;

	return true;
}

static inline bool
radv_use_fmask_for_image(const struct radv_device *device,
			 const struct radv_image *image)
{
	return image->info.samples > 1 &&
	       ((image->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) ||
		(device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS));
}

static inline bool
radv_use_htile_for_image(const struct radv_device *device,
			 const struct radv_image *image)
{
	return image->info.levels == 1 &&
	       ((image->info.width * image->info.height >= 8 * 8) ||
		(device->instance->debug_flags & RADV_DEBUG_FORCE_COMPRESS));
}

static bool
radv_use_tc_compat_cmask_for_image(struct radv_device *device,
				   struct radv_image *image)
{
	if (!(device->instance->perftest_flags & RADV_PERFTEST_TC_COMPAT_CMASK))
		return false;

	/* TC-compat CMASK is only available for GFX8+. */
	if (device->physical_device->rad_info.chip_class < GFX8)
		return false;

	if (image->usage & VK_IMAGE_USAGE_STORAGE_BIT)
		return false;

	if (radv_image_has_dcc(image))
		return false;

	if (!radv_image_has_cmask(image))
		return false;

	return true;
}

static uint32_t si_get_bo_metadata_word1(const struct radv_device *device)
{
	return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id;
}

static bool
radv_is_valid_opaque_metadata(const struct radv_device *device,
			      const struct radeon_bo_metadata *md)
{
	if (md->metadata[0] != 1 ||
	    md->metadata[1] != si_get_bo_metadata_word1(device))
		return false;

	if (md->size_metadata < 40)
		return false;

	return true;
}
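
/* Worked example (illustrative only): for a hypothetical GPU with
 * pci_id 0x687f, word 1 must be
 *
 *   (ATI_VENDOR_ID << 16) | 0x687f = (0x1002 << 16) | 0x687f = 0x1002687f
 *
 * and the 40-byte minimum corresponds to the 10 dwords
 * ([0] version, [1] vendor/device, [2:9] image descriptor) that
 * radv_query_opaque_metadata() below always writes.
 */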

static void
radv_patch_surface_from_metadata(struct radv_device *device,
				 struct radeon_surf *surface,
				 const struct radeon_bo_metadata *md)
{
	surface->flags = RADEON_SURF_CLR(surface->flags, MODE);

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		if (md->u.gfx9.swizzle_mode > 0)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE);

		surface->u.gfx9.surf.swizzle_mode = md->u.gfx9.swizzle_mode;
	} else {
		surface->u.legacy.pipe_config = md->u.legacy.pipe_config;
		surface->u.legacy.bankw = md->u.legacy.bankw;
		surface->u.legacy.bankh = md->u.legacy.bankh;
		surface->u.legacy.tile_split = md->u.legacy.tile_split;
		surface->u.legacy.mtilea = md->u.legacy.mtilea;
		surface->u.legacy.num_banks = md->u.legacy.num_banks;

		if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
		else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
		else
			surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE);
	}
}

static VkResult
radv_patch_image_dimensions(struct radv_device *device,
			    struct radv_image *image,
			    const struct radv_image_create_info *create_info,
			    struct ac_surf_info *image_info)
{
	unsigned width = image->info.width;
	unsigned height = image->info.height;

	/*
	 * minigbm sometimes allocates bigger images, which is going to result in
	 * weird strides and other properties. Let's be lenient where possible and
	 * fail it on GFX10 (as we cannot cope there).
	 *
	 * Example hack: https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1457777/
	 */
	if (create_info->bo_metadata &&
	    radv_is_valid_opaque_metadata(device, create_info->bo_metadata)) {
		const struct radeon_bo_metadata *md = create_info->bo_metadata;

		if (device->physical_device->rad_info.chip_class >= GFX10) {
			width = G_00A004_WIDTH_LO(md->metadata[3]) +
				(G_00A008_WIDTH_HI(md->metadata[4]) << 2) + 1;
			height = G_00A008_HEIGHT(md->metadata[4]) + 1;
		} else {
			width = G_008F18_WIDTH(md->metadata[4]) + 1;
			height = G_008F18_HEIGHT(md->metadata[4]) + 1;
		}
	}

	if (image->info.width == width && image->info.height == height)
		return VK_SUCCESS;

	if (width < image->info.width || height < image->info.height) {
		fprintf(stderr,
			"The imported image has smaller dimensions than the internal\n"
			"dimensions. Using it is going to fail badly, so we reject\n"
			"this import.\n"
			"(internal dimensions: %d x %d, external dimensions: %d x %d)\n",
			image->info.width, image->info.height, width, height);
		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
	} else if (device->physical_device->rad_info.chip_class >= GFX10) {
		fprintf(stderr,
			"Tried to import an image with inconsistent width on GFX10.\n"
			"As GFX10 has no separate stride fields we cannot cope with\n"
			"an inconsistency in width and will fail this import.\n"
			"(internal dimensions: %d x %d, external dimensions: %d x %d)\n",
			image->info.width, image->info.height, width, height);
		return VK_ERROR_INVALID_EXTERNAL_HANDLE;
	} else {
		fprintf(stderr,
			"Tried to import an image with inconsistent width on pre-GFX10.\n"
			"As GFX10 has no separate stride fields we cannot cope with\n"
			"an inconsistency and would fail on GFX10.\n"
			"(internal dimensions: %d x %d, external dimensions: %d x %d)\n",
			image->info.width, image->info.height, width, height);
	}
	image_info->width = width;
	image_info->height = height;

	return VK_SUCCESS;
}
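
/* Decoding sketch (illustrative): on GFX10 the imported width is
 * reassembled from two descriptor dwords, mirroring the encode in
 * gfx10_make_texture_descriptor():
 *
 *   stored value = width - 1
 *   WIDTH_LO     = (width - 1) & 0x3   (in metadata[3])
 *   WIDTH_HI     = (width - 1) >> 2    (in metadata[4])
 *   width        = WIDTH_LO + (WIDTH_HI << 2) + 1
 *
 * e.g. width 1920 is stored as WIDTH_LO = 3, WIDTH_HI = 479.
 */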

static VkResult
radv_patch_image_from_extra_info(struct radv_device *device,
				 struct radv_image *image,
				 const struct radv_image_create_info *create_info,
				 struct ac_surf_info *image_info)
{
	VkResult result = radv_patch_image_dimensions(device, image, create_info, image_info);
	if (result != VK_SUCCESS)
		return result;

	for (unsigned plane = 0; plane < image->plane_count; ++plane) {
		if (create_info->bo_metadata) {
			radv_patch_surface_from_metadata(device, &image->planes[plane].surface,
							 create_info->bo_metadata);
		}

		if (radv_surface_has_scanout(device, create_info)) {
			image->planes[plane].surface.flags |= RADEON_SURF_SCANOUT;
			image->planes[plane].surface.flags |= RADEON_SURF_DISABLE_DCC;

			image->info.surf_index = NULL;
		}
	}
	return VK_SUCCESS;
}

static uint32_t
radv_get_surface_flags(struct radv_device *device,
		       const struct radv_image *image,
		       unsigned plane_id,
		       const VkImageCreateInfo *pCreateInfo,
		       VkFormat image_format)
{
	uint32_t flags;
	unsigned array_mode = radv_choose_tiling(device, pCreateInfo, image_format);
	VkFormat format = vk_format_get_plane_format(image_format, plane_id);
	const struct vk_format_description *desc = vk_format_description(format);
	bool is_depth, is_stencil;

	is_depth = vk_format_has_depth(desc);
	is_stencil = vk_format_has_stencil(desc);

	flags = RADEON_SURF_SET(array_mode, MODE);

	switch (pCreateInfo->imageType) {
	case VK_IMAGE_TYPE_1D:
		if (pCreateInfo->arrayLayers > 1)
			flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		else
			flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case VK_IMAGE_TYPE_2D:
		if (pCreateInfo->arrayLayers > 1)
			flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		else
			flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case VK_IMAGE_TYPE_3D:
		flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	default:
		unreachable("unhandled image type");
	}

	/* Required for clearing/initializing a specific layer on GFX8. */
	flags |= RADEON_SURF_CONTIGUOUS_DCC_LAYERS;

	if (is_depth) {
		flags |= RADEON_SURF_ZBUFFER;
		if (!radv_use_htile_for_image(device, image) ||
		    (device->instance->debug_flags & RADV_DEBUG_NO_HIZ))
			flags |= RADEON_SURF_NO_HTILE;
		if (radv_use_tc_compat_htile_for_image(device, pCreateInfo, image_format))
			flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
	}

	if (is_stencil)
		flags |= RADEON_SURF_SBUFFER;

	if (device->physical_device->rad_info.chip_class >= GFX9 &&
	    pCreateInfo->imageType == VK_IMAGE_TYPE_3D &&
	    vk_format_get_blocksizebits(image_format) == 128 &&
	    vk_format_is_compressed(image_format))
		flags |= RADEON_SURF_NO_RENDER_TARGET;

	if (!radv_use_dcc_for_image(device, image, pCreateInfo, image_format))
		flags |= RADEON_SURF_DISABLE_DCC;

	if (!radv_use_fmask_for_image(device, image))
		flags |= RADEON_SURF_NO_FMASK;

	return flags;
}

static inline unsigned
si_tile_mode_index(const struct radv_image_plane *plane, unsigned level, bool stencil)
{
	if (stencil)
		return plane->surface.u.legacy.stencil_tiling_index[level];
	else
		return plane->surface.u.legacy.tiling_index[level];
}

static unsigned radv_map_swizzle(unsigned swizzle)
{
	switch (swizzle) {
	case VK_SWIZZLE_Y:
		return V_008F0C_SQ_SEL_Y;
	case VK_SWIZZLE_Z:
		return V_008F0C_SQ_SEL_Z;
	case VK_SWIZZLE_W:
		return V_008F0C_SQ_SEL_W;
	case VK_SWIZZLE_0:
		return V_008F0C_SQ_SEL_0;
	case VK_SWIZZLE_1:
		return V_008F0C_SQ_SEL_1;
	default: /* VK_SWIZZLE_X */
		return V_008F0C_SQ_SEL_X;
	}
}

static void
radv_make_buffer_descriptor(struct radv_device *device,
			    struct radv_buffer *buffer,
			    VkFormat vk_format,
			    unsigned offset,
			    unsigned range,
			    uint32_t *state)
{
	const struct vk_format_description *desc;
	unsigned stride;
	uint64_t gpu_address = radv_buffer_get_va(buffer->bo);
	uint64_t va = gpu_address + buffer->offset;
	unsigned num_format, data_format;
	int first_non_void;

	desc = vk_format_description(vk_format);
	first_non_void = vk_format_get_first_non_void_channel(vk_format);
	stride = desc->block.bits / 8;

	va += offset;
	state[0] = va;
	state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
		   S_008F04_STRIDE(stride);

	if (device->physical_device->rad_info.chip_class != GFX8 && stride) {
		range /= stride;
	}

	state[2] = range;
	state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) |
		   S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) |
		   S_008F0C_DST_SEL_Z(radv_map_swizzle(desc->swizzle[2])) |
		   S_008F0C_DST_SEL_W(radv_map_swizzle(desc->swizzle[3]));

	if (device->physical_device->rad_info.chip_class >= GFX10) {
		const struct gfx10_format *fmt = &gfx10_format_table[vk_format_to_pipe_format(vk_format)];

		/* OOB_SELECT chooses the out-of-bounds check:
		 * - 0: (index >= NUM_RECORDS) || (offset >= STRIDE)
		 * - 1: index >= NUM_RECORDS
		 * - 2: NUM_RECORDS == 0
		 * - 3: if SWIZZLE_ENABLE == 0: offset >= NUM_RECORDS
		 *      else: swizzle_address >= NUM_RECORDS
		 */
		state[3] |= S_008F0C_FORMAT(fmt->img_format) |
			    S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_STRUCTURED_WITH_OFFSET) |
			    S_008F0C_RESOURCE_LEVEL(1);
	} else {
		num_format = radv_translate_buffer_numformat(desc, first_non_void);
		data_format = radv_translate_buffer_dataformat(desc, first_non_void);

		assert(data_format != V_008F0C_BUF_DATA_FORMAT_INVALID);
		assert(num_format != ~0);

		state[3] |= S_008F0C_NUM_FORMAT(num_format) |
			    S_008F0C_DATA_FORMAT(data_format);
	}
}
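
/* Worked example (illustrative, not driver code): a VK_FORMAT_R32G32B32A32_SFLOAT
 * view of 256 bytes at offset 0 on a non-GFX8 chip yields
 *
 *   stride   = 128 bits / 8 = 16 bytes
 *   state[0] = low 32 bits of va
 *   state[1] = BASE_ADDRESS_HI(va >> 32) | STRIDE(16)
 *   state[2] = 256 / 16 = 16   (NUM_RECORDS counted in elements)
 *
 * On GFX8 the division is skipped, so NUM_RECORDS stays in bytes there.
 */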

static void
si_set_mutable_tex_desc_fields(struct radv_device *device,
			       struct radv_image *image,
			       const struct legacy_surf_level *base_level_info,
			       unsigned plane_id,
			       unsigned base_level, unsigned first_level,
			       unsigned block_width, bool is_stencil,
			       bool is_storage_image, bool disable_compression,
			       uint32_t *state)
{
	struct radv_image_plane *plane = &image->planes[plane_id];
	uint64_t gpu_address = image->bo ? radv_buffer_get_va(image->bo) + image->offset : 0;
	uint64_t va = gpu_address + plane->offset;
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	uint64_t meta_va = 0;

	if (chip_class >= GFX9) {
		if (is_stencil)
			va += plane->surface.u.gfx9.stencil_offset;
		else
			va += plane->surface.u.gfx9.surf_offset;
	} else
		va += base_level_info->offset;

	state[0] = va >> 8;
	if (chip_class >= GFX9 ||
	    base_level_info->mode == RADEON_SURF_MODE_2D)
		state[0] |= plane->surface.tile_swizzle;
	state[1] &= C_008F14_BASE_ADDRESS_HI;
	state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40);

	if (chip_class >= GFX8) {
		state[6] &= C_008F28_COMPRESSION_EN;
		state[7] = 0;
		if (!disable_compression && radv_dcc_enabled(image, first_level)) {
			meta_va = gpu_address + plane->surface.dcc_offset;
			if (chip_class <= GFX8)
				meta_va += base_level_info->dcc_offset;

			unsigned dcc_tile_swizzle = plane->surface.tile_swizzle << 8;
			dcc_tile_swizzle &= plane->surface.dcc_alignment - 1;
			meta_va |= dcc_tile_swizzle;
		} else if (!disable_compression &&
			   radv_image_is_tc_compat_htile(image)) {
			meta_va = gpu_address + plane->surface.htile_offset;
		}

		if (meta_va) {
			state[6] |= S_008F28_COMPRESSION_EN(1);
			if (chip_class <= GFX9)
				state[7] = meta_va >> 8;
		}
	}

	if (chip_class >= GFX10) {
		state[3] &= C_00A00C_SW_MODE;

		if (is_stencil) {
			state[3] |= S_00A00C_SW_MODE(plane->surface.u.gfx9.stencil.swizzle_mode);
		} else {
			state[3] |= S_00A00C_SW_MODE(plane->surface.u.gfx9.surf.swizzle_mode);
		}

		state[6] &= C_00A018_META_DATA_ADDRESS_LO &
			    C_00A018_META_PIPE_ALIGNED;

		if (meta_va) {
			struct gfx9_surf_meta_flags meta = {
				.rb_aligned = 1,
				.pipe_aligned = 1,
			};

			if (plane->surface.dcc_offset)
				meta = plane->surface.u.gfx9.dcc;

			state[6] |= S_00A018_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_00A018_META_DATA_ADDRESS_LO(meta_va >> 8);
		}

		state[7] = meta_va >> 16;
	} else if (chip_class == GFX9) {
		state[3] &= C_008F1C_SW_MODE;
		state[4] &= C_008F20_PITCH;

		if (is_stencil) {
			state[3] |= S_008F1C_SW_MODE(plane->surface.u.gfx9.stencil.swizzle_mode);
			state[4] |= S_008F20_PITCH(plane->surface.u.gfx9.stencil.epitch);
		} else {
			state[3] |= S_008F1C_SW_MODE(plane->surface.u.gfx9.surf.swizzle_mode);
			state[4] |= S_008F20_PITCH(plane->surface.u.gfx9.surf.epitch);
		}

		state[5] &= C_008F24_META_DATA_ADDRESS &
			    C_008F24_META_PIPE_ALIGNED &
			    C_008F24_META_RB_ALIGNED;
		if (meta_va) {
			struct gfx9_surf_meta_flags meta = {
				.rb_aligned = 1,
				.pipe_aligned = 1,
			};

			if (plane->surface.dcc_offset)
				meta = plane->surface.u.gfx9.dcc;

			state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) |
				    S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) |
				    S_008F24_META_RB_ALIGNED(meta.rb_aligned);
		}
	} else {
		/* GFX6-GFX8 */
		unsigned pitch = base_level_info->nblk_x * block_width;
		unsigned index = si_tile_mode_index(plane, base_level, is_stencil);

		state[3] &= C_008F1C_TILING_INDEX;
		state[3] |= S_008F1C_TILING_INDEX(index);
		state[4] &= C_008F20_PITCH;
		state[4] |= S_008F20_PITCH(pitch - 1);
	}
}
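
/* DCC address sketch (illustrative): the tile swizzle is folded into the
 * low bits of the metadata address, which is only safe because the DCC
 * base is dcc_alignment-aligned:
 *
 *   meta_va          = gpu_address + dcc_offset     (dcc_alignment aligned)
 *   dcc_tile_swizzle = (tile_swizzle << 8) & (dcc_alignment - 1)
 *   meta_va         |= dcc_tile_swizzle             (OR cannot carry)
 */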

static unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type,
			     unsigned nr_layers, unsigned nr_samples, bool is_storage_image, bool gfx9)
{
	if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		return is_storage_image ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE;

	/* GFX9 allocates 1D textures as 2D. */
	if (gfx9 && image_type == VK_IMAGE_TYPE_1D)
		image_type = VK_IMAGE_TYPE_2D;

	switch (image_type) {
	case VK_IMAGE_TYPE_1D:
		return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D;
	case VK_IMAGE_TYPE_2D:
		if (nr_samples > 1)
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY : V_008F1C_SQ_RSRC_IMG_2D_MSAA;
		else
			return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_2D;
	case VK_IMAGE_TYPE_3D:
		if (view_type == VK_IMAGE_VIEW_TYPE_3D)
			return V_008F1C_SQ_RSRC_IMG_3D;
		else
			return V_008F1C_SQ_RSRC_IMG_2D_ARRAY;
	default:
		unreachable("illegal image type");
	}
}

static unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle[4])
{
	unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;

	if (swizzle[3] == VK_SWIZZLE_X) {
		/* For the pre-defined border color values (white, opaque
		 * black, transparent black), the only thing that matters is
		 * that the alpha channel winds up in the correct place
		 * (because the RGB channels are all the same) so either of
		 * these enumerations will work.
		 */
		if (swizzle[2] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_WZYX;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ;
	} else if (swizzle[0] == VK_SWIZZLE_X) {
		if (swizzle[1] == VK_SWIZZLE_Y)
			bc_swizzle = V_008F20_BC_SWIZZLE_XYZW;
		else
			bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ;
	} else if (swizzle[1] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ;
	} else if (swizzle[2] == VK_SWIZZLE_X) {
		bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW;
	}

	return bc_swizzle;
}

bool vi_alpha_is_on_msb(struct radv_device *device, VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (device->physical_device->rad_info.chip_class >= GFX10 && desc->nr_channels == 1)
		return desc->swizzle[3] == VK_SWIZZLE_X;

	return radv_translate_colorswap(format, false) <= 1;
}

/**
 * Build the sampler view descriptor for a texture (GFX10).
 */
static void
gfx10_make_texture_descriptor(struct radv_device *device,
			      struct radv_image *image,
			      bool is_storage_image,
			      VkImageViewType view_type,
			      VkFormat vk_format,
			      const VkComponentMapping *mapping,
			      unsigned first_level, unsigned last_level,
			      unsigned first_layer, unsigned last_layer,
			      unsigned width, unsigned height, unsigned depth,
			      uint32_t *state,
			      uint32_t *fmask_state)
{
	const struct vk_format_description *desc;
	enum vk_swizzle swizzle[4];
	unsigned img_format;
	unsigned type;

	desc = vk_format_description(vk_format);
	img_format = gfx10_format_table[vk_format_to_pipe_format(vk_format)].img_format;

	if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) {
		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
		vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle);
	} else {
		vk_format_compose_swizzles(mapping, desc->swizzle, swizzle);
	}

	type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
			    is_storage_image, device->physical_device->rad_info.chip_class == GFX9);
	if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
		height = 1;
		depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY ||
		   type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		if (view_type != VK_IMAGE_VIEW_TYPE_3D)
			depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_CUBE)
		depth = image->info.array_size / 6;

	state[0] = 0;
	state[1] = S_00A004_FORMAT(img_format) |
		   S_00A004_WIDTH_LO(width - 1);
	state[2] = S_00A008_WIDTH_HI((width - 1) >> 2) |
		   S_00A008_HEIGHT(height - 1) |
		   S_00A008_RESOURCE_LEVEL(1);
	state[3] = S_00A00C_DST_SEL_X(radv_map_swizzle(swizzle[0])) |
		   S_00A00C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) |
		   S_00A00C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) |
		   S_00A00C_DST_SEL_W(radv_map_swizzle(swizzle[3])) |
		   S_00A00C_BASE_LEVEL(image->info.samples > 1 ?
				       0 : first_level) |
		   S_00A00C_LAST_LEVEL(image->info.samples > 1 ?
				       util_logbase2(image->info.samples) :
				       last_level) |
		   S_00A00C_BC_SWIZZLE(gfx9_border_color_swizzle(swizzle)) |
		   S_00A00C_TYPE(type);
	/* Depth is the last accessible layer on gfx9+. The hw doesn't need
	 * to know the total number of layers.
	 */
	state[4] = S_00A010_DEPTH(type == V_008F1C_SQ_RSRC_IMG_3D ? depth - 1 : last_layer) |
		   S_00A010_BASE_ARRAY(first_layer);
	state[5] = S_00A014_ARRAY_PITCH(0) |
		   S_00A014_MAX_MIP(image->info.samples > 1 ?
				    util_logbase2(image->info.samples) :
				    image->info.levels - 1) |
		   S_00A014_PERF_MOD(4);
	state[6] = 0;
	state[7] = 0;

	if (radv_dcc_enabled(image, first_level)) {
		state[6] |= S_00A018_MAX_UNCOMPRESSED_BLOCK_SIZE(V_028C78_MAX_BLOCK_SIZE_256B) |
			    S_00A018_MAX_COMPRESSED_BLOCK_SIZE(V_028C78_MAX_BLOCK_SIZE_128B) |
			    S_00A018_ALPHA_IS_ON_MSB(vi_alpha_is_on_msb(device, vk_format));
	}

	/* Initialize the sampler view for FMASK. */
	if (radv_image_has_fmask(image)) {
		uint64_t gpu_address = radv_buffer_get_va(image->bo);
		uint32_t format;
		uint64_t va;

		assert(image->plane_count == 1);

		va = gpu_address + image->offset + image->planes[0].surface.fmask_offset;

		switch (image->info.samples) {
		case 2:
			format = V_008F0C_IMG_FORMAT_FMASK8_S2_F2;
			break;
		case 4:
			format = V_008F0C_IMG_FORMAT_FMASK8_S4_F4;
			break;
		case 8:
			format = V_008F0C_IMG_FORMAT_FMASK32_S8_F8;
			break;
		default:
			unreachable("invalid nr_samples");
		}

		fmask_state[0] = (va >> 8) | image->planes[0].surface.fmask_tile_swizzle;
		fmask_state[1] = S_00A004_BASE_ADDRESS_HI(va >> 40) |
				 S_00A004_FORMAT(format) |
				 S_00A004_WIDTH_LO(width - 1);
		fmask_state[2] = S_00A008_WIDTH_HI((width - 1) >> 2) |
				 S_00A008_HEIGHT(height - 1) |
				 S_00A008_RESOURCE_LEVEL(1);
		fmask_state[3] = S_00A00C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
				 S_00A00C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
				 S_00A00C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
				 S_00A00C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
				 S_00A00C_SW_MODE(image->planes[0].surface.u.gfx9.fmask.swizzle_mode) |
				 S_00A00C_TYPE(radv_tex_dim(image->type, view_type, image->info.array_size, 0, false, false));
		fmask_state[4] = S_00A010_DEPTH(last_layer) |
				 S_00A010_BASE_ARRAY(first_layer);
		fmask_state[5] = 0;
		fmask_state[6] = S_00A018_META_PIPE_ALIGNED(1);
		fmask_state[7] = 0;
	} else if (fmask_state)
		memset(fmask_state, 0, 8 * 4);
}
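
/* Mip/sample encoding sketch (illustrative): for MSAA the level fields are
 * repurposed for the sample count, e.g. an 8-sample image gets
 *
 *   BASE_LEVEL = 0, LAST_LEVEL = util_logbase2(8) = 3, MAX_MIP = 3
 *
 * while a single-sample image with 5 mip levels viewed in full gets
 * BASE_LEVEL = first_level, LAST_LEVEL = last_level and MAX_MIP = 4.
 */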

/**
 * Build the sampler view descriptor for a texture (SI-GFX9).
 */
static void
si_make_texture_descriptor(struct radv_device *device,
			   struct radv_image *image,
			   bool is_storage_image,
			   VkImageViewType view_type,
			   VkFormat vk_format,
			   const VkComponentMapping *mapping,
			   unsigned first_level, unsigned last_level,
			   unsigned first_layer, unsigned last_layer,
			   unsigned width, unsigned height, unsigned depth,
			   uint32_t *state,
			   uint32_t *fmask_state)
{
	const struct vk_format_description *desc;
	enum vk_swizzle swizzle[4];
	int first_non_void;
	unsigned num_format, data_format, type;

	desc = vk_format_description(vk_format);

	if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) {
		const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
		vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle);
	} else {
		vk_format_compose_swizzles(mapping, desc->swizzle, swizzle);
	}

	first_non_void = vk_format_get_first_non_void_channel(vk_format);

	num_format = radv_translate_tex_numformat(vk_format, desc, first_non_void);
	if (num_format == ~0) {
		num_format = 0;
	}

	data_format = radv_translate_tex_dataformat(vk_format, desc, first_non_void);
	if (data_format == ~0) {
		data_format = 0;
	}

	/* S8 with either Z16 or Z32 HTILE need a special format. */
	if (device->physical_device->rad_info.chip_class == GFX9 &&
	    vk_format == VK_FORMAT_S8_UINT &&
	    radv_image_is_tc_compat_htile(image)) {
		if (image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT)
			data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
		else if (image->vk_format == VK_FORMAT_D16_UNORM_S8_UINT)
			data_format = V_008F14_IMG_DATA_FORMAT_S8_16;
	}

	type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples,
			    is_storage_image, device->physical_device->rad_info.chip_class == GFX9);
	if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) {
		height = 1;
		depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY ||
		   type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
		if (view_type != VK_IMAGE_VIEW_TYPE_3D)
			depth = image->info.array_size;
	} else if (type == V_008F1C_SQ_RSRC_IMG_CUBE)
		depth = image->info.array_size / 6;

	state[0] = 0;
	state[1] = (S_008F14_DATA_FORMAT(data_format) |
		    S_008F14_NUM_FORMAT(num_format));
	state[2] = (S_008F18_WIDTH(width - 1) |
		    S_008F18_HEIGHT(height - 1) |
		    S_008F18_PERF_MOD(4));
	state[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle[0])) |
		    S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) |
		    S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) |
		    S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle[3])) |
		    S_008F1C_BASE_LEVEL(image->info.samples > 1 ?
					0 : first_level) |
		    S_008F1C_LAST_LEVEL(image->info.samples > 1 ?
					util_logbase2(image->info.samples) :
					last_level) |
		    S_008F1C_TYPE(type));
	state[4] = 0;
	state[5] = S_008F24_BASE_ARRAY(first_layer);
	state[6] = 0;
	state[7] = 0;

	if (device->physical_device->rad_info.chip_class == GFX9) {
		unsigned bc_swizzle = gfx9_border_color_swizzle(swizzle);

		/* Depth is the last accessible layer on Gfx9.
		 * The hw doesn't need to know the total number of layers.
		 */
		if (type == V_008F1C_SQ_RSRC_IMG_3D)
			state[4] |= S_008F20_DEPTH(depth - 1);
		else
			state[4] |= S_008F20_DEPTH(last_layer);

		state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle);
		state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ?
					     util_logbase2(image->info.samples) :
					     image->info.levels - 1);
	} else {
		state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1);
		state[4] |= S_008F20_DEPTH(depth - 1);
		state[5] |= S_008F24_LAST_ARRAY(last_layer);
	}

	if (image->planes[0].surface.dcc_offset) {
		state[6] = S_008F28_ALPHA_IS_ON_MSB(vi_alpha_is_on_msb(device, vk_format));
	} else {
		/* The last dword is unused by hw. The shader uses it to clear
		 * bits in the first dword of sampler state.
		 */
		if (device->physical_device->rad_info.chip_class <= GFX7 && image->info.samples <= 1) {
			if (first_level == last_level)
				state[7] = C_008F30_MAX_ANISO_RATIO;
			else
				state[7] = 0xffffffff;
		}
	}

	/* Initialize the sampler view for FMASK. */
	if (radv_image_has_fmask(image)) {
		uint32_t fmask_format, num_format;
		uint64_t gpu_address = radv_buffer_get_va(image->bo);
		uint64_t va;

		assert(image->plane_count == 1);

		va = gpu_address + image->offset + image->planes[0].surface.fmask_offset;

		if (device->physical_device->rad_info.chip_class == GFX9) {
			fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK;
			switch (image->info.samples) {
			case 2:
				num_format = V_008F14_IMG_FMASK_8_2_2;
				break;
			case 4:
				num_format = V_008F14_IMG_FMASK_8_4_4;
				break;
			case 8:
				num_format = V_008F14_IMG_FMASK_32_8_8;
				break;
			default:
				unreachable("invalid nr_samples");
			}
		} else {
			switch (image->info.samples) {
			case 2:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2;
				break;
			case 4:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4;
				break;
			case 8:
				fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8;
				break;
			default:
				assert(0);
				fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID;
			}
			num_format = V_008F14_IMG_NUM_FORMAT_UINT;
		}

		fmask_state[0] = va >> 8;
		fmask_state[0] |= image->planes[0].surface.fmask_tile_swizzle;
		fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) |
				 S_008F14_DATA_FORMAT(fmask_format) |
				 S_008F14_NUM_FORMAT(num_format);
		fmask_state[2] = S_008F18_WIDTH(width - 1) |
				 S_008F18_HEIGHT(height - 1);
		fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) |
				 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) |
				 S_008F1C_TYPE(radv_tex_dim(image->type, view_type, image->info.array_size, 0, false, false));
		fmask_state[4] = 0;
		fmask_state[5] = S_008F24_BASE_ARRAY(first_layer);
		fmask_state[6] = 0;
		fmask_state[7] = 0;

		if (device->physical_device->rad_info.chip_class == GFX9) {
			fmask_state[3] |= S_008F1C_SW_MODE(image->planes[0].surface.u.gfx9.fmask.swizzle_mode);
			fmask_state[4] |= S_008F20_DEPTH(last_layer) |
					  S_008F20_PITCH(image->planes[0].surface.u.gfx9.fmask.epitch);
			fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(1) |
					  S_008F24_META_RB_ALIGNED(1);

			if (radv_image_is_tc_compat_cmask(image)) {
				va = gpu_address + image->offset + image->planes[0].surface.cmask_offset;

				fmask_state[5] |= S_008F24_META_DATA_ADDRESS(va >> 40);
				fmask_state[6] |= S_008F28_COMPRESSION_EN(1);
				fmask_state[7] |= va >> 8;
			}
		} else {
			fmask_state[3] |= S_008F1C_TILING_INDEX(image->planes[0].surface.u.legacy.fmask.tiling_index);
			fmask_state[4] |= S_008F20_DEPTH(depth - 1) |
					  S_008F20_PITCH(image->planes[0].surface.u.legacy.fmask.pitch_in_pixels - 1);
			fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer);

			if (radv_image_is_tc_compat_cmask(image)) {
				va = gpu_address + image->offset + image->planes[0].surface.cmask_offset;

				fmask_state[6] |= S_008F28_COMPRESSION_EN(1);
				fmask_state[7] |= va >> 8;
			}
		}
	} else if (fmask_state)
		memset(fmask_state, 0, 8 * 4);
}

static void
radv_make_texture_descriptor(struct radv_device *device,
			     struct radv_image *image,
			     bool is_storage_image,
			     VkImageViewType view_type,
			     VkFormat vk_format,
			     const VkComponentMapping *mapping,
			     unsigned first_level, unsigned last_level,
			     unsigned first_layer, unsigned last_layer,
			     unsigned width, unsigned height, unsigned depth,
			     uint32_t *state,
			     uint32_t *fmask_state)
{
	if (device->physical_device->rad_info.chip_class >= GFX10) {
		gfx10_make_texture_descriptor(device, image, is_storage_image,
					      view_type, vk_format, mapping,
					      first_level, last_level,
					      first_layer, last_layer,
					      width, height, depth,
					      state, fmask_state);
	} else {
		si_make_texture_descriptor(device, image, is_storage_image,
					   view_type, vk_format, mapping,
					   first_level, last_level,
					   first_layer, last_layer,
					   width, height, depth,
					   state, fmask_state);
	}
}

static void
radv_query_opaque_metadata(struct radv_device *device,
			   struct radv_image *image,
			   struct radeon_bo_metadata *md)
{
	static const VkComponentMapping fixedmapping;
	uint32_t desc[8], i;

	assert(image->plane_count == 1);

	/* Metadata image format version 1:
	 * [0] = 1 (metadata format identifier)
	 * [1] = (VENDOR_ID << 16) | PCI_ID
	 * [2:9] = image descriptor for the whole resource
	 *         [2] is always 0, because the base address is cleared
	 *         [9] is the DCC offset bits [39:8] from the beginning of
	 *             the buffer
	 * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
	 */
	md->metadata[0] = 1; /* metadata image format version 1 */

	/* TILE_MODE_INDEX is ambiguous without a PCI ID. */
	md->metadata[1] = si_get_bo_metadata_word1(device);

	radv_make_texture_descriptor(device, image, false,
				     (VkImageViewType)image->type, image->vk_format,
				     &fixedmapping, 0, image->info.levels - 1, 0,
				     image->info.array_size - 1,
				     image->info.width, image->info.height,
				     image->info.depth,
				     desc, NULL);

	si_set_mutable_tex_desc_fields(device, image, &image->planes[0].surface.u.legacy.level[0], 0, 0, 0,
				       image->planes[0].surface.blk_w, false, false, false, desc);

	/* Clear the base address and set the relative DCC offset. */
	desc[0] = 0;
	desc[1] &= C_008F14_BASE_ADDRESS_HI;
	desc[7] = image->planes[0].surface.dcc_offset >> 8;

	/* Dwords [2:9] contain the image descriptor. */
	memcpy(&md->metadata[2], desc, sizeof(desc));

	/* Dwords [10:..] contain the mipmap level offsets. */
	if (device->physical_device->rad_info.chip_class <= GFX8) {
		for (i = 0; i <= image->info.levels - 1; i++)
			md->metadata[10+i] = image->planes[0].surface.u.legacy.level[i].offset >> 8;
		md->size_metadata = (11 + image->info.levels - 1) * 4;
	} else
		md->size_metadata = 10 * 4;
}
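
/* Size sketch (illustrative): a GFX8 image with 3 mip levels emits dwords
 * [0:9] plus [10:12] for the level offsets, so
 *
 *   size_metadata = (11 + 3 - 1) * 4 = 52 bytes
 *
 * while GFX9+ always emits 10 * 4 = 40 bytes, exactly the minimum that
 * radv_is_valid_opaque_metadata() accepts.
 */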

void
radv_init_metadata(struct radv_device *device,
		   struct radv_image *image,
		   struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &image->planes[0].surface;

	memset(metadata, 0, sizeof(*metadata));

	if (device->physical_device->rad_info.chip_class >= GFX9) {
		metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
		metadata->u.gfx9.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	} else {
		metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
			RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
		metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
		metadata->u.legacy.bankw = surface->u.legacy.bankw;
		metadata->u.legacy.bankh = surface->u.legacy.bankh;
		metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
		metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
		metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
		metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
		metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
	}
	radv_query_opaque_metadata(device, image, metadata);
}

void
radv_image_override_offset_stride(struct radv_device *device,
				  struct radv_image *image,
				  uint64_t offset, uint32_t stride)
{
	ac_surface_override_offset_stride(&device->physical_device->rad_info,
					  &image->planes[0].surface,
					  image->info.levels, offset, stride);
}

static void
radv_image_alloc_single_sample_cmask(const struct radv_device *device,
				     const struct radv_image *image,
				     struct radeon_surf *surf)
{
	if (!surf->cmask_size || surf->cmask_offset || surf->bpe > 8 ||
	    image->info.levels > 1 || image->info.depth > 1 ||
	    radv_image_has_dcc(image) ||
	    !radv_image_use_fast_clear_for_image(device, image))
		return;

	assert(image->info.storage_samples == 1);

	surf->cmask_offset = align64(surf->total_size, surf->cmask_alignment);
	surf->total_size = surf->cmask_offset + surf->cmask_size;
	surf->alignment = MAX2(surf->alignment, surf->cmask_alignment);
}

static void
radv_image_alloc_values(const struct radv_device *device, struct radv_image *image)
{
	if (radv_image_has_dcc(image)) {
		image->fce_pred_offset = image->size;
		image->size += 8 * image->info.levels;

		image->dcc_pred_offset = image->size;
		image->size += 8 * image->info.levels;
	}

	if (radv_image_has_dcc(image) || radv_image_has_cmask(image) ||
	    radv_image_has_htile(image)) {
		image->clear_value_offset = image->size;
		image->size += 8 * image->info.levels;
	}

	if (radv_image_is_tc_compat_htile(image) &&
	    device->physical_device->rad_info.has_tc_compat_zrange_bug) {
		/* Metadata for the TC-compatible HTILE hardware bug which
		 * has to be fixed by updating ZRANGE_PRECISION when doing
		 * fast depth clears to 0.0f.
		 */
		image->tc_compat_zrange_offset = image->size;
		image->size += image->info.levels * 4;
	}
}
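
/* Layout sketch (illustrative): for a DCC color image with 3 mip levels
 * the tail of the allocation grows by
 *
 *   FCE predicates : 8 * 3 = 24 bytes
 *   DCC predicates : 8 * 3 = 24 bytes
 *   clear values   : 8 * 3 = 24 bytes
 *
 * i.e. image->size grows by 72 bytes, each block starting at the previous
 * value of image->size.
 */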

static void
radv_image_reset_layout(struct radv_image *image)
{
	image->size = 0;
	image->alignment = 1;

	image->tc_compatible_cmask = image->tc_compatible_htile = 0;
	image->fce_pred_offset = image->dcc_pred_offset = 0;
	image->clear_value_offset = image->tc_compat_zrange_offset = 0;

	for (unsigned i = 0; i < image->plane_count; ++i) {
		VkFormat format = vk_format_get_plane_format(image->vk_format, i);

		uint32_t flags = image->planes[i].surface.flags;
		memset(image->planes + i, 0, sizeof(image->planes[i]));

		image->planes[i].surface.flags = flags;
		image->planes[i].surface.blk_w = vk_format_get_blockwidth(format);
		image->planes[i].surface.blk_h = vk_format_get_blockheight(format);
		image->planes[i].surface.bpe = vk_format_get_blocksize(vk_format_depth_only(format));

		/* align byte per element on dword */
		if (image->planes[i].surface.bpe == 3) {
			image->planes[i].surface.bpe = 4;
		}
	}
}

VkResult
radv_image_create_layout(struct radv_device *device,
			 struct radv_image_create_info create_info,
			 struct radv_image *image)
{
	/* Clear the pCreateInfo pointer so we catch issues in the delayed case when we test in the
	 * common internal case. */
	create_info.vk_info = NULL;

	struct ac_surf_info image_info = image->info;
	VkResult result = radv_patch_image_from_extra_info(device, image, &create_info, &image_info);
	if (result != VK_SUCCESS)
		return result;

	radv_image_reset_layout(image);

	for (unsigned plane = 0; plane < image->plane_count; ++plane) {
		struct ac_surf_info info = image_info;

		if (plane) {
			const struct vk_format_description *desc = vk_format_description(image->vk_format);
			assert(info.width % desc->width_divisor == 0);
			assert(info.height % desc->height_divisor == 0);

			info.width /= desc->width_divisor;
			info.height /= desc->height_divisor;
		}

		if (create_info.no_metadata_planes || image->plane_count > 1) {
			image->planes[plane].surface.flags |= RADEON_SURF_DISABLE_DCC |
							      RADEON_SURF_NO_FMASK |
							      RADEON_SURF_NO_HTILE;
		}

		device->ws->surface_init(device->ws, &info, &image->planes[plane].surface);

		if (!create_info.no_metadata_planes && image->plane_count == 1)
			radv_image_alloc_single_sample_cmask(device, image, &image->planes[plane].surface);

		image->planes[plane].offset = align(image->size, image->planes[plane].surface.alignment);
		image->size = image->planes[plane].offset + image->planes[plane].surface.total_size;
		image->alignment = MAX2(image->alignment, image->planes[plane].surface.alignment);

		image->planes[plane].format = vk_format_get_plane_format(image->vk_format, plane);
	}

	image->tc_compatible_cmask = radv_image_has_cmask(image) &&
				     radv_use_tc_compat_cmask_for_image(device, image);

	image->tc_compatible_htile = radv_image_has_htile(image) &&
				     image->planes[0].surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE;

	radv_image_alloc_values(device, image);

	assert(image->planes[0].surface.surf_size);
	return VK_SUCCESS;
}
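
/* Plane packing sketch (illustrative): for a 2-plane image each plane is
 * appended at the next aligned offset,
 *
 *   planes[0].offset = 0
 *   planes[1].offset = align(planes[0] end, planes[1] alignment)
 *   image->size      = planes[1].offset + planes[1] total_size
 *   image->alignment = MAX2 of the plane alignments
 *
 * and because plane_count > 1 the loop above also forces
 * DISABLE_DCC | NO_FMASK | NO_HTILE, so no compression metadata has to be
 * packed per plane.
 */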
1364
1365 static void
1366 radv_destroy_image(struct radv_device *device,
1367 const VkAllocationCallbacks *pAllocator,
1368 struct radv_image *image)
1369 {
1370 if ((image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && image->bo)
1371 device->ws->buffer_destroy(image->bo);
1372
1373 if (image->owned_memory != VK_NULL_HANDLE) {
1374 RADV_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
1375 radv_free_memory(device, pAllocator, mem);
1376 }
1377
1378 vk_object_base_finish(&image->base);
1379 vk_free2(&device->vk.alloc, pAllocator, image);
1380 }
1381
1382 VkResult
1383 radv_image_create(VkDevice _device,
1384 const struct radv_image_create_info *create_info,
1385 const VkAllocationCallbacks* alloc,
1386 VkImage *pImage)
1387 {
1388 RADV_FROM_HANDLE(radv_device, device, _device);
1389 const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
1390 struct radv_image *image = NULL;
1391 VkFormat format = radv_select_android_external_format(pCreateInfo->pNext,
1392 pCreateInfo->format);
1393 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
1394
1395 const unsigned plane_count = vk_format_get_plane_count(format);
1396 const size_t image_struct_size = sizeof(*image) + sizeof(struct radv_image_plane) * plane_count;
1397
1398 radv_assert(pCreateInfo->mipLevels > 0);
1399 radv_assert(pCreateInfo->arrayLayers > 0);
1400 radv_assert(pCreateInfo->samples > 0);
1401 radv_assert(pCreateInfo->extent.width > 0);
1402 radv_assert(pCreateInfo->extent.height > 0);
1403 radv_assert(pCreateInfo->extent.depth > 0);
1404
1405 image = vk_zalloc2(&device->vk.alloc, alloc, image_struct_size, 8,
1406 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1407 if (!image)
1408 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1409
1410 vk_object_base_init(&device->vk, &image->base, VK_OBJECT_TYPE_IMAGE);
1411
1412 image->type = pCreateInfo->imageType;
1413 image->info.width = pCreateInfo->extent.width;
1414 image->info.height = pCreateInfo->extent.height;
1415 image->info.depth = pCreateInfo->extent.depth;
1416 image->info.samples = pCreateInfo->samples;
1417 image->info.storage_samples = pCreateInfo->samples;
1418 image->info.array_size = pCreateInfo->arrayLayers;
1419 image->info.levels = pCreateInfo->mipLevels;
1420 image->info.num_channels = vk_format_get_nr_components(format);
1421
1422 image->vk_format = format;
1423 image->tiling = pCreateInfo->tiling;
1424 image->usage = pCreateInfo->usage;
1425 image->flags = pCreateInfo->flags;
1426 image->plane_count = plane_count;
1427
1428 image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
1429 if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
1430 for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
1431 if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL ||
1432 pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_FOREIGN_EXT)
1433 image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
1434 else
1435 image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i];
1436 }
1437
1438 const VkExternalMemoryImageCreateInfo *external_info =
1439 vk_find_struct_const(pCreateInfo->pNext,
1440 EXTERNAL_MEMORY_IMAGE_CREATE_INFO) ;
1441
1442 image->shareable = external_info;
1443 if (!vk_format_is_depth_or_stencil(format) && !image->shareable) {
1444 image->info.surf_index = &device->image_mrt_offset_counter;
1445 }
1446
1447 for (unsigned plane = 0; plane < image->plane_count; ++plane) {
1448 image->planes[plane].surface.flags =
1449 radv_get_surface_flags(device, image, plane, pCreateInfo, format);
1450 }
1451
1452 bool delay_layout = external_info &&
1453 (external_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID);
1454
1455 if (delay_layout) {
1456 *pImage = radv_image_to_handle(image);
1457 assert (!(image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT));
1458 return VK_SUCCESS;
1459 }
1460
1461 ASSERTED VkResult result = radv_image_create_layout(device, *create_info, image);
1462 assert(result == VK_SUCCESS);
1463
1464 if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
1465 image->alignment = MAX2(image->alignment, 4096);
1466 image->size = align64(image->size, image->alignment);
1467 image->offset = 0;
1468
1469 image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment,
1470 0, RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
1471 if (!image->bo) {
1472 radv_destroy_image(device, alloc, image);
1473 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1474 }
1475 }
1476
1477 *pImage = radv_image_to_handle(image);
1478
1479 return VK_SUCCESS;
1480 }
1481
1482 static void
1483 radv_image_view_make_descriptor(struct radv_image_view *iview,
1484 struct radv_device *device,
1485 VkFormat vk_format,
1486 const VkComponentMapping *components,
1487 bool is_storage_image, bool disable_compression,
1488 unsigned plane_id, unsigned descriptor_plane_id)
1489 {
1490 struct radv_image *image = iview->image;
1491 struct radv_image_plane *plane = &image->planes[plane_id];
1492 const struct vk_format_description *format_desc = vk_format_description(image->vk_format);
1493 bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT;
1494 uint32_t blk_w;
1495 union radv_descriptor *descriptor;
1496 uint32_t hw_level = 0;
1497
1498 if (is_storage_image) {
1499 descriptor = &iview->storage_descriptor;
1500 } else {
1501 descriptor = &iview->descriptor;
1502 }
1503
1504 assert(vk_format_get_plane_count(vk_format) == 1);
1505 assert(plane->surface.blk_w % vk_format_get_blockwidth(plane->format) == 0);
1506 blk_w = plane->surface.blk_w / vk_format_get_blockwidth(plane->format) * vk_format_get_blockwidth(vk_format);
1507
1508 if (device->physical_device->rad_info.chip_class >= GFX9)
1509 hw_level = iview->base_mip;
1510 radv_make_texture_descriptor(device, image, is_storage_image,
1511 iview->type,
1512 vk_format,
1513 components,
1514 hw_level, hw_level + iview->level_count - 1,
1515 iview->base_layer,
1516 iview->base_layer + iview->layer_count - 1,
1517 iview->extent.width / (plane_id ? format_desc->width_divisor : 1),
1518 iview->extent.height / (plane_id ? format_desc->height_divisor : 1),
1519 iview->extent.depth,
1520 descriptor->plane_descriptors[descriptor_plane_id],
1521 descriptor_plane_id ? NULL : descriptor->fmask_descriptor);
1522
1523 const struct legacy_surf_level *base_level_info = NULL;
1524 if (device->physical_device->rad_info.chip_class <= GFX9) {
1525 if (is_stencil)
1526 base_level_info = &plane->surface.u.legacy.stencil_level[iview->base_mip];
1527 else
1528 base_level_info = &plane->surface.u.legacy.level[iview->base_mip];
1529 }
1530 si_set_mutable_tex_desc_fields(device, image,
1531 base_level_info,
1532 plane_id,
1533 iview->base_mip,
1534 iview->base_mip,
1535 blk_w, is_stencil, is_storage_image,
1536 is_storage_image || disable_compression,
1537 descriptor->plane_descriptors[descriptor_plane_id]);
1538 }
1539
1540 static unsigned
1541 radv_plane_from_aspect(VkImageAspectFlags mask)
1542 {
1543 switch(mask) {
1544 case VK_IMAGE_ASPECT_PLANE_1_BIT:
1545 return 1;
1546 case VK_IMAGE_ASPECT_PLANE_2_BIT:
1547 return 2;
1548 default:
1549 return 0;
1550 }
1551 }
1552
1553 VkFormat
1554 radv_get_aspect_format(struct radv_image *image, VkImageAspectFlags mask)
1555 {
1556 switch(mask) {
1557 case VK_IMAGE_ASPECT_PLANE_0_BIT:
1558 return image->planes[0].format;
1559 case VK_IMAGE_ASPECT_PLANE_1_BIT:
1560 return image->planes[1].format;
1561 case VK_IMAGE_ASPECT_PLANE_2_BIT:
1562 return image->planes[2].format;
1563 case VK_IMAGE_ASPECT_STENCIL_BIT:
1564 return vk_format_stencil_only(image->vk_format);
1565 case VK_IMAGE_ASPECT_DEPTH_BIT:
1566 return vk_format_depth_only(image->vk_format);
1567 case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
1568 return vk_format_depth_only(image->vk_format);
1569 default:
1570 return image->vk_format;
1571 }
1572 }
1573
1574 void
1575 radv_image_view_init(struct radv_image_view *iview,
1576 struct radv_device *device,
1577 const VkImageViewCreateInfo* pCreateInfo,
1578 const struct radv_image_view_extra_create_info* extra_create_info)
1579 {
1580 RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image);
1581 const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
1582
1583 switch (image->type) {
1584 case VK_IMAGE_TYPE_1D:
1585 case VK_IMAGE_TYPE_2D:
1586 assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <= image->info.array_size);
1587 break;
1588 case VK_IMAGE_TYPE_3D:
1589 assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1
1590 <= radv_minify(image->info.depth, range->baseMipLevel));
1591 break;
1592 default:
1593 unreachable("bad VkImageType");
1594 }
    iview->image = image;
    iview->bo = image->bo;
    iview->type = pCreateInfo->viewType;
    iview->plane_id = radv_plane_from_aspect(pCreateInfo->subresourceRange.aspectMask);
    iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
    iview->multiple_planes = vk_format_get_plane_count(image->vk_format) > 1 && iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT;

    iview->vk_format = pCreateInfo->format;

    /* If the image has an Android external format, pCreateInfo->format will
     * be VK_FORMAT_UNDEFINED. */
    if (iview->vk_format == VK_FORMAT_UNDEFINED)
        iview->vk_format = image->vk_format;

    if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
        iview->vk_format = vk_format_stencil_only(iview->vk_format);
    } else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
        iview->vk_format = vk_format_depth_only(iview->vk_format);
    }

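    /* GFX9+ descriptors are programmed with the base level's size and the
     * hardware minifies each mip itself, so keep the level 0 extent; older
     * chips need the extent of the view's base mip level.
     */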
    if (device->physical_device->rad_info.chip_class >= GFX9) {
        iview->extent = (VkExtent3D) {
            .width = image->info.width,
            .height = image->info.height,
            .depth = image->info.depth,
        };
    } else {
        iview->extent = (VkExtent3D) {
            .width = radv_minify(image->info.width, range->baseMipLevel),
            .height = radv_minify(image->info.height, range->baseMipLevel),
            .depth = radv_minify(image->info.depth, range->baseMipLevel),
        };
    }

    if (iview->vk_format != image->planes[iview->plane_id].format) {
        unsigned view_bw = vk_format_get_blockwidth(iview->vk_format);
        unsigned view_bh = vk_format_get_blockheight(iview->vk_format);
        unsigned img_bw = vk_format_get_blockwidth(image->vk_format);
        unsigned img_bh = vk_format_get_blockheight(image->vk_format);

        iview->extent.width = round_up_u32(iview->extent.width * view_bw, img_bw);
        iview->extent.height = round_up_u32(iview->extent.height * view_bh, img_bh);

        /* Comment ported from amdvlk -
         * If we have the following image:
         *              Uncompressed pixels   Compressed block sizes (4x4)
         *      mip0:       22 x 22                  6 x 6
         *      mip1:       11 x 11                  3 x 3
         *      mip2:        5 x  5                  2 x 2
         *      mip3:        2 x  2                  1 x 1
         *      mip4:        1 x  1                  1 x 1
         *
         * On GFX9 the descriptor is always programmed with the WIDTH and
         * HEIGHT of the base level, and the HW calculates the degradation of
         * the block sizes down the mip-chain as follows (straight-up
         * divide-by-two integer math):
         *      mip0:  6x6
         *      mip1:  3x3
         *      mip2:  1x1
         *      mip3:  1x1
         *
         * This means that mip2 will be missing texels.
         *
         * Fix this by calculating the base mip's width and height, converting
         * that to the view format, and rounding it back up to get the level 0
         * size. Clamp the converted size between the original value and the
         * next power of two, so we don't oversize the image.
         */
        if (device->physical_device->rad_info.chip_class >= GFX9 &&
            vk_format_is_compressed(image->vk_format) &&
            !vk_format_is_compressed(iview->vk_format)) {
            unsigned lvl_width = radv_minify(image->info.width, range->baseMipLevel);
            unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel);

            lvl_width = round_up_u32(lvl_width * view_bw, img_bw);
            lvl_height = round_up_u32(lvl_height * view_bh, img_bh);

            lvl_width <<= range->baseMipLevel;
            lvl_height <<= range->baseMipLevel;

            iview->extent.width = CLAMP(lvl_width, iview->extent.width, iview->image->planes[0].surface.u.gfx9.surf_pitch);
            iview->extent.height = CLAMP(lvl_height, iview->extent.height, iview->image->planes[0].surface.u.gfx9.surf_height);
        }
    }

    iview->base_layer = range->baseArrayLayer;
    iview->layer_count = radv_get_layerCount(image, range);
    iview->base_mip = range->baseMipLevel;
    iview->level_count = radv_get_levelCount(image, range);

    bool disable_compression = extra_create_info ? extra_create_info->disable_compression : false;
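    /* Build two descriptors per plane: one for sampled access and one for
     * storage image access.
     */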
    for (unsigned i = 0; i < (iview->multiple_planes ? vk_format_get_plane_count(image->vk_format) : 1); ++i) {
        VkFormat format = vk_format_get_plane_format(iview->vk_format, i);
        radv_image_view_make_descriptor(iview, device, format,
                                        &pCreateInfo->components,
                                        false, disable_compression,
                                        iview->plane_id + i, i);
        radv_image_view_make_descriptor(iview, device,
                                        format, &pCreateInfo->components,
                                        true, disable_compression,
                                        iview->plane_id + i, i);
    }
}

bool radv_layout_is_htile_compressed(const struct radv_image *image,
                                     VkImageLayout layout,
                                     bool in_render_loop,
                                     unsigned queue_mask)
{
    if (radv_image_is_tc_compat_htile(image)) {
        if (layout == VK_IMAGE_LAYOUT_GENERAL &&
            !in_render_loop &&
            !(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
            /* It should be safe to enable TC-compat HTILE with
             * VK_IMAGE_LAYOUT_GENERAL if we are not in a render
             * loop and if the image doesn't have the storage bit
             * set. This improves performance for apps that use
             * GENERAL for the main depth pass because it allows
             * compression and reduces the number of decompressions
             * from/to GENERAL.
             */
            return true;
        }

        return layout != VK_IMAGE_LAYOUT_GENERAL;
    }

    return radv_image_has_htile(image) &&
           (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
            layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR ||
            layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR ||
            (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
             queue_mask == (1u << RADV_QUEUE_GENERAL)));
}

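/* Only allow fast clears while the image is used as a color attachment and
 * only the general queue accesses it, so the fast-clear eliminate pass can
 * run before anyone else reads the image.
 */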
bool radv_layout_can_fast_clear(const struct radv_image *image,
                                VkImageLayout layout,
                                bool in_render_loop,
                                unsigned queue_mask)
{
    return layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
           queue_mask == (1u << RADV_QUEUE_GENERAL);
}

bool radv_layout_dcc_compressed(const struct radv_device *device,
                                const struct radv_image *image,
                                VkImageLayout layout,
                                bool in_render_loop,
                                unsigned queue_mask)
{
    /* Don't compress compute transfer dst, as image stores are not supported. */
    if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
        (queue_mask & (1u << RADV_QUEUE_COMPUTE)))
        return false;

    return radv_image_has_dcc(image) && layout != VK_IMAGE_LAYOUT_GENERAL;
}

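/* Compute the set of queue families that may access the image: concurrent
 * images use the mask given at creation; exclusive images use the owning
 * family, or all families when shared externally.
 */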
unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family)
{
    if (!image->exclusive)
        return image->queue_family_mask;
    if (family == VK_QUEUE_FAMILY_EXTERNAL ||
        family == VK_QUEUE_FAMILY_FOREIGN_EXT)
        return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u;
    if (family == VK_QUEUE_FAMILY_IGNORED)
        return 1u << queue_family;
    return 1u << family;
}

VkResult
radv_CreateImage(VkDevice device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkImage *pImage)
{
#ifdef ANDROID
    const VkNativeBufferANDROID *gralloc_info =
        vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);

    if (gralloc_info)
        return radv_image_from_gralloc(device, pCreateInfo, gralloc_info,
                                       pAllocator, pImage);
#endif

    const struct wsi_image_create_info *wsi_info =
        vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
    bool scanout = wsi_info && wsi_info->scanout;

    return radv_image_create(device,
                             &(struct radv_image_create_info) {
                                 .vk_info = pCreateInfo,
                                 .scanout = scanout,
                             },
                             pAllocator,
                             pImage);
}

void
radv_DestroyImage(VkDevice _device, VkImage _image,
                  const VkAllocationCallbacks *pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_image, image, _image);

    if (!image)
        return;

    radv_destroy_image(device, pAllocator, image);
}

void radv_GetImageSubresourceLayout(
    VkDevice                                    _device,
    VkImage                                     _image,
    const VkImageSubresource*                   pSubresource,
    VkSubresourceLayout*                        pLayout)
{
    RADV_FROM_HANDLE(radv_image, image, _image);
    RADV_FROM_HANDLE(radv_device, device, _device);
    int level = pSubresource->mipLevel;
    int layer = pSubresource->arrayLayer;

    unsigned plane_id = radv_plane_from_aspect(pSubresource->aspectMask);

    struct radv_image_plane *plane = &image->planes[plane_id];
    struct radeon_surf *surface = &plane->surface;

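    /* On GFX9+ the mips of a slice are stored back-to-back and only linear
     * surfaces track a per-level byte offset; for tiled surfaces the level
     * offset is left at 0.
     */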
    if (device->physical_device->rad_info.chip_class >= GFX9) {
        uint64_t level_offset = surface->is_linear ? surface->u.gfx9.offset[level] : 0;

        pLayout->offset = plane->offset + level_offset + surface->u.gfx9.surf_slice_size * layer;
        if (image->vk_format == VK_FORMAT_R32G32B32_UINT ||
            image->vk_format == VK_FORMAT_R32G32B32_SINT ||
            image->vk_format == VK_FORMAT_R32G32B32_SFLOAT) {
            /* Adjust the number of bytes between each row because
             * the pitch is actually the number of components per
             * row: with bpe = 12, this yields pitch * 4 bytes,
             * i.e. 4 bytes per 32-bit component.
             */
            pLayout->rowPitch = surface->u.gfx9.surf_pitch * surface->bpe / 3;
        } else {
            uint32_t pitch = surface->is_linear ? surface->u.gfx9.pitch[level] : surface->u.gfx9.surf_pitch;

            assert(util_is_power_of_two_nonzero(surface->bpe));
            pLayout->rowPitch = pitch * surface->bpe;
        }

        pLayout->arrayPitch = surface->u.gfx9.surf_slice_size;
        pLayout->depthPitch = surface->u.gfx9.surf_slice_size;
        pLayout->size = surface->u.gfx9.surf_slice_size;
        if (image->type == VK_IMAGE_TYPE_3D)
            pLayout->size *= u_minify(image->info.depth, level);
    } else {
        pLayout->offset = plane->offset + surface->u.legacy.level[level].offset + (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4 * layer;
        pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe;
        pLayout->arrayPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
        pLayout->depthPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
        pLayout->size = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4;
        if (image->type == VK_IMAGE_TYPE_3D)
            pLayout->size *= u_minify(image->info.depth, level);
    }
}


VkResult
radv_CreateImageView(VkDevice _device,
                     const VkImageViewCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkImageView *pView)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_image_view *view;

    view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (view == NULL)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    vk_object_base_init(&device->vk, &view->base,
                        VK_OBJECT_TYPE_IMAGE_VIEW);

    radv_image_view_init(view, device, pCreateInfo, NULL);

    *pView = radv_image_view_to_handle(view);

    return VK_SUCCESS;
}

void
radv_DestroyImageView(VkDevice _device, VkImageView _iview,
                      const VkAllocationCallbacks *pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_image_view, iview, _iview);

    if (!iview)
        return;

    vk_object_base_finish(&iview->base);
    vk_free2(&device->vk.alloc, pAllocator, iview);
}

void radv_buffer_view_init(struct radv_buffer_view *view,
                           struct radv_device *device,
                           const VkBufferViewCreateInfo* pCreateInfo)
{
    RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);

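    /* VK_WHOLE_SIZE means "from offset to the end of the buffer". */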
    view->bo = buffer->bo;
    view->range = pCreateInfo->range == VK_WHOLE_SIZE ?
        buffer->size - pCreateInfo->offset : pCreateInfo->range;
    view->vk_format = pCreateInfo->format;

    radv_make_buffer_descriptor(device, buffer, view->vk_format,
                                pCreateInfo->offset, view->range, view->state);
}

VkResult
radv_CreateBufferView(VkDevice _device,
                      const VkBufferViewCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkBufferView *pView)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    struct radv_buffer_view *view;

    view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!view)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    vk_object_base_init(&device->vk, &view->base,
                        VK_OBJECT_TYPE_BUFFER_VIEW);

    radv_buffer_view_init(view, device, pCreateInfo);

    *pView = radv_buffer_view_to_handle(view);

    return VK_SUCCESS;
}

void
radv_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
                       const VkAllocationCallbacks *pAllocator)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_buffer_view, view, bufferView);

    if (!view)
        return;

    vk_object_base_finish(&view->base);
    vk_free2(&device->vk.alloc, pAllocator, view);
}