turnip: implement UBWC
src/freedreno/vulkan/tu_image.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "util/debug.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"
#include "drm-uapi/drm_fourcc.h"

static inline bool
image_level_linear(struct tu_image *image, int level, bool ubwc)
{
   unsigned w = u_minify(image->extent.width, level);
   /* all levels are tiled/compressed with UBWC */
   return ubwc ? false : (w < 16);
}

enum a6xx_tile_mode
tu6_get_image_tile_mode(struct tu_image *image, int level)
{
   if (image_level_linear(image, level, !!image->ubwc_size))
      return TILE6_LINEAR;
   else
      return image->tile_mode;
}

/* indexed by cpp, including msaa 2x and 4x: */
static const struct {
   uint8_t pitchalign;
   uint8_t heightalign;
   uint8_t ubwc_blockwidth;
   uint8_t ubwc_blockheight;
} tile_alignment[] = {
   /* TODO:
    * cpp=1 UBWC needs testing at larger texture sizes
    * missing UBWC blockwidth/blockheight for npot+64 cpp
    * missing 96/128 cpp for 8x MSAA with 32_32_32/32_32_32_32
    */
   [1]  = { 128, 32, 16, 4 },
   [2]  = { 128, 16, 16, 4 },
   [3]  = {  64, 32 },
   [4]  = {  64, 16, 16, 4 },
   [6]  = {  64, 16 },
   [8]  = {  64, 16,  8, 4 },
   [12] = {  64, 16 },
   [16] = {  64, 16,  4, 4 },
   [24] = {  64, 16 },
   [32] = {  64, 16,  4, 2 },
   [48] = {  64, 16 },
   [64] = {  64, 16 },
   /* special case for r8g8: */
   [0]  = {  64, 32, 16, 4 },
};

static void
setup_slices(struct tu_image *image,
             const VkImageCreateInfo *pCreateInfo,
             bool ubwc_enabled)
{
#define RGB_TILE_WIDTH_ALIGNMENT 64
#define RGB_TILE_HEIGHT_ALIGNMENT 16
#define UBWC_PLANE_SIZE_ALIGNMENT 4096
   VkFormat format = pCreateInfo->format;
   enum util_format_layout layout = vk_format_description(format)->layout;
   uint32_t layer_size = 0;
   uint32_t ubwc_size = 0;
   int ta = image->cpp;

   /* The r8g8 format seems to not play by the normal tiling rules: */
   if (image->cpp == 2 && vk_format_get_nr_components(format) == 2)
      ta = 0;

   for (unsigned level = 0; level < pCreateInfo->mipLevels; level++) {
      struct tu_image_level *slice = &image->levels[level];
      struct tu_image_level *ubwc_slice = &image->ubwc_levels[level];
      uint32_t width = u_minify(pCreateInfo->extent.width, level);
      uint32_t height = u_minify(pCreateInfo->extent.height, level);
      uint32_t depth = u_minify(pCreateInfo->extent.depth, level);
      uint32_t aligned_height = height;
      uint32_t blocks;
      uint32_t pitchalign;

      if (image->tile_mode && !image_level_linear(image, level, ubwc_enabled)) {
         /* tiled levels of 3D textures are rounded up to PoT dimensions: */
         if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D) {
            width = util_next_power_of_two(width);
            height = aligned_height = util_next_power_of_two(height);
         }
         pitchalign = tile_alignment[ta].pitchalign;
         aligned_height = align(aligned_height, tile_alignment[ta].heightalign);
      } else {
         pitchalign = 64;
      }

      /* The blits used for mem<->gmem work at a granularity of
       * 32x32, which can cause faults due to over-fetch on the
       * last level. The simple solution is to over-allocate the
       * last level a bit to ensure any over-fetch is harmless.
       * The pitch is already sufficiently aligned, but the height
       * may not be:
       */
      if (level + 1 == pCreateInfo->mipLevels)
         aligned_height = align(aligned_height, 32);

      if (layout == UTIL_FORMAT_LAYOUT_ASTC)
         slice->pitch =
            util_align_npot(width, pitchalign * vk_format_get_blockwidth(format));
      else
         slice->pitch = align(width, pitchalign);

      slice->offset = layer_size;
      blocks = vk_format_get_block_count(format, slice->pitch, aligned_height);

      /* 1d array and 2d array textures must all have the same layer size
       * for each miplevel on a6xx. 3d textures can have different layer
       * sizes for high levels, but the hw auto-sizer is buggy (or at least
       * different than what this code does), so once the layer size gets
       * small enough, we stop reducing it.
       */
      if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D) {
         if (level < 1 || image->levels[level - 1].size > 0xf000) {
            slice->size = align(blocks * image->cpp, 4096);
         } else {
            slice->size = image->levels[level - 1].size;
         }
      } else {
         slice->size = blocks * image->cpp;
      }

      layer_size += slice->size * depth;
      if (ubwc_enabled) {
         /* with UBWC every level is aligned to 4K */
         layer_size = align(layer_size, 4096);

         uint32_t block_width = tile_alignment[ta].ubwc_blockwidth;
         uint32_t block_height = tile_alignment[ta].ubwc_blockheight;
         uint32_t meta_pitch = align(DIV_ROUND_UP(width, block_width), RGB_TILE_WIDTH_ALIGNMENT);
         uint32_t meta_height = align(DIV_ROUND_UP(height, block_height), RGB_TILE_HEIGHT_ALIGNMENT);

         /* it looks like mipmaps need alignment to power of two
          * TODO: needs testing with large npot textures
          * (needed for the first level?)
          */
         if (pCreateInfo->mipLevels > 1) {
            meta_pitch = util_next_power_of_two(meta_pitch);
            meta_height = util_next_power_of_two(meta_height);
         }

         ubwc_slice->pitch = meta_pitch;
         ubwc_slice->offset = ubwc_size;
         ubwc_size += align(meta_pitch * meta_height, UBWC_PLANE_SIZE_ALIGNMENT);
      }
   }
   image->layer_size = align(layer_size, 4096);

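   /* With UBWC, the flag (meta) planes for all layers live at the start of
    * the BO, followed by the per-layer image data, so shift every level's
    * data offset past the flag planes:
    */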
   VkDeviceSize offset = ubwc_size * pCreateInfo->arrayLayers;
   for (unsigned level = 0; level < pCreateInfo->mipLevels; level++)
      image->levels[level].offset += offset;

   image->size = offset + image->layer_size * pCreateInfo->arrayLayers;
   image->ubwc_size = ubwc_size;
}

VkResult
tu_image_create(VkDevice _device,
                const VkImageCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *alloc,
                VkImage *pImage,
                uint64_t modifier)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_image *image = NULL;
   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

   tu_assert(pCreateInfo->mipLevels > 0);
   tu_assert(pCreateInfo->arrayLayers > 0);
   tu_assert(pCreateInfo->samples > 0);
   tu_assert(pCreateInfo->extent.width > 0);
   tu_assert(pCreateInfo->extent.height > 0);
   tu_assert(pCreateInfo->extent.depth > 0);

   image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!image)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   image->type = pCreateInfo->imageType;

   image->vk_format = pCreateInfo->format;
   image->tiling = pCreateInfo->tiling;
   image->usage = pCreateInfo->usage;
   image->flags = pCreateInfo->flags;
   image->extent = pCreateInfo->extent;
   image->level_count = pCreateInfo->mipLevels;
   image->layer_count = pCreateInfo->arrayLayers;
   image->samples = pCreateInfo->samples;
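   /* cpp includes the MSAA sample count so it can index the tile_alignment
    * table above directly:
    */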
   image->cpp = vk_format_get_blocksize(image->vk_format) * image->samples;

   image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
   if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
      for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
         if (pCreateInfo->pQueueFamilyIndices[i] ==
             VK_QUEUE_FAMILY_EXTERNAL)
            image->queue_family_mask |= (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
         else
            image->queue_family_mask |=
               1u << pCreateInfo->pQueueFamilyIndices[i];
   }

   image->shareable =
      vk_find_struct_const(pCreateInfo->pNext,
                           EXTERNAL_MEMORY_IMAGE_CREATE_INFO) != NULL;

   image->tile_mode = TILE6_3;
   bool ubwc_enabled = true;

   /* disable tiling when linear is requested and for compressed formats */
   if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR ||
       modifier == DRM_FORMAT_MOD_LINEAR ||
       vk_format_is_compressed(image->vk_format)) {
      image->tile_mode = TILE6_LINEAR;
      ubwc_enabled = false;
   }

   /* Using UBWC with D24S8 breaks the "stencil read" copy path (why?),
    * which makes any dEQP test that needs to check stencil fail.
    * Disable UBWC for this format until copy aspect masks are properly
    * supported.
    */
   if (image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT)
      ubwc_enabled = false;

   /* UBWC can't be used with E5B9G9R9 */
   if (image->vk_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)
      ubwc_enabled = false;

   if (image->extent.depth > 1) {
      tu_finishme("UBWC with 3D textures");
      ubwc_enabled = false;
   }

   if (!tile_alignment[image->cpp].ubwc_blockwidth) {
      tu_finishme("UBWC for cpp=%d", image->cpp);
      ubwc_enabled = false;
   }

   /* expect UBWC enabled if we asked for it */
   assert(modifier != DRM_FORMAT_MOD_QCOM_COMPRESSED || ubwc_enabled);

   setup_slices(image, pCreateInfo, ubwc_enabled);

   *pImage = tu_image_to_handle(image);

   return VK_SUCCESS;
}

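/* Fetch size field of the a6xx texture descriptor: bytes fetched per block,
 * derived from the format's block size divided by its block width (ASTC
 * always uses 16 bytes):
 */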
static enum a6xx_tex_fetchsize
tu6_fetchsize(VkFormat format)
{
   if (vk_format_description(format)->layout == UTIL_FORMAT_LAYOUT_ASTC)
      return TFETCH6_16_BYTE;

   switch (vk_format_get_blocksize(format) / vk_format_get_blockwidth(format)) {
   case 1: return TFETCH6_1_BYTE;
   case 2: return TFETCH6_2_BYTE;
   case 4: return TFETCH6_4_BYTE;
   case 8: return TFETCH6_8_BYTE;
   case 16: return TFETCH6_16_BYTE;
   default:
      unreachable("bad block size");
   }
}

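/* Compose the view's VkComponentMapping with the format's own swizzle to
 * produce the SWIZ_X/Y/Z/W fields of TEX_CONST_0:
 */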
static uint32_t
tu6_texswiz(const VkComponentMapping *comps, const unsigned char *fmt_swiz)
{
   unsigned char swiz[4] = {comps->r, comps->g, comps->b, comps->a};
   unsigned char vk_swizzle[] = {
      [VK_COMPONENT_SWIZZLE_ZERO] = A6XX_TEX_ZERO,
      [VK_COMPONENT_SWIZZLE_ONE] = A6XX_TEX_ONE,
      [VK_COMPONENT_SWIZZLE_R] = A6XX_TEX_X,
      [VK_COMPONENT_SWIZZLE_G] = A6XX_TEX_Y,
      [VK_COMPONENT_SWIZZLE_B] = A6XX_TEX_Z,
      [VK_COMPONENT_SWIZZLE_A] = A6XX_TEX_W,
   };
   for (unsigned i = 0; i < 4; i++) {
      swiz[i] = (swiz[i] == VK_COMPONENT_SWIZZLE_IDENTITY) ? i : vk_swizzle[swiz[i]];
      /* if format has 0/1 in channel, use that (needed for bc1_rgb) */
      if (swiz[i] < 4) {
         switch (fmt_swiz[swiz[i]]) {
         case PIPE_SWIZZLE_0: swiz[i] = A6XX_TEX_ZERO; break;
         case PIPE_SWIZZLE_1: swiz[i] = A6XX_TEX_ONE; break;
         }
      }
   }

   return A6XX_TEX_CONST_0_SWIZ_X(swiz[0]) |
          A6XX_TEX_CONST_0_SWIZ_Y(swiz[1]) |
          A6XX_TEX_CONST_0_SWIZ_Z(swiz[2]) |
          A6XX_TEX_CONST_0_SWIZ_W(swiz[3]);
}

static enum a6xx_tex_type
tu6_tex_type(VkImageViewType type)
{
   switch (type) {
   default:
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
      return A6XX_TEX_1D;
   case VK_IMAGE_VIEW_TYPE_2D:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
      return A6XX_TEX_2D;
   case VK_IMAGE_VIEW_TYPE_3D:
      return A6XX_TEX_3D;
   case VK_IMAGE_VIEW_TYPE_CUBE:
   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      return A6XX_TEX_CUBE;
   }
}

void
tu_image_view_init(struct tu_image_view *iview,
                   struct tu_device *device,
                   const VkImageViewCreateInfo *pCreateInfo)
{
   TU_FROM_HANDLE(tu_image, image, pCreateInfo->image);
   const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

   switch (image->type) {
   case VK_IMAGE_TYPE_1D:
   case VK_IMAGE_TYPE_2D:
      assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
             image->layer_count);
      break;
   case VK_IMAGE_TYPE_3D:
      assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
             tu_minify(image->extent.depth, range->baseMipLevel));
      break;
   default:
      unreachable("bad VkImageType");
   }

   iview->image = image;
   iview->type = pCreateInfo->viewType;
   iview->vk_format = pCreateInfo->format;
   iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;

   if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
      iview->vk_format = vk_format_stencil_only(iview->vk_format);
   } else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
      iview->vk_format = vk_format_depth_only(iview->vk_format);
   }

   // should we minify?
   iview->extent = image->extent;

   iview->base_layer = range->baseArrayLayer;
   iview->layer_count = tu_get_layerCount(image, range);
   iview->base_mip = range->baseMipLevel;
   iview->level_count = tu_get_levelCount(image, range);

   memset(iview->descriptor, 0, sizeof(iview->descriptor));

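   /* Fill out the a6xx texture descriptor (A6XX_TEX_CONST_* dwords) for the
    * view: format, tiling, swizzle, size, pitch, and base address, plus the
    * UBWC flag-buffer address and pitch when the image is compressed.
    */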
   const struct tu_native_format *fmt = tu6_get_native_format(iview->vk_format);
   uint64_t base_addr = tu_image_base(image, iview->base_mip, iview->base_layer);
   uint64_t ubwc_addr = tu_image_ubwc_base(image, iview->base_mip, iview->base_layer);

   uint32_t pitch = tu_image_stride(image, iview->base_mip) / vk_format_get_blockwidth(iview->vk_format);
   enum a6xx_tile_mode tile_mode = tu6_get_image_tile_mode(image, iview->base_mip);
   uint32_t width = u_minify(image->extent.width, iview->base_mip);
   uint32_t height = u_minify(image->extent.height, iview->base_mip);

   iview->descriptor[0] =
      A6XX_TEX_CONST_0_TILE_MODE(tile_mode) |
      COND(vk_format_is_srgb(iview->vk_format), A6XX_TEX_CONST_0_SRGB) |
      A6XX_TEX_CONST_0_FMT(fmt->tex) |
      A6XX_TEX_CONST_0_SAMPLES(tu_msaa_samples(image->samples)) |
      A6XX_TEX_CONST_0_SWAP(image->tile_mode ? WZYX : fmt->swap) |
      tu6_texswiz(&pCreateInfo->components, vk_format_description(iview->vk_format)->swizzle) |
      A6XX_TEX_CONST_0_MIPLVLS(iview->level_count - 1);
   iview->descriptor[1] = A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height);
   iview->descriptor[2] =
      A6XX_TEX_CONST_2_FETCHSIZE(tu6_fetchsize(iview->vk_format)) |
      A6XX_TEX_CONST_2_PITCH(pitch) |
      A6XX_TEX_CONST_2_TYPE(tu6_tex_type(pCreateInfo->viewType));
   iview->descriptor[3] = A6XX_TEX_CONST_3_ARRAY_PITCH(tu_layer_size(image, iview->base_mip));
   iview->descriptor[4] = base_addr;
   iview->descriptor[5] = base_addr >> 32;

   if (image->ubwc_size) {
      uint32_t block_width = tile_alignment[image->cpp].ubwc_blockwidth;
      uint32_t block_height = tile_alignment[image->cpp].ubwc_blockheight;

      iview->descriptor[3] |= A6XX_TEX_CONST_3_FLAG | A6XX_TEX_CONST_3_TILE_ALL;
      iview->descriptor[7] = ubwc_addr;
      iview->descriptor[8] = ubwc_addr >> 32;
      iview->descriptor[9] |= A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(tu_image_ubwc_size(image, iview->base_mip) >> 2);
      iview->descriptor[10] |=
         A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(tu_image_ubwc_pitch(image, iview->base_mip)) |
         A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(util_logbase2_ceil(DIV_ROUND_UP(width, block_width))) |
         A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(util_logbase2_ceil(DIV_ROUND_UP(height, block_height)));
   }

   if (pCreateInfo->viewType != VK_IMAGE_VIEW_TYPE_3D) {
      iview->descriptor[5] |= A6XX_TEX_CONST_5_DEPTH(iview->layer_count);
   } else {
      iview->descriptor[3] |=
         A6XX_TEX_CONST_3_MIN_LAYERSZ(image->levels[image->level_count - 1].size);
      iview->descriptor[5] |=
         A6XX_TEX_CONST_5_DEPTH(u_minify(image->extent.depth, iview->base_mip));
   }
}

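/* Map a queue family index (possibly VK_QUEUE_FAMILY_EXTERNAL or
 * VK_QUEUE_FAMILY_IGNORED) to the mask of queue families that may access
 * the image:
 */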
unsigned
tu_image_queue_family_mask(const struct tu_image *image,
                           uint32_t family,
                           uint32_t queue_family)
{
   if (!image->exclusive)
      return image->queue_family_mask;
   if (family == VK_QUEUE_FAMILY_EXTERNAL)
      return (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
   if (family == VK_QUEUE_FAMILY_IGNORED)
      return 1u << queue_family;
   return 1u << family;
}

VkResult
tu_CreateImage(VkDevice device,
               const VkImageCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkImage *pImage)
{
#ifdef ANDROID
   const VkNativeBufferANDROID *gralloc_info =
      vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);

   if (gralloc_info)
      return tu_image_from_gralloc(device, pCreateInfo, gralloc_info,
                                   pAllocator, pImage);
#endif

   const struct wsi_image_create_info *wsi_info =
      vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA);
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;

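   /* For WSI images, pick UBWC (QCOM_COMPRESSED) if it is in the allowed
    * modifier list, otherwise fall back to linear:
    */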
   if (wsi_info) {
      modifier = DRM_FORMAT_MOD_LINEAR;
      for (unsigned i = 0; i < wsi_info->modifier_count; i++) {
         if (wsi_info->modifiers[i] == DRM_FORMAT_MOD_QCOM_COMPRESSED)
            modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED;
      }
   }

   return tu_image_create(device, pCreateInfo, pAllocator, pImage, modifier);
}

void
tu_DestroyImage(VkDevice _device,
                VkImage _image,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_image, image, _image);

   if (!image)
      return;

   if (image->owned_memory != VK_NULL_HANDLE)
      tu_FreeMemory(_device, image->owned_memory, pAllocator);

   vk_free2(&device->alloc, pAllocator, image);
}

void
tu_GetImageSubresourceLayout(VkDevice _device,
                             VkImage _image,
                             const VkImageSubresource *pSubresource,
                             VkSubresourceLayout *pLayout)
{
   TU_FROM_HANDLE(tu_image, image, _image);

   const uint32_t layer_offset = image->layer_size * pSubresource->arrayLayer;
   const struct tu_image_level *level =
      image->levels + pSubresource->mipLevel;

   pLayout->offset = layer_offset + level->offset;
   pLayout->size = level->size;
   pLayout->rowPitch =
      level->pitch * vk_format_get_blocksize(image->vk_format);
   pLayout->arrayPitch = image->layer_size;
   pLayout->depthPitch = level->size;

   if (image->ubwc_size) {
      /* UBWC starts at offset 0 */
      pLayout->offset = 0;
      /* UBWC scanout won't match what the kernel wants if we have levels/layers */
      assert(image->level_count == 1 && image->layer_count == 1);
   }
}

VkResult
tu_CreateImageView(VkDevice _device,
                   const VkImageViewCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkImageView *pView)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_image_view *view;

   view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (view == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_image_view_init(view, device, pCreateInfo);

   *pView = tu_image_view_to_handle(view);

   return VK_SUCCESS;
}

void
tu_DestroyImageView(VkDevice _device,
                    VkImageView _iview,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_image_view, iview, _iview);

   if (!iview)
      return;
   vk_free2(&device->alloc, pAllocator, iview);
}

void
tu_buffer_view_init(struct tu_buffer_view *view,
                    struct tu_device *device,
                    const VkBufferViewCreateInfo *pCreateInfo)
{
   TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);

   view->range = pCreateInfo->range == VK_WHOLE_SIZE
                    ? buffer->size - pCreateInfo->offset
                    : pCreateInfo->range;
   view->vk_format = pCreateInfo->format;
}

VkResult
tu_CreateBufferView(VkDevice _device,
                    const VkBufferViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkBufferView *pView)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer_view *view;

   view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_buffer_view_init(view, device, pCreateInfo);

   *pView = tu_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

void
tu_DestroyBufferView(VkDevice _device,
                     VkBufferView bufferView,
                     const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer_view, view, bufferView);

   if (!view)
      return;

   vk_free2(&device->alloc, pAllocator, view);
}