/* turnip: Annotate vkGetImageSubresourceLayout with tu_stub
 * [mesa.git] src/freedreno/vulkan/tu_image.c
 */
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "util/debug.h"
31 #include "util/u_atomic.h"
32 #include "vk_format.h"
33 #include "vk_util.h"
34
35 static inline bool
36 image_level_linear(struct tu_image *image, int level)
37 {
38 unsigned w = u_minify(image->extent.width, level);
39 return w < 16;
40 }
41
/* Tiling alignment requirements, indexed by cpp (bytes per pixel/block):
 * pitchalign is in pixels, heightalign in rows.  Designated initializers
 * leave the unlisted cpp values (5, 6, 7, 9, 10, 11, 13, 14, 15)
 * zero-initialized — presumably no supported format hits those sizes;
 * see NOTE(review) in setup_slices().
 */
static const struct
{
   unsigned pitchalign;
   unsigned heightalign;
} tile_alignment[] = {
   [1] = { 128, 32 }, [2] = { 128, 16 }, [3] = { 128, 16 }, [4] = { 64, 16 },
   [8] = { 64, 16 }, [12] = { 64, 16 }, [16] = { 64, 16 },
};
51
/* Compute the per-mip-level layout of an image: for each level, fill in
 * image->levels[level] (offset, pitch in pixels, size in bytes) and set
 * image->layer_size to the total byte size of one array layer.
 *
 * 3D images are laid out level-first (all depth slices of a level are
 * contiguous, each level 4096-byte aligned); 1D/2D(-array) images are laid
 * out layer-first with no extra alignment.
 */
static void
setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo)
{
   enum vk_format_layout layout =
      vk_format_description(pCreateInfo->format)->layout;
   uint32_t layer_size = 0;
   uint32_t width = pCreateInfo->extent.width;
   uint32_t height = pCreateInfo->extent.height;
   uint32_t depth = pCreateInfo->extent.depth;
   bool layer_first = pCreateInfo->imageType != VK_IMAGE_TYPE_3D;
   uint32_t alignment = pCreateInfo->imageType == VK_IMAGE_TYPE_3D ? 4096 : 1;
   uint32_t cpp = vk_format_get_blocksize(pCreateInfo->format);

   /* NOTE(review): tile_alignment[] has gaps (e.g. cpp == 6) whose entries
    * are zero; a format with such a blocksize would get pitchalign ==
    * heightalign == 0 here.  Presumably unreachable — confirm against the
    * formats the driver exposes.
    */
   uint32_t heightalign = tile_alignment[cpp].heightalign;

   for (unsigned level = 0; level < pCreateInfo->mipLevels; level++) {
      struct tu_image_level *slice = &image->levels[level];
      bool linear_level = image_level_linear(image, level);
      uint32_t aligned_height = height;
      uint32_t blocks;
      uint32_t pitchalign;

      if (image->tile_mode && !linear_level) {
         /* Tiled level: use the cpp-dependent pitch/height alignment. */
         pitchalign = tile_alignment[cpp].pitchalign;
         aligned_height = align(aligned_height, heightalign);
      } else {
         pitchalign = 64;

         /* The blits used for mem<->gmem work at a granularity of
          * 32x32, which can cause faults due to over-fetch on the
          * last level. The simple solution is to over-allocate a
          * bit the last level to ensure any over-fetch is harmless.
          * The pitch is already sufficiently aligned, but height
          * may not be:
          */
         if ((level + 1 == pCreateInfo->mipLevels))
            aligned_height = align(aligned_height, 32);
      }

      /* ASTC block widths are not powers of two, so the pitch must be
       * aligned with the non-power-of-two helper.
       */
      if (layout == VK_FORMAT_LAYOUT_ASTC)
         slice->pitch = util_align_npot(
            width,
            pitchalign * vk_format_get_blockwidth(pCreateInfo->format));
      else
         slice->pitch = align(width, pitchalign);

      slice->offset = layer_size;
      blocks = vk_format_get_block_count(pCreateInfo->format, slice->pitch,
                                         aligned_height);

      /* 1d array and 2d array textures must all have the same layer size
       * for each miplevel on a3xx. 3d textures can have different layer
       * sizes for high levels, but the hw auto-sizer is buggy (or at least
       * different than what this code does), so as soon as the layer size
       * range gets into range, we stop reducing it.
       */
      if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D &&
          (level == 1 ||
           (level > 1 && image->levels[level - 1].size > 0xf000)))
         slice->size = align(blocks * cpp, alignment);
      else if (level == 0 || layer_first || alignment == 1)
         slice->size = align(blocks * cpp, alignment);
      else
         slice->size = image->levels[level - 1].size;

      layer_size += slice->size * depth;

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   image->layer_size = layer_size;
}
126
127 VkResult
128 tu_image_create(VkDevice _device,
129 const struct tu_image_create_info *create_info,
130 const VkAllocationCallbacks *alloc,
131 VkImage *pImage)
132 {
133 TU_FROM_HANDLE(tu_device, device, _device);
134 const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
135 struct tu_image *image = NULL;
136 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
137
138 tu_assert(pCreateInfo->mipLevels > 0);
139 tu_assert(pCreateInfo->arrayLayers > 0);
140 tu_assert(pCreateInfo->samples > 0);
141 tu_assert(pCreateInfo->extent.width > 0);
142 tu_assert(pCreateInfo->extent.height > 0);
143 tu_assert(pCreateInfo->extent.depth > 0);
144
145 image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
146 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
147 if (!image)
148 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
149
150 image->type = pCreateInfo->imageType;
151
152 image->vk_format = pCreateInfo->format;
153 image->tiling = pCreateInfo->tiling;
154 image->usage = pCreateInfo->usage;
155 image->flags = pCreateInfo->flags;
156 image->extent = pCreateInfo->extent;
157 image->level_count = pCreateInfo->mipLevels;
158 image->layer_count = pCreateInfo->arrayLayers;
159
160 image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
161 if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
162 for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
163 if (pCreateInfo->pQueueFamilyIndices[i] ==
164 VK_QUEUE_FAMILY_EXTERNAL_KHR)
165 image->queue_family_mask |= (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
166 else
167 image->queue_family_mask |=
168 1u << pCreateInfo->pQueueFamilyIndices[i];
169 }
170
171 image->shareable =
172 vk_find_struct_const(pCreateInfo->pNext,
173 EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
174
175 image->tile_mode = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? 3 : 0;
176 setup_slices(image, pCreateInfo);
177
178 image->size = image->layer_size * pCreateInfo->arrayLayers;
179 *pImage = tu_image_to_handle(image);
180
181 return VK_SUCCESS;
182 }
183
184 void
185 tu_image_view_init(struct tu_image_view *iview,
186 struct tu_device *device,
187 const VkImageViewCreateInfo *pCreateInfo)
188 {
189 TU_FROM_HANDLE(tu_image, image, pCreateInfo->image);
190 const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
191
192 switch (image->type) {
193 case VK_IMAGE_TYPE_1D:
194 case VK_IMAGE_TYPE_2D:
195 assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
196 image->layer_count);
197 break;
198 case VK_IMAGE_TYPE_3D:
199 assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
200 tu_minify(image->extent.depth, range->baseMipLevel));
201 break;
202 default:
203 unreachable("bad VkImageType");
204 }
205
206 iview->image = image;
207 iview->type = pCreateInfo->viewType;
208 iview->vk_format = pCreateInfo->format;
209 iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
210
211 if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
212 iview->vk_format = vk_format_stencil_only(iview->vk_format);
213 } else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
214 iview->vk_format = vk_format_depth_only(iview->vk_format);
215 }
216
217 // should we minify?
218 iview->extent = image->extent;
219
220 iview->base_layer = range->baseArrayLayer;
221 iview->layer_count = tu_get_layerCount(image, range);
222 iview->base_mip = range->baseMipLevel;
223 iview->level_count = tu_get_levelCount(image, range);
224 }
225
226 unsigned
227 tu_image_queue_family_mask(const struct tu_image *image,
228 uint32_t family,
229 uint32_t queue_family)
230 {
231 if (!image->exclusive)
232 return image->queue_family_mask;
233 if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
234 return (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
235 if (family == VK_QUEUE_FAMILY_IGNORED)
236 return 1u << queue_family;
237 return 1u << family;
238 }
239
240 VkResult
241 tu_CreateImage(VkDevice device,
242 const VkImageCreateInfo *pCreateInfo,
243 const VkAllocationCallbacks *pAllocator,
244 VkImage *pImage)
245 {
246 #ifdef ANDROID
247 const VkNativeBufferANDROID *gralloc_info =
248 vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);
249
250 if (gralloc_info)
251 return tu_image_from_gralloc(device, pCreateInfo, gralloc_info,
252 pAllocator, pImage);
253 #endif
254
255 return tu_image_create(device,
256 &(struct tu_image_create_info) {
257 .vk_info = pCreateInfo,
258 .scanout = false,
259 },
260 pAllocator, pImage);
261 }
262
263 void
264 tu_DestroyImage(VkDevice _device,
265 VkImage _image,
266 const VkAllocationCallbacks *pAllocator)
267 {
268 TU_FROM_HANDLE(tu_device, device, _device);
269 TU_FROM_HANDLE(tu_image, image, _image);
270
271 if (!image)
272 return;
273
274 if (image->owned_memory != VK_NULL_HANDLE)
275 tu_FreeMemory(_device, image->owned_memory, pAllocator);
276
277 vk_free2(&device->alloc, pAllocator, image);
278 }
279
280 void
281 tu_GetImageSubresourceLayout(VkDevice _device,
282 VkImage _image,
283 const VkImageSubresource *pSubresource,
284 VkSubresourceLayout *pLayout)
285 {
286 tu_stub();
287
288 /* Even though this is a stub, let's avoid heisenbugs by providing
289 * deterministic behavior.
290 */
291 memset(pLayout, 0, sizeof(*pLayout));
292 }
293
294 VkResult
295 tu_CreateImageView(VkDevice _device,
296 const VkImageViewCreateInfo *pCreateInfo,
297 const VkAllocationCallbacks *pAllocator,
298 VkImageView *pView)
299 {
300 TU_FROM_HANDLE(tu_device, device, _device);
301 struct tu_image_view *view;
302
303 view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
304 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
305 if (view == NULL)
306 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
307
308 tu_image_view_init(view, device, pCreateInfo);
309
310 *pView = tu_image_view_to_handle(view);
311
312 return VK_SUCCESS;
313 }
314
315 void
316 tu_DestroyImageView(VkDevice _device,
317 VkImageView _iview,
318 const VkAllocationCallbacks *pAllocator)
319 {
320 TU_FROM_HANDLE(tu_device, device, _device);
321 TU_FROM_HANDLE(tu_image_view, iview, _iview);
322
323 if (!iview)
324 return;
325 vk_free2(&device->alloc, pAllocator, iview);
326 }
327
328 void
329 tu_buffer_view_init(struct tu_buffer_view *view,
330 struct tu_device *device,
331 const VkBufferViewCreateInfo *pCreateInfo)
332 {
333 TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
334
335 view->range = pCreateInfo->range == VK_WHOLE_SIZE
336 ? buffer->size - pCreateInfo->offset
337 : pCreateInfo->range;
338 view->vk_format = pCreateInfo->format;
339 }
340
341 VkResult
342 tu_CreateBufferView(VkDevice _device,
343 const VkBufferViewCreateInfo *pCreateInfo,
344 const VkAllocationCallbacks *pAllocator,
345 VkBufferView *pView)
346 {
347 TU_FROM_HANDLE(tu_device, device, _device);
348 struct tu_buffer_view *view;
349
350 view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
351 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
352 if (!view)
353 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
354
355 tu_buffer_view_init(view, device, pCreateInfo);
356
357 *pView = tu_buffer_view_to_handle(view);
358
359 return VK_SUCCESS;
360 }
361
362 void
363 tu_DestroyBufferView(VkDevice _device,
364 VkBufferView bufferView,
365 const VkAllocationCallbacks *pAllocator)
366 {
367 TU_FROM_HANDLE(tu_device, device, _device);
368 TU_FROM_HANDLE(tu_buffer_view, view, bufferView);
369
370 if (!view)
371 return;
372
373 vk_free2(&device->alloc, pAllocator, view);
374 }