anv/wsi: Allocate enough memory for the entire image
src/intel/vulkan/anv_wsi.c (mesa.git)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
#include "wsi_common.h"
#include "vk_format_info.h"
#include "vk_util.h"

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
static const struct wsi_callbacks wsi_cbs = {
   .get_phys_device_format_properties = anv_GetPhysicalDeviceFormatProperties,
};
#endif

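/* Initialize the WSI backend for every window-system platform that was
 * compiled in.  If a later backend fails, any backend that was already
 * brought up is torn down again before returning the error.
 */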
VkResult
anv_init_wsi(struct anv_physical_device *physical_device)
{
   VkResult result;

   memset(physical_device->wsi_device.wsi, 0,
          sizeof(physical_device->wsi_device.wsi));

#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(&physical_device->wsi_device,
                             &physical_device->instance->alloc);
   if (result != VK_SUCCESS)
      return result;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(&physical_device->wsi_device,
                            &physical_device->instance->alloc,
                            anv_physical_device_to_handle(physical_device),
                            &wsi_cbs);
   if (result != VK_SUCCESS) {
#ifdef VK_USE_PLATFORM_XCB_KHR
      wsi_x11_finish_wsi(&physical_device->wsi_device,
                         &physical_device->instance->alloc);
#endif
      return result;
   }
#endif

   return VK_SUCCESS;
}

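/* Tear down the per-platform WSI state set up by anv_init_wsi(). */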
void
anv_finish_wsi(struct anv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(&physical_device->wsi_device,
                     &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(&physical_device->wsi_device,
                      &physical_device->instance->alloc);
#endif
}

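/* Surfaces are created by the platform-specific vkCreate*SurfaceKHR entry
 * points; destruction is platform-independent and just frees the ICD
 * surface structure with the instance allocator.
 */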
void anv_DestroySurfaceKHR(
    VkInstance                                   _instance,
    VkSurfaceKHR                                 _surface,
    const VkAllocationCallbacks*                 pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

   vk_free2(&instance->alloc, pAllocator, surface);
}

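/* The surface queries below simply dispatch through the per-platform
 * wsi_interface that anv_init_wsi() registered for the surface's platform.
 */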
VkResult anv_GetPhysicalDeviceSurfaceSupportKHR(
    VkPhysicalDevice                             physicalDevice,
    uint32_t                                     queueFamilyIndex,
    VkSurfaceKHR                                 _surface,
    VkBool32*                                    pSupported)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_support(surface, &device->wsi_device,
                             &device->instance->alloc,
                             queueFamilyIndex, device->local_fd, false,
                             pSupported);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
    VkPhysicalDevice                             physicalDevice,
    VkSurfaceKHR                                 _surface,
    VkSurfaceCapabilitiesKHR*                    pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities(surface, pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilities2KHR(
    VkPhysicalDevice                             physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR*       pSurfaceInfo,
    VkSurfaceCapabilities2KHR*                   pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities2(surface, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceFormatsKHR(
    VkPhysicalDevice                             physicalDevice,
    VkSurfaceKHR                                 _surface,
    uint32_t*                                    pSurfaceFormatCount,
    VkSurfaceFormatKHR*                          pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats(surface, &device->wsi_device,
                             pSurfaceFormatCount, pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfaceFormats2KHR(
    VkPhysicalDevice                             physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR*       pSurfaceInfo,
    uint32_t*                                    pSurfaceFormatCount,
    VkSurfaceFormat2KHR*                         pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats2(surface, &device->wsi_device,
                              pSurfaceInfo->pNext,
                              pSurfaceFormatCount, pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfacePresentModesKHR(
    VkPhysicalDevice                             physicalDevice,
    VkSurfaceKHR                                 _surface,
    uint32_t*                                    pPresentModeCount,
    VkPresentModeKHR*                            pPresentModes)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_present_modes(surface, pPresentModeCount,
                                   pPresentModes);
}

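/* Callback used by the common WSI code to create a presentable image.  It
 * creates an X-tiled color image, allocates a dedicated BO large enough for
 * the whole image, tells the kernel about the tiling, and exports the BO as
 * a prime fd that the platform code hands to the window system.
 */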
static VkResult
anv_wsi_image_create(VkDevice device_h,
                     const VkSwapchainCreateInfoKHR *pCreateInfo,
                     const VkAllocationCallbacks* pAllocator,
                     bool different_gpu,
                     bool linear,
                     VkImage *image_p,
                     VkDeviceMemory *memory_p,
                     uint32_t *size,
                     uint32_t *offset,
                     uint32_t *row_pitch, int *fd_p)
{
   struct anv_device *device = anv_device_from_handle(device_h);
   VkImage image_h;
   struct anv_image *image;

   VkResult result;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = (pCreateInfo->imageUsage |
                   VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->vk_format));

   VkDeviceMemory memory_h;
   struct anv_device_memory *memory;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   memory = anv_device_memory_from_handle(memory_h);

   /* We need to set the WRITE flag on window system buffers so that GEM will
    * know we're writing to them and synchronize uses on other rings (eg if
    * the display server uses the blitter ring).
    */
   memory->bo->flags &= ~EXEC_OBJECT_ASYNC;
   memory->bo->flags |= EXEC_OBJECT_WRITE;

   anv_BindImageMemory(device_h, image_h, memory_h, 0);
   /* We report *offset = 0 and *size = image->size to the WSI code, so
    * plane 0 must start at the beginning of the BO.
    */
   assert(image->planes[0].offset == 0);

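   /* The BO is shared with the window system, so the kernel needs to know
    * about the X tiling for other users of the buffer to see a consistent
    * layout.
    */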
   struct anv_surface *surface = &image->planes[0].surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   *row_pitch = surface->isl.row_pitch;
   int ret = anv_gem_set_tiling(device, memory->bo->gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, memory->bo->gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

   *image_p = image_h;
   *memory_p = memory_h;
   *fd_p = fd;
   *size = image->size;
   *offset = 0;
   return VK_SUCCESS;
fail_alloc_memory:
   anv_FreeMemory(device_h, memory_h, pAllocator);

fail_create_image:
   anv_DestroyImage(device_h, image_h, pAllocator);
   return result;
}

static void
anv_wsi_image_free(VkDevice device,
                   const VkAllocationCallbacks* pAllocator,
                   VkImage image_h,
                   VkDeviceMemory memory_h)
{
   anv_DestroyImage(device, image_h, pAllocator);

   anv_FreeMemory(device, memory_h, pAllocator);
}

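/* Image hooks handed to the common WSI swapchain code. */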
static const struct wsi_image_fns anv_wsi_image_fns = {
   .create_wsi_image = anv_wsi_image_create,
   .free_wsi_image = anv_wsi_image_free,
};

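/* Swapchain creation is delegated to the platform's wsi_interface.  The
 * fence array in the returned wsi_swapchain is initialized here and used by
 * anv_QueuePresentKHR.
 */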
VkResult anv_CreateSwapchainKHR(
    VkDevice                                     _device,
    const VkSwapchainCreateInfoKHR*              pCreateInfo,
    const VkAllocationCallbacks*                 pAllocator,
    VkSwapchainKHR*                              pSwapchain)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface =
      device->instance->physicalDevice.wsi_device.wsi[surface->platform];
   struct wsi_swapchain *swapchain;
   const VkAllocationCallbacks *alloc;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   VkResult result = iface->create_swapchain(surface, _device,
                                             &device->instance->physicalDevice.wsi_device,
                                             device->instance->physicalDevice.local_fd,
                                             pCreateInfo,
                                             alloc, &anv_wsi_image_fns,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   swapchain->alloc = *alloc;

   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
      swapchain->fences[i] = VK_NULL_HANDLE;

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}

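/* Destroy the per-swapchain fences before handing the swapchain back to the
 * platform code for destruction.
 */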
void anv_DestroySwapchainKHR(
    VkDevice                                     _device,
    VkSwapchainKHR                               _swapchain,
    const VkAllocationCallbacks*                 pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   const VkAllocationCallbacks *alloc;

   if (!swapchain)
      return;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
      if (swapchain->fences[i] != VK_NULL_HANDLE)
         anv_DestroyFence(_device, swapchain->fences[i], pAllocator);
   }

   swapchain->destroy(swapchain, alloc);
}

VkResult anv_GetSwapchainImagesKHR(
    VkDevice                                     device,
    VkSwapchainKHR                               _swapchain,
    uint32_t*                                    pSwapchainImageCount,
    VkImage*                                     pSwapchainImages)
{
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);

   return swapchain->get_images(swapchain, pSwapchainImageCount,
                                pSwapchainImages);
}

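/* Acquisition itself is handled entirely by the platform code; implicit
 * sync on the shared BO makes the image usable as soon as it is returned.
 * If the caller provided a fence, an empty submit is queued so the fence
 * signals once previously submitted GPU work has completed.
 */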
VkResult anv_AcquireNextImageKHR(
    VkDevice                                     _device,
    VkSwapchainKHR                               _swapchain,
    uint64_t                                     timeout,
    VkSemaphore                                  semaphore,
    VkFence                                      _fence,
    uint32_t*                                    pImageIndex)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   VkResult result = swapchain->acquire_next_image(swapchain, timeout,
                                                   semaphore, pImageIndex);

   /* Thanks to implicit sync, the image is ready immediately. However, we
    * should wait for the current GPU state to finish.
    */
   if (fence)
      anv_QueueSubmit(anv_queue_to_handle(&device->queue), 0, NULL, _fence);

   return result;
}

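/* Present each swapchain in the batch.  An empty submit signalling a
 * per-swapchain fence is queued before each present, and the fences from
 * the most recent presents are kept in a small ring that is rotated at the
 * end of the loop body.
 */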
VkResult anv_QueuePresentKHR(
    VkQueue                                      _queue,
    const VkPresentInfoKHR*                      pPresentInfo)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   VkResult result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      ANV_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      VkResult item_result;

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      assert(anv_device_from_handle(swapchain->device) == queue->device);

      if (swapchain->fences[0] == VK_NULL_HANDLE) {
         item_result = anv_CreateFence(anv_device_to_handle(queue->device),
                                       &(VkFenceCreateInfo) {
                                          .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
                                          .flags = 0,
                                       }, &swapchain->alloc, &swapchain->fences[0]);
         if (pPresentInfo->pResults != NULL)
            pPresentInfo->pResults[i] = item_result;
         result = result == VK_SUCCESS ? item_result : result;
         if (item_result != VK_SUCCESS)
            continue;
      } else {
         anv_ResetFences(anv_device_to_handle(queue->device),
                         1, &swapchain->fences[0]);
      }

      anv_QueueSubmit(_queue, 0, NULL, swapchain->fences[0]);

      item_result = swapchain->queue_present(swapchain,
                                             pPresentInfo->pImageIndices[i],
                                             region);
      /* TODO: What if one of them returns OUT_OF_DATE? */
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = item_result;
      result = result == VK_SUCCESS ? item_result : result;
      if (item_result != VK_SUCCESS)
         continue;

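      /* Rotate the ring of per-present fences: the fence just submitted is
       * pushed back, and the oldest one (from two presents ago) is briefly
       * waited on below and then recycled into slot 0 for the next present.
       */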
      VkFence last = swapchain->fences[2];
      swapchain->fences[2] = swapchain->fences[1];
      swapchain->fences[1] = swapchain->fences[0];
      swapchain->fences[0] = last;

      if (last != VK_NULL_HANDLE) {
         anv_WaitForFences(anv_device_to_handle(queue->device),
                           1, &last, true, 1);
      }
   }

   return VK_SUCCESS;
}