vulkan/wsi: use function ptr definitions from the spec.
[mesa.git] src/intel/vulkan/anv_wsi.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
#include "wsi_common.h"
#include "vk_format_info.h"
#include "vk_util.h"

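/* Callback table handed to the shared WSI code.  WSI_CB(x) expands to
 * ".x = anv_x", so each entry simply points the common layer at the
 * corresponding anv_* entry point.
 */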
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#define WSI_CB(x) .x = anv_##x
static const struct wsi_callbacks wsi_cbs = {
   WSI_CB(GetPhysicalDeviceFormatProperties),
};
#endif

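/* Initialize the per-platform WSI backends enabled at build time (XCB/X11
 * and Wayland).  If a later backend fails to initialize, the ones that
 * already succeeded are torn down before returning the error.
 */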
VkResult
anv_init_wsi(struct anv_physical_device *physical_device)
{
   VkResult result;

   memset(physical_device->wsi_device.wsi, 0, sizeof(physical_device->wsi_device.wsi));

#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
   if (result != VK_SUCCESS)
      return result;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc,
                            anv_physical_device_to_handle(physical_device),
                            &wsi_cbs);
   if (result != VK_SUCCESS) {
#ifdef VK_USE_PLATFORM_XCB_KHR
      wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
      return result;
   }
#endif

   return VK_SUCCESS;
}

void
anv_finish_wsi(struct anv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
}

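/* Surfaces are allocated by the platform-specific vkCreate*SurfaceKHR
 * implementations; the common VkIcdSurfaceBase header lets this generic
 * destroy path free them with the matching allocator.
 */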
void anv_DestroySurfaceKHR(
    VkInstance                                  _instance,
    VkSurfaceKHR                                _surface,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

   vk_free2(&instance->alloc, pAllocator, surface);
}

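/* All of the surface queries below follow the same pattern: look up the
 * per-platform wsi_interface via surface->platform and forward the call.
 */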
VkResult anv_GetPhysicalDeviceSurfaceSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    VkSurfaceKHR                                _surface,
    VkBool32*                                   pSupported)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_support(surface, &device->wsi_device,
                             &device->instance->alloc,
                             queueFamilyIndex, device->local_fd, false, pSupported);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
    VkPhysicalDevice                            physicalDevice,
    VkSurfaceKHR                                _surface,
    VkSurfaceCapabilitiesKHR*                   pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities(surface, pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilities2KHR(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,
    VkSurfaceCapabilities2KHR*                  pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities2(surface, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceFormatsKHR(
    VkPhysicalDevice                            physicalDevice,
    VkSurfaceKHR                                _surface,
    uint32_t*                                   pSurfaceFormatCount,
    VkSurfaceFormatKHR*                         pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats(surface, &device->wsi_device, pSurfaceFormatCount,
                             pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfaceFormats2KHR(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR*      pSurfaceInfo,
    uint32_t*                                   pSurfaceFormatCount,
    VkSurfaceFormat2KHR*                        pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats2(surface, &device->wsi_device, pSurfaceInfo->pNext,
                              pSurfaceFormatCount, pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfacePresentModesKHR(
    VkPhysicalDevice                            physicalDevice,
    VkSurfaceKHR                                _surface,
    uint32_t*                                   pPresentModeCount,
    VkPresentModeKHR*                           pPresentModes)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_present_modes(surface, pPresentModeCount,
                                   pPresentModes);
}

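/* Create a presentable image for the common swapchain code: an X-tiled
 * color image plus its backing memory, with the GEM handle exported to a
 * file descriptor so the window system can import the buffer.
 */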
static VkResult
anv_wsi_image_create(VkDevice device_h,
                     const VkSwapchainCreateInfoKHR *pCreateInfo,
                     const VkAllocationCallbacks* pAllocator,
                     bool different_gpu,
                     bool linear,
                     VkImage *image_p,
                     VkDeviceMemory *memory_p,
                     uint32_t *size,
                     uint32_t *offset,
                     uint32_t *row_pitch, int *fd_p)
{
   struct anv_device *device = anv_device_from_handle(device_h);
   VkImage image_h;
   struct anv_image *image;

   VkResult result;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = (pCreateInfo->imageUsage |
                   VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->vk_format));

   VkDeviceMemory memory_h;
   struct anv_device_memory *memory;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   memory = anv_device_memory_from_handle(memory_h);

   /* We need to set the WRITE flag on window system buffers so that GEM will
    * know we're writing to them and synchronize uses on other rings (eg if
    * the display server uses the blitter ring).
    */
   memory->bo->flags &= ~EXEC_OBJECT_ASYNC;
   memory->bo->flags |= EXEC_OBJECT_WRITE;

   anv_BindImageMemory(device_h, image_h, memory_h, 0);
   assert(image->planes[0].offset == 0);

   struct anv_surface *surface = &image->planes[0].surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   *row_pitch = surface->isl.row_pitch;
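   /* Set the tiling mode on the GEM BO to match the X-tiled surface so that
    * consumers of the shared buffer see consistent tiling metadata.
    */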
   int ret = anv_gem_set_tiling(device, memory->bo->gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, memory->bo->gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

   *image_p = image_h;
   *memory_p = memory_h;
   *fd_p = fd;
   *size = image->size;
   *offset = 0;
   return VK_SUCCESS;

fail_alloc_memory:
   anv_FreeMemory(device_h, memory_h, pAllocator);

fail_create_image:
   anv_DestroyImage(device_h, image_h, pAllocator);
   return result;
}

static void
anv_wsi_image_free(VkDevice device,
                   const VkAllocationCallbacks* pAllocator,
                   VkImage image_h,
                   VkDeviceMemory memory_h)
{
   anv_DestroyImage(device, image_h, pAllocator);

   anv_FreeMemory(device, memory_h, pAllocator);
}

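/* Image hooks handed to the shared swapchain code, which calls these to
 * create and destroy the images backing each swapchain.
 */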
static const struct wsi_image_fns anv_wsi_image_fns = {
   .create_wsi_image = anv_wsi_image_create,
   .free_wsi_image = anv_wsi_image_free,
};

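/* Swapchain creation is delegated to the platform backend via
 * iface->create_swapchain; the fences used later by anv_QueuePresentKHR
 * are initialized to VK_NULL_HANDLE here.
 */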
VkResult anv_CreateSwapchainKHR(
    VkDevice                                    _device,
    const VkSwapchainCreateInfoKHR*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSwapchainKHR*                             pSwapchain)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface =
      device->instance->physicalDevice.wsi_device.wsi[surface->platform];
   struct wsi_swapchain *swapchain;
   const VkAllocationCallbacks *alloc;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   VkResult result = iface->create_swapchain(surface, _device,
                                             &device->instance->physicalDevice.wsi_device,
                                             device->instance->physicalDevice.local_fd,
                                             pCreateInfo,
                                             alloc, &anv_wsi_image_fns,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   swapchain->alloc = *alloc;

   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
      swapchain->fences[i] = VK_NULL_HANDLE;

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}

void anv_DestroySwapchainKHR(
    VkDevice                                    _device,
    VkSwapchainKHR                              _swapchain,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   const VkAllocationCallbacks *alloc;

   if (!swapchain)
      return;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
      if (swapchain->fences[i] != VK_NULL_HANDLE)
         anv_DestroyFence(_device, swapchain->fences[i], pAllocator);
   }

   swapchain->destroy(swapchain, alloc);
}

VkResult anv_GetSwapchainImagesKHR(
    VkDevice                                    device,
    VkSwapchainKHR                              _swapchain,
    uint32_t*                                   pSwapchainImageCount,
    VkImage*                                    pSwapchainImages)
{
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);

   return swapchain->get_images(swapchain, pSwapchainImageCount,
                                pSwapchainImages);
}

VkResult anv_AcquireNextImageKHR(
    VkDevice                                    _device,
    VkSwapchainKHR                              _swapchain,
    uint64_t                                    timeout,
    VkSemaphore                                 semaphore,
    VkFence                                     _fence,
    uint32_t*                                   pImageIndex)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   VkResult result = swapchain->acquire_next_image(swapchain, timeout,
                                                   semaphore, pImageIndex);

   /* Thanks to implicit sync, the image is ready immediately. However, we
    * should wait for the current GPU state to finish.
    */
   if (fence)
      anv_QueueSubmit(anv_queue_to_handle(&device->queue), 0, NULL, _fence);

   return result;
}

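/* Present each swapchain in the request: submit an empty batch that signals
 * the swapchain's fences[0] (created on first use), hand the image to the
 * platform backend, and record the per-swapchain result when the caller
 * asked for one.
 */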
VkResult anv_QueuePresentKHR(
    VkQueue                                     _queue,
    const VkPresentInfoKHR*                     pPresentInfo)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   VkResult result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      ANV_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      VkResult item_result;

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      assert(anv_device_from_handle(swapchain->device) == queue->device);

      if (swapchain->fences[0] == VK_NULL_HANDLE) {
         item_result = anv_CreateFence(anv_device_to_handle(queue->device),
            &(VkFenceCreateInfo) {
               .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
               .flags = 0,
            }, &swapchain->alloc, &swapchain->fences[0]);
         if (pPresentInfo->pResults != NULL)
            pPresentInfo->pResults[i] = item_result;
         result = result == VK_SUCCESS ? item_result : result;
         if (item_result != VK_SUCCESS)
            continue;
      } else {
         anv_ResetFences(anv_device_to_handle(queue->device),
                         1, &swapchain->fences[0]);
      }

      anv_QueueSubmit(_queue, 0, NULL, swapchain->fences[0]);

      item_result = swapchain->queue_present(swapchain,
                                             pPresentInfo->pImageIndices[i],
                                             region);
      /* TODO: What if one of them returns OUT_OF_DATE? */
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = item_result;
      result = result == VK_SUCCESS ? item_result : result;
      if (item_result != VK_SUCCESS)
         continue;

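      /* Rotate the fence ring: the fence just submitted for this present
       * moves down the array, and the oldest fence is briefly waited on
       * below before being reused for the next present.
       */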
      VkFence last = swapchain->fences[2];
      swapchain->fences[2] = swapchain->fences[1];
      swapchain->fences[1] = swapchain->fences[0];
      swapchain->fences[0] = last;

      if (last != VK_NULL_HANDLE) {
         anv_WaitForFences(anv_device_to_handle(queue->device),
                           1, &last, true, 1);
      }
   }

   return result;
}