src/intel/vulkan/anv_wsi.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
#include "wsi_common.h"
#include "vk_format_info.h"
#include "vk_util.h"

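/* Callback table handed to the Wayland WSI code below: WSI_CB(x) expands to
 * ".x = anv_##x", wiring the common WSI layer back to the driver's own
 * entrypoints (only the format property query is needed here).
 */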
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#define WSI_CB(x) .x = anv_##x
static const struct wsi_callbacks wsi_cbs = {
   WSI_CB(GetPhysicalDeviceFormatProperties),
};
#endif

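/*
 * Initialize every window-system backend this build was configured with.
 * Each backend registers itself in physical_device->wsi_device.wsi[], which
 * the per-surface queries below dispatch through.  If a later backend fails
 * to initialize, the ones that already succeeded are torn down again before
 * the error is returned.
 */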
VkResult
anv_init_wsi(struct anv_physical_device *physical_device)
{
   VkResult result;

   memset(physical_device->wsi_device.wsi, 0, sizeof(physical_device->wsi_device.wsi));

#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
   if (result != VK_SUCCESS)
      return result;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc,
                            anv_physical_device_to_handle(physical_device),
                            &wsi_cbs);
   if (result != VK_SUCCESS) {
#ifdef VK_USE_PLATFORM_XCB_KHR
      wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
      return result;
   }
#endif

   return VK_SUCCESS;
}

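/* Tear down whatever anv_init_wsi() managed to set up. */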
void
anv_finish_wsi(struct anv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
}

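/* Surfaces are created by the platform WSI code out of the instance
 * allocator, so destroying one is just a free of the ICD surface struct.
 */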
void anv_DestroySurfaceKHR(
    VkInstance _instance,
    VkSurfaceKHR _surface,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

   vk_free2(&instance->alloc, pAllocator, surface);
}

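/* Each of the vkGetPhysicalDeviceSurface* queries follows the same pattern:
 * look up the platform's wsi_interface from the ICD surface's platform id
 * and forward the call to it.
 */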
VkResult anv_GetPhysicalDeviceSurfaceSupportKHR(
    VkPhysicalDevice physicalDevice,
    uint32_t queueFamilyIndex,
    VkSurfaceKHR _surface,
    VkBool32* pSupported)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_support(surface, &device->wsi_device,
                             &device->instance->alloc,
                             queueFamilyIndex, device->local_fd, false, pSupported);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
    VkPhysicalDevice physicalDevice,
    VkSurfaceKHR _surface,
    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities(surface, pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceCapabilities2KHR(
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
    VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_capabilities2(surface, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}

VkResult anv_GetPhysicalDeviceSurfaceFormatsKHR(
    VkPhysicalDevice physicalDevice,
    VkSurfaceKHR _surface,
    uint32_t* pSurfaceFormatCount,
    VkSurfaceFormatKHR* pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats(surface, &device->wsi_device, pSurfaceFormatCount,
                             pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfaceFormats2KHR(
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
    uint32_t* pSurfaceFormatCount,
    VkSurfaceFormat2KHR* pSurfaceFormats)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_formats2(surface, &device->wsi_device, pSurfaceInfo->pNext,
                              pSurfaceFormatCount, pSurfaceFormats);
}

VkResult anv_GetPhysicalDeviceSurfacePresentModesKHR(
    VkPhysicalDevice physicalDevice,
    VkSurfaceKHR _surface,
    uint32_t* pPresentModeCount,
    VkPresentModeKHR* pPresentModes)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];

   return iface->get_present_modes(surface, pPresentModeCount,
                                   pPresentModes);
}

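/*
 * Create one presentable image for a swapchain: an X-tiled anv_image backed
 * by its own memory allocation, with the underlying BO exported as a prime
 * file descriptor so the platform WSI code can share it with the window
 * system.
 */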
static VkResult
anv_wsi_image_create(VkDevice device_h,
                     const VkSwapchainCreateInfoKHR *pCreateInfo,
                     const VkAllocationCallbacks* pAllocator,
                     bool different_gpu,
                     bool linear,
                     struct wsi_image *wsi_image)
{
   struct anv_device *device = anv_device_from_handle(device_h);
   VkImage image_h;
   struct anv_image *image;

   VkResult result;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = (pCreateInfo->imageUsage |
                   VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->vk_format));

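   /* Allocate dedicated backing memory for the presentable image. */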
   VkDeviceMemory memory_h;
   struct anv_device_memory *memory;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   memory = anv_device_memory_from_handle(memory_h);

   /* We need to set the WRITE flag on window system buffers so that GEM will
    * know we're writing to them and synchronize uses on other rings (eg if
    * the display server uses the blitter ring).
    */
   memory->bo->flags &= ~EXEC_OBJECT_ASYNC;
   memory->bo->flags |= EXEC_OBJECT_WRITE;

   anv_BindImageMemory(device_h, image_h, memory_h, 0);
   assert(image->planes[0].offset == 0);

   struct anv_surface *surface = &image->planes[0].surface;
   assert(surface->isl.tiling == ISL_TILING_X);

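   /* Tell the kernel about the surface's X tiling so that consumers mapping
    * or scanning out the BO see the layout we actually render with.
    */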
   int ret = anv_gem_set_tiling(device, memory->bo->gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

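   /* Export the BO as a prime fd; this is what the platform WSI code shares
    * with the window system.
    */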
   int fd = anv_gem_handle_to_fd(device, memory->bo->gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(device->instance, device,
                         VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

   wsi_image->image = image_h;
   wsi_image->memory = memory_h;
   wsi_image->fd = fd;
   wsi_image->size = image->size;
   wsi_image->offset = 0;
   wsi_image->row_pitch = surface->isl.row_pitch;
   return VK_SUCCESS;
fail_alloc_memory:
   anv_FreeMemory(device_h, memory_h, pAllocator);

fail_create_image:
   anv_DestroyImage(device_h, image_h, pAllocator);
   return result;
}

static void
anv_wsi_image_free(VkDevice device,
                   const VkAllocationCallbacks* pAllocator,
                   struct wsi_image *wsi_image)
{
   anv_DestroyImage(device, wsi_image->image, pAllocator);

   anv_FreeMemory(device, wsi_image->memory, pAllocator);
}

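/* Image vtable handed to the platform swapchain implementations so they can
 * create and destroy presentable images through the driver.
 */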
static const struct wsi_image_fns anv_wsi_image_fns = {
   .create_wsi_image = anv_wsi_image_create,
   .free_wsi_image = anv_wsi_image_free,
};

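/*
 * Swapchain creation is delegated to the surface's platform backend; the
 * driver only chooses the allocator and clears the per-swapchain fences
 * that anv_QueuePresentKHR cycles through below.
 */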
VkResult anv_CreateSwapchainKHR(
    VkDevice _device,
    const VkSwapchainCreateInfoKHR* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSwapchainKHR* pSwapchain)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
   struct wsi_interface *iface =
      device->instance->physicalDevice.wsi_device.wsi[surface->platform];
   struct wsi_swapchain *swapchain;
   const VkAllocationCallbacks *alloc;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   VkResult result = iface->create_swapchain(surface, _device,
                                             &device->instance->physicalDevice.wsi_device,
                                             device->instance->physicalDevice.local_fd,
                                             pCreateInfo,
                                             alloc, &anv_wsi_image_fns,
                                             &swapchain);
   if (result != VK_SUCCESS)
      return result;

   swapchain->alloc = *alloc;

   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
      swapchain->fences[i] = VK_NULL_HANDLE;

   *pSwapchain = wsi_swapchain_to_handle(swapchain);

   return VK_SUCCESS;
}

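/* Destroy the throttle fences created by anv_QueuePresentKHR before handing
 * the swapchain back to its platform backend for destruction.
 */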
void anv_DestroySwapchainKHR(
    VkDevice _device,
    VkSwapchainKHR _swapchain,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   const VkAllocationCallbacks *alloc;

   if (!swapchain)
      return;

   if (pAllocator)
      alloc = pAllocator;
   else
      alloc = &device->alloc;
   for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
      if (swapchain->fences[i] != VK_NULL_HANDLE)
         anv_DestroyFence(_device, swapchain->fences[i], pAllocator);
   }

   swapchain->destroy(swapchain, alloc);
}

VkResult anv_GetSwapchainImagesKHR(
    VkDevice device,
    VkSwapchainKHR _swapchain,
    uint32_t* pSwapchainImageCount,
    VkImage* pSwapchainImages)
{
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);

   return swapchain->get_images(swapchain, pSwapchainImageCount,
                                pSwapchainImages);
}

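/*
 * Acquisition itself is handled by the platform backend; the driver only
 * has to deal with the optional fence, see the comment below.
 */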
VkResult anv_AcquireNextImageKHR(
    VkDevice _device,
    VkSwapchainKHR _swapchain,
    uint64_t timeout,
    VkSemaphore semaphore,
    VkFence _fence,
    uint32_t* pImageIndex)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   VkResult result = swapchain->acquire_next_image(swapchain, timeout,
                                                   semaphore, pImageIndex);

   /* Thanks to implicit sync, the image is ready immediately. However, we
    * should wait for the current GPU state to finish.
    */
   if (fence)
      anv_QueueSubmit(anv_queue_to_handle(&device->queue), 0, NULL, _fence);

   return result;
}

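/*
 * Present each swapchain in pPresentInfo through its platform backend.
 * Before each present, an empty submission signals one of the swapchain's
 * fences; the fences are kept in a small ring so that, by the time a fence
 * is reset and reused, the work from a couple of frames ago has had a
 * chance to complete.
 */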
VkResult anv_QueuePresentKHR(
    VkQueue _queue,
    const VkPresentInfoKHR* pPresentInfo)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   VkResult result = VK_SUCCESS;

   const VkPresentRegionsKHR *regions =
      vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);

   for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
      ANV_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
      VkResult item_result;

      const VkPresentRegionKHR *region = NULL;
      if (regions && regions->pRegions)
         region = &regions->pRegions[i];

      assert(anv_device_from_handle(swapchain->device) == queue->device);

      if (swapchain->fences[0] == VK_NULL_HANDLE) {
         item_result = anv_CreateFence(anv_device_to_handle(queue->device),
            &(VkFenceCreateInfo) {
               .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
               .flags = 0,
            }, &swapchain->alloc, &swapchain->fences[0]);
         if (pPresentInfo->pResults != NULL)
            pPresentInfo->pResults[i] = item_result;
         result = result == VK_SUCCESS ? item_result : result;
         if (item_result != VK_SUCCESS)
            continue;
      } else {
         anv_ResetFences(anv_device_to_handle(queue->device),
                         1, &swapchain->fences[0]);
      }

      anv_QueueSubmit(_queue, 0, NULL, swapchain->fences[0]);

      item_result = swapchain->queue_present(swapchain,
                                             pPresentInfo->pImageIndices[i],
                                             region);
      /* TODO: What if one of them returns OUT_OF_DATE? */
      if (pPresentInfo->pResults != NULL)
         pPresentInfo->pResults[i] = item_result;
      result = result == VK_SUCCESS ? item_result : result;
      if (item_result != VK_SUCCESS)
         continue;

      VkFence last = swapchain->fences[2];
      swapchain->fences[2] = swapchain->fences[1];
      swapchain->fences[1] = swapchain->fences[0];
      swapchain->fences[0] = last;

      if (last != VK_NULL_HANDLE) {
         anv_WaitForFences(anv_device_to_handle(queue->device),
                           1, &last, true, 1);
      }
   }

   return result;
}