/*
 * Copyright © 2016 Red Hat
 * based on intel anv code:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "radv_private.h"
27 #include "radv_meta.h"
28 #include "wsi_common.h"
30 static const struct wsi_callbacks wsi_cbs
= {
31 .get_phys_device_format_properties
= radv_GetPhysicalDeviceFormatProperties
,
35 radv_init_wsi(struct radv_physical_device
*physical_device
)
39 memset(physical_device
->wsi_device
.wsi
, 0, sizeof(physical_device
->wsi_device
.wsi
));
41 #ifdef VK_USE_PLATFORM_XCB_KHR
42 result
= wsi_x11_init_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
43 if (result
!= VK_SUCCESS
)
47 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
48 result
= wsi_wl_init_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
,
49 radv_physical_device_to_handle(physical_device
),
51 if (result
!= VK_SUCCESS
) {
52 #ifdef VK_USE_PLATFORM_XCB_KHR
53 wsi_x11_finish_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
/* Tear down the per-platform WSI backends initialized by radv_init_wsi(). */
void
radv_finish_wsi(struct radv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	wsi_wl_finish_wsi(&physical_device->wsi_device,
			  &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
	wsi_x11_finish_wsi(&physical_device->wsi_device,
			   &physical_device->instance->alloc);
#endif
}
73 void radv_DestroySurfaceKHR(
75 VkSurfaceKHR _surface
,
76 const VkAllocationCallbacks
* pAllocator
)
78 RADV_FROM_HANDLE(radv_instance
, instance
, _instance
);
79 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
81 vk_free2(&instance
->alloc
, pAllocator
, surface
);
84 VkResult
radv_GetPhysicalDeviceSurfaceSupportKHR(
85 VkPhysicalDevice physicalDevice
,
86 uint32_t queueFamilyIndex
,
87 VkSurfaceKHR _surface
,
90 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
91 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
92 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
94 return iface
->get_support(surface
, &device
->wsi_device
,
95 &device
->instance
->alloc
,
96 queueFamilyIndex
, device
->local_fd
, true, pSupported
);
99 VkResult
radv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
100 VkPhysicalDevice physicalDevice
,
101 VkSurfaceKHR _surface
,
102 VkSurfaceCapabilitiesKHR
* pSurfaceCapabilities
)
104 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
105 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
106 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
108 return iface
->get_capabilities(surface
, pSurfaceCapabilities
);
111 VkResult
radv_GetPhysicalDeviceSurfaceFormatsKHR(
112 VkPhysicalDevice physicalDevice
,
113 VkSurfaceKHR _surface
,
114 uint32_t* pSurfaceFormatCount
,
115 VkSurfaceFormatKHR
* pSurfaceFormats
)
117 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
118 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
119 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
121 return iface
->get_formats(surface
, &device
->wsi_device
, pSurfaceFormatCount
,
125 VkResult
radv_GetPhysicalDeviceSurfacePresentModesKHR(
126 VkPhysicalDevice physicalDevice
,
127 VkSurfaceKHR _surface
,
128 uint32_t* pPresentModeCount
,
129 VkPresentModeKHR
* pPresentModes
)
131 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
132 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
133 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
135 return iface
->get_present_modes(surface
, pPresentModeCount
,
140 radv_wsi_image_create(VkDevice device_h
,
141 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
142 const VkAllocationCallbacks
* pAllocator
,
143 bool needs_linear_copy
,
146 VkDeviceMemory
*memory_p
,
149 uint32_t *row_pitch
, int *fd_p
)
151 struct radv_device
*device
= radv_device_from_handle(device_h
);
152 VkResult result
= VK_SUCCESS
;
153 struct radeon_surf
*surface
;
155 struct radv_image
*image
;
159 result
= radv_image_create(device_h
,
160 &(struct radv_image_create_info
) {
162 &(VkImageCreateInfo
) {
163 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
164 .imageType
= VK_IMAGE_TYPE_2D
,
165 .format
= pCreateInfo
->imageFormat
,
167 .width
= pCreateInfo
->imageExtent
.width
,
168 .height
= pCreateInfo
->imageExtent
.height
,
174 /* FIXME: Need a way to use X tiling to allow scanout */
175 .tiling
= linear
? VK_IMAGE_TILING_LINEAR
: VK_IMAGE_TILING_OPTIMAL
,
176 .usage
= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
182 if (result
!= VK_SUCCESS
)
185 image
= radv_image_from_handle(image_h
);
186 VkDeviceMemory memory_h
;
187 struct radv_device_memory
*memory
;
189 result
= radv_AllocateMemory(device_h
,
190 &(VkMemoryAllocateInfo
) {
191 .sType
= VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
,
192 .allocationSize
= image
->size
,
193 .memoryTypeIndex
= linear
? 1 : 0,
195 NULL
/* XXX: pAllocator */,
197 if (result
!= VK_SUCCESS
)
198 goto fail_create_image
;
200 memory
= radv_device_memory_from_handle(memory_h
);
202 radv_BindImageMemory(VK_NULL_HANDLE
, image_h
, memory_h
, 0);
205 * return the fd for the image in the no copy mode,
206 * or the fd for the linear image if a copy is required.
208 if (!needs_linear_copy
|| (needs_linear_copy
&& linear
)) {
209 bret
= device
->ws
->buffer_get_fd(device
->ws
,
212 goto fail_alloc_memory
;
217 struct radeon_bo_metadata metadata
;
218 radv_init_metadata(device
, image
, &metadata
);
219 device
->ws
->buffer_set_metadata(memory
->bo
, &metadata
);
222 surface
= &image
->surface
;
225 *memory_p
= memory_h
;
227 *offset
= image
->offset
;
228 *row_pitch
= surface
->level
[0].pitch_bytes
;
231 radv_FreeMemory(device_h
, memory_h
, pAllocator
);
234 radv_DestroyImage(device_h
, image_h
, pAllocator
);
240 radv_wsi_image_free(VkDevice device
,
241 const VkAllocationCallbacks
* pAllocator
,
243 VkDeviceMemory memory_h
)
245 radv_DestroyImage(device
, image_h
, pAllocator
);
247 radv_FreeMemory(device
, memory_h
, pAllocator
);
250 static const struct wsi_image_fns radv_wsi_image_fns
= {
251 .create_wsi_image
= radv_wsi_image_create
,
252 .free_wsi_image
= radv_wsi_image_free
,
255 #define NUM_PRIME_POOLS RADV_QUEUE_TRANSFER
257 radv_wsi_free_prime_command_buffers(struct radv_device
*device
,
258 struct wsi_swapchain
*swapchain
)
260 const int num_pools
= NUM_PRIME_POOLS
;
261 const int num_images
= swapchain
->image_count
;
263 for (i
= 0; i
< num_pools
; i
++) {
264 radv_FreeCommandBuffers(radv_device_to_handle(device
),
265 swapchain
->cmd_pools
[i
],
266 swapchain
->image_count
,
267 &swapchain
->cmd_buffers
[i
* num_images
]);
269 radv_DestroyCommandPool(radv_device_to_handle(device
),
270 swapchain
->cmd_pools
[i
],
276 radv_wsi_create_prime_command_buffers(struct radv_device
*device
,
277 const VkAllocationCallbacks
*alloc
,
278 struct wsi_swapchain
*swapchain
)
280 const int num_pools
= NUM_PRIME_POOLS
;
281 const int num_images
= swapchain
->image_count
;
282 int num_cmd_buffers
= num_images
* num_pools
; //TODO bump to MAX_QUEUE_FAMILIES
286 swapchain
->cmd_buffers
= vk_alloc(alloc
, (sizeof(VkCommandBuffer
) * num_cmd_buffers
), 8,
287 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
288 if (!swapchain
->cmd_buffers
)
289 return VK_ERROR_OUT_OF_HOST_MEMORY
;
291 memset(swapchain
->cmd_buffers
, 0, sizeof(VkCommandBuffer
) * num_cmd_buffers
);
292 memset(swapchain
->cmd_pools
, 0, sizeof(VkCommandPool
) * num_pools
);
293 for (i
= 0; i
< num_pools
; i
++) {
294 VkCommandPoolCreateInfo pool_create_info
;
296 pool_create_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO
;
297 pool_create_info
.pNext
= NULL
;
298 pool_create_info
.flags
= 0;
299 pool_create_info
.queueFamilyIndex
= i
;
301 result
= radv_CreateCommandPool(radv_device_to_handle(device
),
302 &pool_create_info
, alloc
,
303 &swapchain
->cmd_pools
[i
]);
304 if (result
!= VK_SUCCESS
)
307 VkCommandBufferAllocateInfo cmd_buffer_info
;
308 cmd_buffer_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO
;
309 cmd_buffer_info
.pNext
= NULL
;
310 cmd_buffer_info
.commandPool
= swapchain
->cmd_pools
[i
];
311 cmd_buffer_info
.level
= VK_COMMAND_BUFFER_LEVEL_PRIMARY
;
312 cmd_buffer_info
.commandBufferCount
= num_images
;
314 result
= radv_AllocateCommandBuffers(radv_device_to_handle(device
),
316 &swapchain
->cmd_buffers
[i
* num_images
]);
317 if (result
!= VK_SUCCESS
)
319 for (j
= 0; j
< num_images
; j
++) {
320 VkImage image
, linear_image
;
321 int idx
= (i
* num_images
) + j
;
323 swapchain
->get_image_and_linear(swapchain
, j
, &image
, &linear_image
);
324 VkCommandBufferBeginInfo begin_info
= {0};
326 begin_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
;
328 radv_BeginCommandBuffer(swapchain
->cmd_buffers
[idx
], &begin_info
);
330 radv_blit_to_prime_linear(radv_cmd_buffer_from_handle(swapchain
->cmd_buffers
[idx
]),
331 radv_image_from_handle(image
),
332 radv_image_from_handle(linear_image
));
334 radv_EndCommandBuffer(swapchain
->cmd_buffers
[idx
]);
339 radv_wsi_free_prime_command_buffers(device
, swapchain
);
343 VkResult
radv_CreateSwapchainKHR(
345 const VkSwapchainCreateInfoKHR
* pCreateInfo
,
346 const VkAllocationCallbacks
* pAllocator
,
347 VkSwapchainKHR
* pSwapchain
)
349 RADV_FROM_HANDLE(radv_device
, device
, _device
);
350 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, pCreateInfo
->surface
);
351 struct wsi_interface
*iface
=
352 device
->physical_device
->wsi_device
.wsi
[surface
->platform
];
353 struct wsi_swapchain
*swapchain
;
354 const VkAllocationCallbacks
*alloc
;
358 alloc
= &device
->alloc
;
359 VkResult result
= iface
->create_swapchain(surface
, _device
,
360 &device
->physical_device
->wsi_device
,
361 device
->physical_device
->local_fd
,
363 alloc
, &radv_wsi_image_fns
,
365 if (result
!= VK_SUCCESS
)
369 swapchain
->alloc
= *pAllocator
;
371 swapchain
->alloc
= device
->alloc
;
373 for (unsigned i
= 0; i
< ARRAY_SIZE(swapchain
->fences
); i
++)
374 swapchain
->fences
[i
] = VK_NULL_HANDLE
;
376 if (swapchain
->needs_linear_copy
) {
377 result
= radv_wsi_create_prime_command_buffers(device
, alloc
,
379 if (result
!= VK_SUCCESS
)
383 *pSwapchain
= wsi_swapchain_to_handle(swapchain
);
388 void radv_DestroySwapchainKHR(
390 VkSwapchainKHR _swapchain
,
391 const VkAllocationCallbacks
* pAllocator
)
393 RADV_FROM_HANDLE(radv_device
, device
, _device
);
394 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
395 const VkAllocationCallbacks
*alloc
;
403 alloc
= &device
->alloc
;
405 for (unsigned i
= 0; i
< ARRAY_SIZE(swapchain
->fences
); i
++) {
406 if (swapchain
->fences
[i
] != VK_NULL_HANDLE
)
407 radv_DestroyFence(_device
, swapchain
->fences
[i
], pAllocator
);
410 if (swapchain
->needs_linear_copy
)
411 radv_wsi_free_prime_command_buffers(device
, swapchain
);
413 swapchain
->destroy(swapchain
, alloc
);
416 VkResult
radv_GetSwapchainImagesKHR(
418 VkSwapchainKHR _swapchain
,
419 uint32_t* pSwapchainImageCount
,
420 VkImage
* pSwapchainImages
)
422 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
424 return swapchain
->get_images(swapchain
, pSwapchainImageCount
,
428 VkResult
radv_AcquireNextImageKHR(
430 VkSwapchainKHR _swapchain
,
432 VkSemaphore semaphore
,
434 uint32_t* pImageIndex
)
436 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
437 RADV_FROM_HANDLE(radv_fence
, fence
, _fence
);
439 VkResult result
= swapchain
->acquire_next_image(swapchain
, timeout
, semaphore
,
442 if (fence
&& result
== VK_SUCCESS
) {
443 fence
->submitted
= true;
444 fence
->signalled
= true;
450 VkResult
radv_QueuePresentKHR(
452 const VkPresentInfoKHR
* pPresentInfo
)
454 RADV_FROM_HANDLE(radv_queue
, queue
, _queue
);
455 VkResult result
= VK_SUCCESS
;
457 for (uint32_t i
= 0; i
< pPresentInfo
->swapchainCount
; i
++) {
458 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, pPresentInfo
->pSwapchains
[i
]);
459 struct radeon_winsys_cs
*cs
;
460 assert(radv_device_from_handle(swapchain
->device
) == queue
->device
);
461 if (swapchain
->fences
[0] == VK_NULL_HANDLE
) {
462 result
= radv_CreateFence(radv_device_to_handle(queue
->device
),
463 &(VkFenceCreateInfo
) {
464 .sType
= VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
,
466 }, &swapchain
->alloc
, &swapchain
->fences
[0]);
467 if (result
!= VK_SUCCESS
)
470 radv_ResetFences(radv_device_to_handle(queue
->device
),
471 1, &swapchain
->fences
[0]);
474 if (swapchain
->needs_linear_copy
) {
475 int idx
= (queue
->queue_family_index
* swapchain
->image_count
) + pPresentInfo
->pImageIndices
[i
];
476 cs
= radv_cmd_buffer_from_handle(swapchain
->cmd_buffers
[idx
])->cs
;
478 cs
= queue
->device
->empty_cs
[queue
->queue_family_index
];
479 RADV_FROM_HANDLE(radv_fence
, fence
, swapchain
->fences
[0]);
480 struct radeon_winsys_fence
*base_fence
= fence
->fence
;
481 struct radeon_winsys_ctx
*ctx
= queue
->hw_ctx
;
482 queue
->device
->ws
->cs_submit(ctx
, queue
->queue_idx
,
485 (struct radeon_winsys_sem
**)pPresentInfo
->pWaitSemaphores
,
486 pPresentInfo
->waitSemaphoreCount
, NULL
, 0, false, base_fence
);
487 fence
->submitted
= true;
489 result
= swapchain
->queue_present(swapchain
,
490 pPresentInfo
->pImageIndices
[i
]);
491 /* TODO: What if one of them returns OUT_OF_DATE? */
492 if (result
!= VK_SUCCESS
)
495 VkFence last
= swapchain
->fences
[2];
496 swapchain
->fences
[2] = swapchain
->fences
[1];
497 swapchain
->fences
[1] = swapchain
->fences
[0];
498 swapchain
->fences
[0] = last
;
500 if (last
!= VK_NULL_HANDLE
) {
501 radv_WaitForFences(radv_device_to_handle(queue
->device
),