/*
2 * Copyright © 2016 Red Hat
3 * based on intel anv code:
4 * Copyright © 2015 Intel Corporation
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
26 #include "radv_private.h"
27 #include "radv_meta.h"
28 #include "wsi_common.h"
30 #include "util/macros.h"
32 MAYBE_UNUSED
static const struct wsi_callbacks wsi_cbs
= {
33 .get_phys_device_format_properties
= radv_GetPhysicalDeviceFormatProperties
,
37 radv_init_wsi(struct radv_physical_device
*physical_device
)
41 memset(physical_device
->wsi_device
.wsi
, 0, sizeof(physical_device
->wsi_device
.wsi
));
43 #ifdef VK_USE_PLATFORM_XCB_KHR
44 result
= wsi_x11_init_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
45 if (result
!= VK_SUCCESS
)
49 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
50 result
= wsi_wl_init_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
,
51 radv_physical_device_to_handle(physical_device
),
53 if (result
!= VK_SUCCESS
) {
54 #ifdef VK_USE_PLATFORM_XCB_KHR
55 wsi_x11_finish_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
65 radv_finish_wsi(struct radv_physical_device
*physical_device
)
67 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
68 wsi_wl_finish_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
70 #ifdef VK_USE_PLATFORM_XCB_KHR
71 wsi_x11_finish_wsi(&physical_device
->wsi_device
, &physical_device
->instance
->alloc
);
75 void radv_DestroySurfaceKHR(
77 VkSurfaceKHR _surface
,
78 const VkAllocationCallbacks
* pAllocator
)
80 RADV_FROM_HANDLE(radv_instance
, instance
, _instance
);
81 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
83 vk_free2(&instance
->alloc
, pAllocator
, surface
);
86 VkResult
radv_GetPhysicalDeviceSurfaceSupportKHR(
87 VkPhysicalDevice physicalDevice
,
88 uint32_t queueFamilyIndex
,
89 VkSurfaceKHR _surface
,
92 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
93 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
94 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
96 return iface
->get_support(surface
, &device
->wsi_device
,
97 &device
->instance
->alloc
,
98 queueFamilyIndex
, device
->local_fd
, true, pSupported
);
101 VkResult
radv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
102 VkPhysicalDevice physicalDevice
,
103 VkSurfaceKHR _surface
,
104 VkSurfaceCapabilitiesKHR
* pSurfaceCapabilities
)
106 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
107 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
108 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
110 return iface
->get_capabilities(surface
, pSurfaceCapabilities
);
113 VkResult
radv_GetPhysicalDeviceSurfaceFormatsKHR(
114 VkPhysicalDevice physicalDevice
,
115 VkSurfaceKHR _surface
,
116 uint32_t* pSurfaceFormatCount
,
117 VkSurfaceFormatKHR
* pSurfaceFormats
)
119 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
120 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
121 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
123 return iface
->get_formats(surface
, &device
->wsi_device
, pSurfaceFormatCount
,
127 VkResult
radv_GetPhysicalDeviceSurfacePresentModesKHR(
128 VkPhysicalDevice physicalDevice
,
129 VkSurfaceKHR _surface
,
130 uint32_t* pPresentModeCount
,
131 VkPresentModeKHR
* pPresentModes
)
133 RADV_FROM_HANDLE(radv_physical_device
, device
, physicalDevice
);
134 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, _surface
);
135 struct wsi_interface
*iface
= device
->wsi_device
.wsi
[surface
->platform
];
137 return iface
->get_present_modes(surface
, pPresentModeCount
,
142 radv_wsi_image_create(VkDevice device_h
,
143 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
144 const VkAllocationCallbacks
* pAllocator
,
145 bool needs_linear_copy
,
148 VkDeviceMemory
*memory_p
,
151 uint32_t *row_pitch
, int *fd_p
)
153 VkResult result
= VK_SUCCESS
;
154 struct radeon_surf
*surface
;
156 struct radv_image
*image
;
158 RADV_FROM_HANDLE(radv_device
, device
, device_h
);
160 result
= radv_image_create(device_h
,
161 &(struct radv_image_create_info
) {
163 &(VkImageCreateInfo
) {
164 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
165 .imageType
= VK_IMAGE_TYPE_2D
,
166 .format
= pCreateInfo
->imageFormat
,
168 .width
= pCreateInfo
->imageExtent
.width
,
169 .height
= pCreateInfo
->imageExtent
.height
,
175 /* FIXME: Need a way to use X tiling to allow scanout */
176 .tiling
= linear
? VK_IMAGE_TILING_LINEAR
: VK_IMAGE_TILING_OPTIMAL
,
177 .usage
= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
183 if (result
!= VK_SUCCESS
)
186 image
= radv_image_from_handle(image_h
);
188 VkDeviceMemory memory_h
;
190 const VkMemoryDedicatedAllocateInfoKHR ded_alloc
= {
191 .sType
= VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR
,
193 .buffer
= VK_NULL_HANDLE
,
197 /* Find the first VRAM memory type, or GART for PRIME images. */
198 int memory_type_index
= -1;
199 for (int i
= 0; i
< device
->physical_device
->memory_properties
.memoryTypeCount
; ++i
) {
200 bool is_local
= !!(device
->physical_device
->memory_properties
.memoryTypes
[i
].propertyFlags
& VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
);
201 if ((linear
&& !is_local
) || (!linear
&& is_local
)) {
202 memory_type_index
= i
;
208 if (memory_type_index
== -1)
209 memory_type_index
= 0;
211 result
= radv_alloc_memory(device_h
,
212 &(VkMemoryAllocateInfo
) {
213 .sType
= VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
,
215 .allocationSize
= image
->size
,
216 .memoryTypeIndex
= memory_type_index
,
218 NULL
/* XXX: pAllocator */,
219 RADV_MEM_IMPLICIT_SYNC
,
221 if (result
!= VK_SUCCESS
)
222 goto fail_create_image
;
224 radv_BindImageMemory(device_h
, image_h
, memory_h
, 0);
227 * return the fd for the image in the no copy mode,
228 * or the fd for the linear image if a copy is required.
230 if (!needs_linear_copy
|| (needs_linear_copy
&& linear
)) {
231 RADV_FROM_HANDLE(radv_device_memory
, memory
, memory_h
);
232 if (!radv_get_memory_fd(device
, memory
, &fd
))
233 goto fail_alloc_memory
;
237 surface
= &image
->surface
;
240 *memory_p
= memory_h
;
242 *offset
= image
->offset
;
244 if (device
->physical_device
->rad_info
.chip_class
>= GFX9
)
245 *row_pitch
= surface
->u
.gfx9
.surf_pitch
* surface
->bpe
;
247 *row_pitch
= surface
->u
.legacy
.level
[0].nblk_x
* surface
->bpe
;
250 radv_FreeMemory(device_h
, memory_h
, pAllocator
);
253 radv_DestroyImage(device_h
, image_h
, pAllocator
);
259 radv_wsi_image_free(VkDevice device
,
260 const VkAllocationCallbacks
* pAllocator
,
262 VkDeviceMemory memory_h
)
264 radv_DestroyImage(device
, image_h
, pAllocator
);
266 radv_FreeMemory(device
, memory_h
, pAllocator
);
269 static const struct wsi_image_fns radv_wsi_image_fns
= {
270 .create_wsi_image
= radv_wsi_image_create
,
271 .free_wsi_image
= radv_wsi_image_free
,
274 #define NUM_PRIME_POOLS RADV_QUEUE_TRANSFER
276 radv_wsi_free_prime_command_buffers(struct radv_device
*device
,
277 struct wsi_swapchain
*swapchain
)
279 const int num_pools
= NUM_PRIME_POOLS
;
280 const int num_images
= swapchain
->image_count
;
282 for (i
= 0; i
< num_pools
; i
++) {
283 radv_FreeCommandBuffers(radv_device_to_handle(device
),
284 swapchain
->cmd_pools
[i
],
285 swapchain
->image_count
,
286 &swapchain
->cmd_buffers
[i
* num_images
]);
288 radv_DestroyCommandPool(radv_device_to_handle(device
),
289 swapchain
->cmd_pools
[i
],
295 radv_wsi_create_prime_command_buffers(struct radv_device
*device
,
296 const VkAllocationCallbacks
*alloc
,
297 struct wsi_swapchain
*swapchain
)
299 const int num_pools
= NUM_PRIME_POOLS
;
300 const int num_images
= swapchain
->image_count
;
301 int num_cmd_buffers
= num_images
* num_pools
; //TODO bump to MAX_QUEUE_FAMILIES
305 swapchain
->cmd_buffers
= vk_alloc(alloc
, (sizeof(VkCommandBuffer
) * num_cmd_buffers
), 8,
306 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE
);
307 if (!swapchain
->cmd_buffers
)
308 return VK_ERROR_OUT_OF_HOST_MEMORY
;
310 memset(swapchain
->cmd_buffers
, 0, sizeof(VkCommandBuffer
) * num_cmd_buffers
);
311 memset(swapchain
->cmd_pools
, 0, sizeof(VkCommandPool
) * num_pools
);
312 for (i
= 0; i
< num_pools
; i
++) {
313 VkCommandPoolCreateInfo pool_create_info
;
315 pool_create_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO
;
316 pool_create_info
.pNext
= NULL
;
317 pool_create_info
.flags
= 0;
318 pool_create_info
.queueFamilyIndex
= i
;
320 result
= radv_CreateCommandPool(radv_device_to_handle(device
),
321 &pool_create_info
, alloc
,
322 &swapchain
->cmd_pools
[i
]);
323 if (result
!= VK_SUCCESS
)
326 VkCommandBufferAllocateInfo cmd_buffer_info
;
327 cmd_buffer_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO
;
328 cmd_buffer_info
.pNext
= NULL
;
329 cmd_buffer_info
.commandPool
= swapchain
->cmd_pools
[i
];
330 cmd_buffer_info
.level
= VK_COMMAND_BUFFER_LEVEL_PRIMARY
;
331 cmd_buffer_info
.commandBufferCount
= num_images
;
333 result
= radv_AllocateCommandBuffers(radv_device_to_handle(device
),
335 &swapchain
->cmd_buffers
[i
* num_images
]);
336 if (result
!= VK_SUCCESS
)
338 for (j
= 0; j
< num_images
; j
++) {
339 VkImage image
, linear_image
;
340 int idx
= (i
* num_images
) + j
;
342 swapchain
->get_image_and_linear(swapchain
, j
, &image
, &linear_image
);
343 VkCommandBufferBeginInfo begin_info
= {0};
345 begin_info
.sType
= VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
;
347 radv_BeginCommandBuffer(swapchain
->cmd_buffers
[idx
], &begin_info
);
349 radv_blit_to_prime_linear(radv_cmd_buffer_from_handle(swapchain
->cmd_buffers
[idx
]),
350 radv_image_from_handle(image
),
351 radv_image_from_handle(linear_image
));
353 radv_EndCommandBuffer(swapchain
->cmd_buffers
[idx
]);
358 radv_wsi_free_prime_command_buffers(device
, swapchain
);
362 VkResult
radv_CreateSwapchainKHR(
364 const VkSwapchainCreateInfoKHR
* pCreateInfo
,
365 const VkAllocationCallbacks
* pAllocator
,
366 VkSwapchainKHR
* pSwapchain
)
368 RADV_FROM_HANDLE(radv_device
, device
, _device
);
369 ICD_FROM_HANDLE(VkIcdSurfaceBase
, surface
, pCreateInfo
->surface
);
370 struct wsi_interface
*iface
=
371 device
->physical_device
->wsi_device
.wsi
[surface
->platform
];
372 struct wsi_swapchain
*swapchain
;
373 const VkAllocationCallbacks
*alloc
;
377 alloc
= &device
->alloc
;
378 VkResult result
= iface
->create_swapchain(surface
, _device
,
379 &device
->physical_device
->wsi_device
,
380 device
->physical_device
->local_fd
,
382 alloc
, &radv_wsi_image_fns
,
384 if (result
!= VK_SUCCESS
)
388 swapchain
->alloc
= *pAllocator
;
390 swapchain
->alloc
= device
->alloc
;
392 for (unsigned i
= 0; i
< ARRAY_SIZE(swapchain
->fences
); i
++)
393 swapchain
->fences
[i
] = VK_NULL_HANDLE
;
395 if (swapchain
->needs_linear_copy
) {
396 result
= radv_wsi_create_prime_command_buffers(device
, alloc
,
398 if (result
!= VK_SUCCESS
)
402 *pSwapchain
= wsi_swapchain_to_handle(swapchain
);
407 void radv_DestroySwapchainKHR(
409 VkSwapchainKHR _swapchain
,
410 const VkAllocationCallbacks
* pAllocator
)
412 RADV_FROM_HANDLE(radv_device
, device
, _device
);
413 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
414 const VkAllocationCallbacks
*alloc
;
422 alloc
= &device
->alloc
;
424 for (unsigned i
= 0; i
< ARRAY_SIZE(swapchain
->fences
); i
++) {
425 if (swapchain
->fences
[i
] != VK_NULL_HANDLE
)
426 radv_DestroyFence(_device
, swapchain
->fences
[i
], pAllocator
);
429 if (swapchain
->needs_linear_copy
)
430 radv_wsi_free_prime_command_buffers(device
, swapchain
);
432 swapchain
->destroy(swapchain
, alloc
);
435 VkResult
radv_GetSwapchainImagesKHR(
437 VkSwapchainKHR _swapchain
,
438 uint32_t* pSwapchainImageCount
,
439 VkImage
* pSwapchainImages
)
441 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
443 return swapchain
->get_images(swapchain
, pSwapchainImageCount
,
447 VkResult
radv_AcquireNextImageKHR(
449 VkSwapchainKHR _swapchain
,
451 VkSemaphore semaphore
,
453 uint32_t* pImageIndex
)
455 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, _swapchain
);
456 RADV_FROM_HANDLE(radv_fence
, fence
, _fence
);
458 VkResult result
= swapchain
->acquire_next_image(swapchain
, timeout
, semaphore
,
461 if (fence
&& (result
== VK_SUCCESS
|| result
== VK_SUBOPTIMAL_KHR
)) {
462 fence
->submitted
= true;
463 fence
->signalled
= true;
468 VkResult
radv_QueuePresentKHR(
470 const VkPresentInfoKHR
* pPresentInfo
)
472 RADV_FROM_HANDLE(radv_queue
, queue
, _queue
);
473 VkResult result
= VK_SUCCESS
;
474 const VkPresentRegionsKHR
*regions
=
475 vk_find_struct_const(pPresentInfo
->pNext
, PRESENT_REGIONS_KHR
);
477 for (uint32_t i
= 0; i
< pPresentInfo
->swapchainCount
; i
++) {
478 RADV_FROM_HANDLE(wsi_swapchain
, swapchain
, pPresentInfo
->pSwapchains
[i
]);
479 struct radeon_winsys_cs
*cs
;
480 const VkPresentRegionKHR
*region
= NULL
;
481 VkResult item_result
;
482 struct radv_winsys_sem_info sem_info
;
484 item_result
= radv_alloc_sem_info(&sem_info
,
485 pPresentInfo
->waitSemaphoreCount
,
486 pPresentInfo
->pWaitSemaphores
,
489 if (pPresentInfo
->pResults
!= NULL
)
490 pPresentInfo
->pResults
[i
] = item_result
;
491 result
= result
== VK_SUCCESS
? item_result
: result
;
492 if (item_result
!= VK_SUCCESS
) {
493 radv_free_sem_info(&sem_info
);
497 assert(radv_device_from_handle(swapchain
->device
) == queue
->device
);
498 if (swapchain
->fences
[0] == VK_NULL_HANDLE
) {
499 item_result
= radv_CreateFence(radv_device_to_handle(queue
->device
),
500 &(VkFenceCreateInfo
) {
501 .sType
= VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
,
503 }, &swapchain
->alloc
, &swapchain
->fences
[0]);
504 if (pPresentInfo
->pResults
!= NULL
)
505 pPresentInfo
->pResults
[i
] = item_result
;
506 result
= result
== VK_SUCCESS
? item_result
: result
;
507 if (item_result
!= VK_SUCCESS
) {
508 radv_free_sem_info(&sem_info
);
512 radv_ResetFences(radv_device_to_handle(queue
->device
),
513 1, &swapchain
->fences
[0]);
516 if (swapchain
->needs_linear_copy
) {
517 int idx
= (queue
->queue_family_index
* swapchain
->image_count
) + pPresentInfo
->pImageIndices
[i
];
518 cs
= radv_cmd_buffer_from_handle(swapchain
->cmd_buffers
[idx
])->cs
;
520 cs
= queue
->device
->empty_cs
[queue
->queue_family_index
];
521 RADV_FROM_HANDLE(radv_fence
, fence
, swapchain
->fences
[0]);
522 struct radeon_winsys_fence
*base_fence
= fence
->fence
;
523 struct radeon_winsys_ctx
*ctx
= queue
->hw_ctx
;
525 queue
->device
->ws
->cs_submit(ctx
, queue
->queue_idx
,
530 fence
->submitted
= true;
532 if (regions
&& regions
->pRegions
)
533 region
= ®ions
->pRegions
[i
];
535 item_result
= swapchain
->queue_present(swapchain
,
536 pPresentInfo
->pImageIndices
[i
],
538 /* TODO: What if one of them returns OUT_OF_DATE? */
539 if (pPresentInfo
->pResults
!= NULL
)
540 pPresentInfo
->pResults
[i
] = item_result
;
541 result
= result
== VK_SUCCESS
? item_result
: result
;
542 if (item_result
!= VK_SUCCESS
) {
543 radv_free_sem_info(&sem_info
);
547 VkFence last
= swapchain
->fences
[2];
548 swapchain
->fences
[2] = swapchain
->fences
[1];
549 swapchain
->fences
[1] = swapchain
->fences
[0];
550 swapchain
->fences
[0] = last
;
552 if (last
!= VK_NULL_HANDLE
) {
553 radv_WaitForFences(radv_device_to_handle(queue
->device
),
557 radv_free_sem_info(&sem_info
);