2 * Copyright © 2017, Google Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include <hardware/gralloc.h>
25 #include <hardware/hardware.h>
26 #include <hardware/hwvulkan.h>
27 #include <vulkan/vk_android_native_buffer.h>
28 #include <vulkan/vk_icd.h>
29 #include <sync/sync.h>
31 #include "anv_private.h"
33 static int anv_hal_open(const struct hw_module_t
* mod
, const char* id
, struct hw_device_t
** dev
);
34 static int anv_hal_close(struct hw_device_t
*dev
);
39 STATIC_ASSERT(HWVULKAN_DISPATCH_MAGIC
== ICD_LOADER_MAGIC
);
/* Android HAL module descriptor.
 *
 * Android's hardware module loader dlopen()s the HAL and resolves the
 * exported symbol HAL_MODULE_INFO_SYM (hence PUBLIC), then reads these
 * fields to identify the module and find its open() method.
 * NOTE(review): the tail of this initializer (the methods table's .open
 * member and closing braces) is not visible in this chunk — confirm
 * against the full file.
 */
42 PUBLIC
struct hwvulkan_module_t HAL_MODULE_INFO_SYM
= {
44 .tag
= HARDWARE_MODULE_TAG
,
45 .module_api_version
= HWVULKAN_MODULE_API_VERSION_0_1
,
46 .hal_api_version
= HARDWARE_MAKE_API_VERSION(1, 0),
47 .id
= HWVULKAN_HARDWARE_MODULE_ID
,
48 .name
= "Intel Vulkan HAL",
50 .methods
= &(hw_module_methods_t
) {
/* If any bits in test_mask are set in *inout_mask, clear them and
 * return true; otherwise leave the mask unchanged and return false.
 */
static bool
unmask32(uint32_t *inout_mask, uint32_t test_mask)
{
   const bool any_set = (*inout_mask & test_mask) != 0;
   *inout_mask &= ~test_mask;
   return any_set;
}
66 anv_hal_open(const struct hw_module_t
* mod
, const char* id
,
67 struct hw_device_t
** dev
)
69 assert(mod
== &HAL_MODULE_INFO_SYM
.common
);
70 assert(strcmp(id
, HWVULKAN_DEVICE_0
) == 0);
72 hwvulkan_device_t
*hal_dev
= malloc(sizeof(*hal_dev
));
76 *hal_dev
= (hwvulkan_device_t
) {
78 .tag
= HARDWARE_DEVICE_TAG
,
79 .version
= HWVULKAN_DEVICE_API_VERSION_0_1
,
80 .module
= &HAL_MODULE_INFO_SYM
.common
,
81 .close
= anv_hal_close
,
83 .EnumerateInstanceExtensionProperties
= anv_EnumerateInstanceExtensionProperties
,
84 .CreateInstance
= anv_CreateInstance
,
85 .GetInstanceProcAddr
= anv_GetInstanceProcAddr
,
88 *dev
= &hal_dev
->common
;
/* Implements hw_device_t::close() for the Vulkan HAL. */
static int
anv_hal_close(struct hw_device_t *dev)
{
   /* hwvulkan.h claims that hw_device_t::close() is never called, so if
    * we ever do get here just report failure.
    */
   return -1;
}
/* Create an anv_image backed by the dma-buf wrapped in a gralloc native
 * buffer (VK_ANDROID_native_buffer).
 *
 * Imports the single dma-buf fd from gralloc_info->handle through the
 * device's bo cache, queries its kernel tiling, creates a VkImage with
 * matching isl tiling/stride, binds the bo to plane 0, and on success
 * writes the image handle to *out_image_h. On failure the image and the
 * cached bo reference are released (error paths not fully visible in
 * this chunk).
 */
100 anv_image_from_gralloc(VkDevice device_h
,
101 const VkImageCreateInfo
*base_info
,
102 const VkNativeBufferANDROID
*gralloc_info
,
103 const VkAllocationCallbacks
*alloc
,
104 VkImage
*out_image_h
)
107 ANV_FROM_HANDLE(anv_device
, device
, device_h
);
108 VkImage image_h
= VK_NULL_HANDLE
;
109 struct anv_image
*image
= NULL
;
110 struct anv_bo
*bo
= NULL
;
/* Auxiliary surfaces are disabled: the dma-buf only provides the main
 * surface, so CCS/HiZ cannot be used with it.
 */
113 struct anv_image_create_info anv_info
= {
114 .vk_info
= base_info
,
115 .isl_extra_usage_flags
= ISL_SURF_USAGE_DISABLE_AUX_BIT
,
/* Only a single-fd handle is supported; data[0] below is assumed to be
 * that dma-buf fd.
 */
118 if (gralloc_info
->handle
->numFds
!= 1) {
119 return vk_errorf(device
->instance
, device
,
120 VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR
,
121 "VkNativeBufferANDROID::handle::numFds is %d, "
122 "expected 1", gralloc_info
->handle
->numFds
);
125 /* Do not close the gralloc handle's dma_buf. The lifetime of the dma_buf
126 * must exceed that of the gralloc handle, and we do not own the gralloc
129 int dma_buf
= gralloc_info
->handle
->data
[0];
/* Mirror the device's addressing capabilities onto the imported bo. */
131 uint64_t bo_flags
= 0;
132 if (device
->instance
->physicalDevice
.supports_48bit_addresses
)
133 bo_flags
|= EXEC_OBJECT_SUPPORTS_48B_ADDRESS
;
134 if (device
->instance
->physicalDevice
.use_softpin
)
135 bo_flags
|= EXEC_OBJECT_PINNED
;
137 result
= anv_bo_cache_import(device
, &device
->bo_cache
, dma_buf
, bo_flags
, &bo
);
138 if (result
!= VK_SUCCESS
) {
139 return vk_errorf(device
->instance
, device
, result
,
140 "failed to import dma-buf from VkNativeBufferANDROID");
/* Translate the kernel's tiling mode for this bo into isl tiling flags;
 * unknown or failed queries reject the import.
 */
143 int i915_tiling
= anv_gem_get_tiling(device
, bo
->gem_handle
);
144 switch (i915_tiling
) {
145 case I915_TILING_NONE
:
146 anv_info
.isl_tiling_flags
= ISL_TILING_LINEAR_BIT
;
149 anv_info
.isl_tiling_flags
= ISL_TILING_X_BIT
;
152 anv_info
.isl_tiling_flags
= ISL_TILING_Y0_BIT
;
155 result
= vk_errorf(device
->instance
, device
,
156 VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR
,
157 "DRM_IOCTL_I915_GEM_GET_TILING failed for "
158 "VkNativeBufferANDROID");
161 result
= vk_errorf(device
->instance
, device
,
162 VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR
,
163 "DRM_IOCTL_I915_GEM_GET_TILING returned unknown "
164 "tiling %d for VkNativeBufferANDROID", i915_tiling
);
168 enum isl_format format
= anv_get_isl_format(&device
->info
,
170 VK_IMAGE_ASPECT_COLOR_BIT
,
172 assert(format
!= ISL_FORMAT_UNSUPPORTED
);
/* Convert the gralloc stride to bytes via bits-per-block / 8 — assumes
 * gralloc_info->stride is in pixels; TODO(review) confirm against the
 * gralloc implementation.
 */
174 anv_info
.stride
= gralloc_info
->stride
*
175 (isl_format_get_layout(format
)->bpb
/ 8);
177 result
= anv_image_create(device_h
, &anv_info
, alloc
, &image_h
);
178 image
= anv_image_from_handle(image_h
);
179 if (result
!= VK_SUCCESS
)
/* The imported dma-buf must be large enough to back the image layout we
 * just computed.
 */
182 if (bo
->size
< image
->size
) {
183 result
= vk_errorf(device
->instance
, device
,
184 VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR
,
185 "dma-buf from VkNativeBufferANDROID is too small for "
186 "VkImage: %"PRIu64
"B < %"PRIu64
"B",
187 bo
->size
, image
->size
);
191 assert(image
->n_planes
== 1);
192 assert(image
->planes
[0].address
.offset
== 0);
194 image
->planes
[0].address
.bo
= bo
;
195 image
->planes
[0].bo_is_owned
= true;
197 /* We need to set the WRITE flag on window system buffers so that GEM will
198 * know we're writing to them and synchronize uses on other rings (for
199 * example, if the display server uses the blitter ring).
201 * If this function fails and if the imported bo was resident in the cache,
202 * we should avoid updating the bo's flags. Therefore, we defer updating
203 * the flags until success is certain.
206 bo
->flags
&= ~EXEC_OBJECT_ASYNC
;
207 bo
->flags
|= EXEC_OBJECT_WRITE
;
209 /* Don't clobber the out-parameter until success is certain. */
210 *out_image_h
= image_h
;
/* Error-cleanup path: destroy the partially-built image and drop the
 * cached bo reference taken by anv_bo_cache_import().
 */
215 anv_DestroyImage(device_h
, image_h
, alloc
);
218 anv_bo_cache_release(device
, &device
->bo_cache
, bo
);
/* VK_ANDROID_native_buffer entry point: translate the VkImageUsageFlags
 * an application requests for a swapchain image into gralloc usage bits,
 * rejecting combinations the driver cannot support.
 */
223 VkResult
anv_GetSwapchainGrallocUsageANDROID(
226 VkImageUsageFlags imageUsage
,
229 ANV_FROM_HANDLE(anv_device
, device
, device_h
);
230 struct anv_physical_device
*phys_dev
= &device
->instance
->physicalDevice
;
231 VkPhysicalDevice phys_dev_h
= anv_physical_device_to_handle(phys_dev
);
235 intel_logd("%s: format=%d, usage=0x%x", __func__
, format
, imageUsage
);
237 /* WARNING: Android Nougat's libvulkan.so hardcodes the VkImageUsageFlags
238 * returned to applications via VkSurfaceCapabilitiesKHR::supportedUsageFlags.
239 * The relevant code in libvulkan/swapchain.cpp contains this fun comment:
241 * TODO(jessehall): I think these are right, but haven't thought hard
242 * about it. Do we need to query the driver for support of any of
245 * Any disagreement between this function and the hardcoded
246 * VkSurfaceCapabilitiesKHR:supportedUsageFlags causes tests
247 * dEQP-VK.wsi.android.swapchain.*.image_usage to fail.
250 const VkPhysicalDeviceImageFormatInfo2KHR image_format_info
= {
251 .sType
= VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR
,
253 .type
= VK_IMAGE_TYPE_2D
,
254 .tiling
= VK_IMAGE_TILING_OPTIMAL
,
258 VkImageFormatProperties2KHR image_format_props
= {
259 .sType
= VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR
,
262 /* Check that requested format and usage are supported. */
263 result
= anv_GetPhysicalDeviceImageFormatProperties2(phys_dev_h
,
264 &image_format_info
, &image_format_props
);
265 if (result
!= VK_SUCCESS
) {
266 return vk_errorf(device
->instance
, device
, result
,
267 "anv_GetPhysicalDeviceImageFormatProperties2 failed "
268 "inside %s", __func__
);
271 /* Reject STORAGE here to avoid complexity elsewhere. */
272 if (imageUsage
& VK_IMAGE_USAGE_STORAGE_BIT
) {
273 return vk_errorf(device
->instance
, device
, VK_ERROR_FORMAT_NOT_SUPPORTED
,
274 "VK_IMAGE_USAGE_STORAGE_BIT unsupported for gralloc "
/* Map Vulkan usage bits onto gralloc usage bits, clearing each group of
 * bits from imageUsage as it is handled (see unmask32). Whatever bits
 * remain afterwards are unsupported.
 */
278 if (unmask32(&imageUsage
, VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
279 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
))
280 *grallocUsage
|= GRALLOC_USAGE_HW_RENDER
;
282 if (unmask32(&imageUsage
, VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
283 VK_IMAGE_USAGE_SAMPLED_BIT
|
284 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
))
285 *grallocUsage
|= GRALLOC_USAGE_HW_TEXTURE
;
287 /* All VkImageUsageFlags not explicitly checked here are unsupported for
288 * gralloc swapchains.
290 if (imageUsage
!= 0) {
291 return vk_errorf(device
->instance
, device
, VK_ERROR_FORMAT_NOT_SUPPORTED
,
292 "unsupported VkImageUsageFlags(0x%x) for gralloc "
293 "swapchain", imageUsage
);
296 /* The below formats support GRALLOC_USAGE_HW_FB (that is, display
297 * scanout). This short list of formats is universally supported on Intel
298 * but is incomplete. The full set of supported formats is dependent on
299 * kernel and hardware.
301 * FINISHME: Advertise all display-supported formats.
304 case VK_FORMAT_B8G8R8A8_UNORM
:
305 case VK_FORMAT_B5G6R5_UNORM_PACK16
:
306 case VK_FORMAT_R8G8B8A8_UNORM
:
307 case VK_FORMAT_R8G8B8A8_SRGB
:
308 *grallocUsage
|= GRALLOC_USAGE_HW_FB
|
309 GRALLOC_USAGE_HW_COMPOSER
|
310 GRALLOC_USAGE_EXTERNAL_DISP
;
313 intel_logw("%s: unsupported format=%d", __func__
, format
);
/* If no gralloc usage bits were produced at all, the combination is
 * unusable for a swapchain.
 */
316 if (*grallocUsage
== 0)
317 return VK_ERROR_FORMAT_NOT_SUPPORTED
;
/* VK_ANDROID_native_buffer entry point: make a presented image ready for
 * re-use by the application. Blocks on the incoming native fence fd (which
 * this function owns and always closes), then signals the caller-provided
 * semaphore and/or fence via an empty queue submission.
 */
323 anv_AcquireImageANDROID(
327 VkSemaphore semaphore_h
,
330 ANV_FROM_HANDLE(anv_device
, device
, device_h
);
331 VkResult result
= VK_SUCCESS
;
333 if (nativeFenceFd
!= -1) {
334 /* As a simple, firstpass implementation of VK_ANDROID_native_buffer, we
335 * block on the nativeFenceFd. This may introduce latency and is
336 * definitely inefficient, yet it's correct.
338 * FINISHME(chadv): Import the nativeFenceFd into the VkSemaphore and
341 if (sync_wait(nativeFenceFd
, /*timeout*/ -1) < 0) {
342 result
= vk_errorf(device
->instance
, device
, VK_ERROR_DEVICE_LOST
,
343 "%s: failed to wait on nativeFenceFd=%d",
344 __func__
, nativeFenceFd
);
347 /* From VK_ANDROID_native_buffer's pseudo spec
348 * (https://source.android.com/devices/graphics/implement-vulkan):
350 * The driver takes ownership of the fence fd and is responsible for
351 * closing it [...] even if vkAcquireImageANDROID fails and returns
354 close(nativeFenceFd
);
356 if (result
!= VK_SUCCESS
)
/* Put the provided semaphore/fence into the signaled state so later
 * waits on them behave as the client expects.
 */
360 if (semaphore_h
|| fence_h
) {
361 /* Thanks to implicit sync, the image is ready for GPU access. But we
362 * must still put the semaphore into the "submit" state; otherwise the
363 * client may get unexpected behavior if the client later uses it as
366 * Because we blocked above on the nativeFenceFd, the image is also
367 * ready for foreign-device access (including CPU access). But we must
368 * still signal the fence; otherwise the client may get unexpected
369 * behavior if the client later waits on it.
371 * For some values of anv_semaphore_type, we must submit the semaphore
372 * to execbuf in order to signal it. Likewise for anv_fence_type.
373 * Instead of open-coding here the signal operation for each
374 * anv_semaphore_type and anv_fence_type, we piggy-back on
377 const VkSubmitInfo submit
= {
378 .sType
= VK_STRUCTURE_TYPE_SUBMIT_INFO
,
379 .waitSemaphoreCount
= 0,
380 .commandBufferCount
= 0,
381 .signalSemaphoreCount
= (semaphore_h
? 1 : 0),
382 .pSignalSemaphores
= &semaphore_h
,
385 result
= anv_QueueSubmit(anv_queue_to_handle(&device
->queue
), 1,
387 if (result
!= VK_SUCCESS
) {
388 return vk_errorf(device
->instance
, device
, result
,
389 "anv_QueueSubmit failed inside %s", __func__
);
/* VK_ANDROID_native_buffer entry point: queue a wait on the present
 * semaphores before the image is handed back to the compositor. No
 * explicit release fence is produced; implicit sync covers ordering, so
 * *pNativeFenceFd is set to -1.
 * NOTE(review): the tail of this function is outside this chunk.
 */
397 anv_QueueSignalReleaseImageANDROID(
399 uint32_t waitSemaphoreCount
,
400 const VkSemaphore
* pWaitSemaphores
,
406 if (waitSemaphoreCount
== 0)
/* Flush the wait semaphores through an otherwise-empty submission. */
409 result
= anv_QueueSubmit(queue
, 1,
411 .sType
= VK_STRUCTURE_TYPE_SUBMIT_INFO
,
412 .waitSemaphoreCount
= 1,
413 .pWaitSemaphores
= pWaitSemaphores
,
415 (VkFence
) VK_NULL_HANDLE
);
416 if (result
!= VK_SUCCESS
)
420 if (pNativeFenceFd
) {
421 /* We can rely on implicit sync because above we submitted all
422 * semaphores to the queue.
424 *pNativeFenceFd
= -1;