/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"
struct anv_dispatch_table dtable;
static void
compiler_debug_log(void *data, const char *fmt, ...)
{
}

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);

   va_end(args);
}
static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                       "failed to open %s: %m", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   device->path = path;

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get chipset id: %m");
      goto fail;
   }

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id);
   if (!device->info) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get device info");
      goto fail;
   }

   if (device->info->gen == 7 &&
       !device->info->is_haswell && !device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
   } else if (device->info->gen == 8 && !device->info->is_cherryview) {
      /* Broadwell is as fully supported as anything */
   } else {
      result = vk_errorf(VK_UNSUPPORTED,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "non-llc gpu"); /* message is a guess; original string elided */
      goto fail;
   }

   brw_process_intel_debug_variable();

   device->compiler = brw_compiler_create(NULL, device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;

   isl_device_init(&device->isl_dev, device->info);

   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}
static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   ralloc_free(device->compiler);
}
static void *default_alloc(
    void *pUserData,
    size_t size,
    size_t alignment,
    VkSystemAllocType allocType)
{
   /* Original body elided; a plain malloc matches the default-callback intent. */
   return malloc(size);
}

static void default_free(
    void *pUserData,
    void *pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};
static const VkExtensionProperties global_extensions[] = {
   {
      .extName = VK_EXT_KHR_SWAPCHAIN_EXTENSION_NAME,
   },
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extName = VK_EXT_KHR_DEVICE_SWAPCHAIN_EXTENSION_NAME,
   },
};
VkResult anv_CreateInstance(
    const VkInstanceCreateInfo* pCreateInfo,
    VkInstance* pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAppInfo->apiVersion != VK_MAKE_VERSION(0, 170, 2))
      return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }

   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
   instance->physicalDeviceCount = -1;

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   anv_init_wsi(instance);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}
void anv_DestroyInstance(
    VkInstance _instance)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   anv_finish_wsi(instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   instance->pfnFree(instance->pAllocUserData, instance);
}
void *
anv_instance_alloc(struct anv_instance *instance, size_t size,
                   size_t alignment, VkSystemAllocType allocType)
{
   void *mem = instance->pfnAlloc(instance->pAllocUserData,
                                  size, alignment, allocType);

   VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
   VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));

   return mem;
}
void
anv_instance_free(struct anv_instance *instance, void *mem)
{
   VG(VALGRIND_MEMPOOL_FREE(instance, mem));

   instance->pfnFree(instance->pAllocUserData, mem);
}
VkResult anv_EnumeratePhysicalDevices(
    VkInstance _instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      result = anv_physical_device_init(&instance->physicalDevice,
                                        instance, "/dev/dri/renderD128");
      if (result == VK_UNSUPPORTED) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
    *    an array of at least this many VkPhysicalDevice handles [...].
    *
    * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    * overwrites the contents of the variable pointed to by
    * pPhysicalDeviceCount with the number of physical devices in the
    * instance; otherwise, vkEnumeratePhysicalDevices overwrites
    * pPhysicalDeviceCount with the number of physical handles written to
    * pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
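
/* Illustrative sketch, not part of the original file: the two-call pattern
 * described in the comment above, as an application would typically drive
 * this entrypoint. The variable names below are hypothetical.
 *
 *    uint32_t count = 0;
 *    vkEnumeratePhysicalDevices(instance, &count, NULL);   // query the count
 *    VkPhysicalDevice devices[1];
 *    if (count > 1)
 *       count = 1;
 *    vkEnumeratePhysicalDevices(instance, &count, devices); // fill handles
 */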
VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceFeatures* pFeatures)
{
   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .multiDrawIndirect = true,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .occlusionQueryNonConservative = false, /* FINISHME */
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderImageGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
   };

   return VK_SUCCESS;
}
VkResult anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceProperties* pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct brw_device_info *devinfo = pdevice->info;

   anv_finishme("Get correct values for VkPhysicalDeviceLimits");

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),

      /* Broadwell supports 1, 2, 4, and 8 samples. */

      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(0, 170, 2),
      .deviceId = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   strcpy(pProperties->deviceName, pdevice->name);
   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
            "anv-%s", MESA_GIT_SHA1 + 4);

   return VK_SUCCESS;
}
VkResult anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pCount,
    VkQueueFamilyProperties* pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return VK_SUCCESS;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |

      .supportsTimestamps = true,
   };

   return VK_SUCCESS;
}
VkResult anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;
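
   /* Illustrative numbers, not from the original source: a 4 GiB aperture
    * reported by the kernel would be exposed here as a 3 GiB heap, leaving
    * 1 GiB of slack for the driver's own allocations.
    */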
   /* The property flags below are valid only for llc platforms. */
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_HOST_LOCAL_BIT,
   };

   return VK_SUCCESS;
}
PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance instance,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice device,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}
static void
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}
static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
}
VkResult anv_CreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    VkDevice* pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   anv_set_dispatch_gen(physical_device->info->gen);

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 4096);
   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_bo_init_new(&device->workaround_bo, device, 1024);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;
   device->isl_dev = physical_device->isl_dev;

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

fail:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_INITIALIZATION_FAILED);
}
void anv_DestroyDevice(
    VkDevice _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);

   anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
   anv_gem_close(device, device->workaround_bo.gem_handle);

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   anv_instance_free(device->instance, device);
}
VkResult anv_EnumerateInstanceExtensionProperties(
    const char* pLayerName,
    uint32_t* pCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(global_extensions));

   *pCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}
VkResult anv_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice physicalDevice,
    const char* pLayerName,
    uint32_t* pCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(device_extensions));

   *pCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}
VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t* pCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_EnumerateDeviceLayerProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}
VkResult anv_GetDeviceQueue(
    VkDevice _device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue* pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);

   return VK_SUCCESS;
}
VkResult anv_QueueSubmit(
    VkQueue _queue,
    uint32_t cmdBufferCount,
    const VkCmdBuffer* pCmdBuffers,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);

      assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
      if (ret != 0) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "execbuf2 failed: %m");
      }

      if (fence) {
         ret = anv_gem_execbuffer(device, &fence->execbuf);
         if (ret != 0) {
            /* We don't know the real error. */
            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "execbuf2 failed: %m");
         }
      }

      for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
         cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
   }

   return VK_SUCCESS;
}
VkResult anv_QueueWaitIdle(
    VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}
VkResult anv_DeviceWaitIdle(
    VkDevice _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);
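
   /* Note added for clarity, not in the original: the trailing MI_NOOP is,
    * as far as I can tell, padding so the batch length ends on a QWord
    * (8 byte) boundary, which the hardware expects; MI_BATCH_BUFFER_END
    * alone is a single dword.
    */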
   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}
void *
anv_device_alloc(struct anv_device *device, size_t size, size_t alignment,
                 VkSystemAllocType allocType)
{
   return anv_instance_alloc(device->instance, size, alignment, allocType);
}

void
anv_device_free(struct anv_device *device, void *mem)
{
   anv_instance_free(device->instance, mem);
}
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   return VK_SUCCESS;
}
VkResult anv_AllocMemory(
    VkDevice _device,
    const VkMemoryAllocInfo* pAllocInfo,
    VkDeviceMemory* pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   /* We support exactly one memory heap. */
   assert(pAllocInfo->memoryTypeIndex == 0);

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail:
   anv_device_free(device, mem);

   return result;
}
void anv_FreeMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);
}
VkResult anv_MapMemory(
    VkDevice _device,
    VkDeviceMemory _mem,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkMemoryMapFlags flags,
    void** ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit ... */
   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}
void anv_UnmapMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);
}
VkResult anv_FlushMappedMemoryRanges(
    VkDevice device,
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice device,
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)
{
   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
VkResult anv_GetBufferMemoryRequirements(
    VkDevice device,
    VkBuffer _buffer,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;

   return VK_SUCCESS;
}
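
/* Illustrative note, not in the original: with memoryTypeBits == 1 only bit 0
 * is set, so the only memoryTypeIndex an application may pass to vkAllocMemory
 * for this resource is 0 -- matching the single memory type advertised in
 * anv_GetPhysicalDeviceMemoryProperties() above.
 */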
VkResult anv_GetImageMemoryRequirements(
    VkDevice device,
    VkImage _image,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;

   return VK_SUCCESS;
}
VkResult anv_GetImageSparseMemoryRequirements(
    VkDevice device,
    VkImage image,
    uint32_t* pNumRequirements,
    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
   return vk_error(VK_UNSUPPORTED);
}

VkResult anv_GetDeviceMemoryCommitment(
    VkDevice device,
    VkDeviceMemory memory,
    VkDeviceSize* pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
}
VkResult anv_BindBufferMemory(
    VkDevice device,
    VkBuffer _buffer,
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   buffer->bo = &mem->bo;
   buffer->offset = memOffset;

   return VK_SUCCESS;
}
VkResult anv_BindImageMemory(
    VkDevice device,
    VkImage _image,
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_image, image, _image);

   image->bo = &mem->bo;
   image->offset = memOffset;

   return VK_SUCCESS;
}
VkResult anv_QueueBindSparseBufferMemory(
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageOpaqueMemory(
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageMemory(
    uint32_t numBindings,
    const VkSparseImageMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateFence(
    VkDevice _device,
    const VkFenceCreateInfo* pCreateInfo,
    VkFence* pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)
      goto fail;

   fence->bo.map =
      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = batch.start = fence->bo.map;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;

fail:
   anv_device_free(device, fence);

   return result;
}
void anv_DestroyFence(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);
}
VkResult anv_ResetFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}
VkResult anv_GetFenceStatus(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}
VkResult anv_WaitForFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
    * for a couple of kernel releases. Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX. This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   int64_t t = timeout;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME) {
         return VK_TIMEOUT;
      } else if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem wait failed: %m");
      }
   }

   return VK_SUCCESS;
}
// Queue semaphore functions

VkResult anv_CreateSemaphore(
    const VkSemaphoreCreateInfo* pCreateInfo,
    VkSemaphore* pSemaphore)
{
   pSemaphore->handle = 1;
   stub_return(VK_SUCCESS);
}

void anv_DestroySemaphore(
    VkSemaphore semaphore)

VkResult anv_QueueSignalSemaphore(
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueWaitSemaphore(
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateEvent(
    VkDevice device,
    const VkEventCreateInfo* pCreateInfo,
    VkEvent* pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyEvent(
    VkDevice device,
    VkEvent event)

VkResult anv_GetEventStatus(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_SetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_ResetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}
VkResult anv_CreateBuffer(
    VkDevice _device,
    const VkBufferCreateInfo* pCreateInfo,
    VkBuffer* pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}
void anv_DestroyBuffer(
    VkDevice _device,
    VkBuffer _buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);
}
void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
                              const struct anv_format *format,
                              uint32_t offset, uint32_t range, uint32_t stride)
{
   switch (device->info.gen) {
   case 7:
      gen7_fill_buffer_surface_state(state, format, offset, range, stride);
      break;
   case 8:
      gen8_fill_buffer_surface_state(state, format, offset, range, stride);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_CreateBufferView(
    VkDevice _device,
    const VkBufferViewCreateInfo* pCreateInfo,
    VkBufferView* pView)
{
   stub_return(VK_UNSUPPORTED);
}

void anv_DestroyBufferView(
    VkDevice _device,
    VkBufferView _bview)
void anv_DestroySampler(
    VkDevice _device,
    VkSampler _sampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);
}
// Descriptor set functions

VkResult anv_CreateDescriptorSetLayout(
    VkDevice _device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    VkDescriptorSetLayout* pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t immutable_sampler_count = 0;
   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      if (pCreateInfo->pBinding[b].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBinding[b].arraySize;
   }

   size_t size = sizeof(struct anv_descriptor_set_layout) +
                 pCreateInfo->count * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct anv_sampler *);

   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct anv_sampler **samplers =
      (struct anv_sampler **)&set_layout->binding[pCreateInfo->count];

   set_layout->binding_count = pCreateInfo->count;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   /* Initialize all binding_layout entries to -1 */
   memset(set_layout->binding, -1,
          pCreateInfo->count * sizeof(set_layout->binding[0]));

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t dynamic_offset_count = 0;

   for (uint32_t b = 0; b < pCreateInfo->count; b++) {
      uint32_t array_size = MAX2(1, pCreateInfo->pBinding[b].arraySize);
      set_layout->binding[b].array_size = array_size;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += array_size;

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[b].stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += array_size;
         }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[b].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += array_size;
         break;
      default:
         break;
      }

      if (pCreateInfo->pBinding[b].pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += array_size;

         for (uint32_t i = 0; i < array_size; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(pCreateInfo->pBinding[b].pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= pCreateInfo->pBinding[b].stageFlags;
   }

   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}
void anv_DestroyDescriptorSetLayout(
    VkDevice _device,
    VkDescriptorSetLayout _set_layout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);
}
VkResult anv_CreateDescriptorPool(
    VkDevice device,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    VkDescriptorPool* pDescriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;
   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice _device,
    VkDescriptorPool _pool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
}

VkResult anv_ResetDescriptorPool(
    VkDevice device,
    VkDescriptorPool descriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   set->layout = layout;

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
            desc[i].sampler = layout->binding[b].immutable_samplers[i];
      }

      desc += layout->binding[b].array_size;
   }

   *out_set = set;

   return VK_SUCCESS;
}
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
VkResult anv_AllocDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    VkDescriptorSetUsage setUsage,
    uint32_t count,
    const VkDescriptorSetLayout* pSetLayouts,
    VkDescriptorSet* pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);

   return result;
}
VkResult anv_FreeDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    uint32_t count,
    const VkDescriptorSet* pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
   }

   return VK_SUCCESS;
}
void anv_UpdateDescriptorSets(
    VkDevice device,
    uint32_t writeCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t copyCount,
    const VkCopyDescriptorSet* pDescriptorCopies)
{
   for (uint32_t i = 0; i < writeCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
      const struct anv_descriptor_set_binding_layout *bind_layout =
         &set->layout->binding[write->destBinding];
      struct anv_descriptor *desc =
         &set->descriptors[bind_layout->descriptor_index];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            desc[j] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            desc[j].image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * ... */
            if (sampler)
               desc->sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_finishme("texel buffers not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->count; j++) {
            assert(write->pDescriptors[j].bufferInfo.buffer.handle);
            ANV_FROM_HANDLE(anv_buffer, buffer,
                            write->pDescriptors[j].bufferInfo.buffer);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .buffer = buffer,
               .offset = write->pDescriptors[j].bufferInfo.offset,
               .range = write->pDescriptors[j].bufferInfo.range,
            };

            /* For buffers with dynamic offsets, we use the full possible
             * range in the surface state and do the actual range-checking
             * ... */
            if (bind_layout->dynamic_offset_index >= 0)
               desc[j].range = buffer->size - desc[j].offset;
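
            /* Illustrative note, not in the original: with a 1024 byte
             * buffer bound at offset 256, the descriptor's range is forced
             * to 768 here regardless of the range the application wrote,
             * with the real range check applied elsewhere (per the comment
             * above).
             */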
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < copyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
      for (uint32_t j = 0; j < copy->count; j++) {
         dest->descriptors[copy->destBinding + j] =
            src->descriptors[copy->srcBinding + j];
      }
   }
}
VkResult anv_CreateFramebuffer(
    VkDevice _device,
    const VkFramebufferCreateInfo* pCreateInfo,
    VkFramebuffer* pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_device_alloc(device, size, 8,
                                  VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}
void anv_DestroyFramebuffer(
    VkDevice _device,
    VkFramebuffer _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_device_free(device, fb);
}
void vkCmdDbgMarkerBegin(
    VkCmdBuffer cmdBuffer,
    const char* pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
    VkCmdBuffer cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer cmdBuffer,
    const char* pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer cmdBuffer)
{
}