2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
32 #include "util/strtod.h"
35 anv_physical_device_init(struct anv_physical_device
*device
,
36 struct anv_instance
*instance
,
41 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
43 return vk_error(VK_ERROR_UNAVAILABLE
);
45 device
->instance
= instance
;
48 device
->chipset_id
= anv_gem_get_param(fd
, I915_PARAM_CHIPSET_ID
);
49 if (!device
->chipset_id
)
52 device
->name
= brw_get_device_name(device
->chipset_id
);
53 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
57 if (anv_gem_get_aperture(fd
, &device
->aperture_size
) == -1)
60 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_WAIT_TIMEOUT
))
63 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXECBUF2
))
66 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_LLC
))
69 if (!anv_gem_get_param(fd
, I915_PARAM_HAS_EXEC_CONSTANTS
))
78 return vk_error(VK_ERROR_UNAVAILABLE
);
81 static void *default_alloc(
85 VkSystemAllocType allocType
)
90 static void default_free(
97 static const VkAllocCallbacks default_alloc_callbacks
= {
99 .pfnAlloc
= default_alloc
,
100 .pfnFree
= default_free
103 VkResult
anv_CreateInstance(
104 const VkInstanceCreateInfo
* pCreateInfo
,
105 VkInstance
* pInstance
)
107 struct anv_instance
*instance
;
108 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
109 void *user_data
= NULL
;
111 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
113 if (pCreateInfo
->pAllocCb
) {
114 alloc_callbacks
= pCreateInfo
->pAllocCb
;
115 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
117 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
118 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
120 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
122 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
123 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
124 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
125 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
126 instance
->physicalDeviceCount
= 0;
130 VG(VALGRIND_CREATE_MEMPOOL(instance
, 0, false));
132 *pInstance
= anv_instance_to_handle(instance
);
137 VkResult
anv_DestroyInstance(
138 VkInstance _instance
)
140 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
142 VG(VALGRIND_DESTROY_MEMPOOL(instance
));
146 instance
->pfnFree(instance
->pAllocUserData
, instance
);
152 anv_instance_alloc(struct anv_instance
*instance
, size_t size
,
153 size_t alignment
, VkSystemAllocType allocType
)
155 void *mem
= instance
->pfnAlloc(instance
->pAllocUserData
,
156 size
, alignment
, allocType
);
158 VALGRIND_MEMPOOL_ALLOC(instance
, mem
, size
);
159 VALGRIND_MAKE_MEM_UNDEFINED(mem
, size
);
165 anv_instance_free(struct anv_instance
*instance
, void *mem
)
170 VALGRIND_MEMPOOL_FREE(instance
, mem
);
172 instance
->pfnFree(instance
->pAllocUserData
, mem
);
175 VkResult
anv_EnumeratePhysicalDevices(
176 VkInstance _instance
,
177 uint32_t* pPhysicalDeviceCount
,
178 VkPhysicalDevice
* pPhysicalDevices
)
180 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
183 if (instance
->physicalDeviceCount
== 0) {
184 result
= anv_physical_device_init(&instance
->physicalDevice
,
185 instance
, "/dev/dri/renderD128");
186 if (result
!= VK_SUCCESS
)
189 instance
->physicalDeviceCount
= 1;
192 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
193 * otherwise it's an inout parameter.
195 * The Vulkan spec (git aaed022) says:
197 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
198 * that is initialized with the number of devices the application is
199 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
200 * an array of at least this many VkPhysicalDevice handles [...].
202 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
203 * overwrites the contents of the variable pointed to by
204 * pPhysicalDeviceCount with the number of physical devices in in the
205 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
206 * pPhysicalDeviceCount with the number of physical handles written to
209 if (!pPhysicalDevices
) {
210 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
211 } else if (*pPhysicalDeviceCount
>= 1) {
212 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
213 *pPhysicalDeviceCount
= 1;
215 *pPhysicalDeviceCount
= 0;
221 VkResult
anv_GetPhysicalDeviceFeatures(
222 VkPhysicalDevice physicalDevice
,
223 VkPhysicalDeviceFeatures
* pFeatures
)
225 anv_finishme("Get correct values for PhysicalDeviceFeatures");
227 *pFeatures
= (VkPhysicalDeviceFeatures
) {
228 .robustBufferAccess
= false,
229 .fullDrawIndexUint32
= false,
230 .imageCubeArray
= false,
231 .independentBlend
= false,
232 .geometryShader
= true,
233 .tessellationShader
= false,
234 .sampleRateShading
= false,
235 .dualSourceBlend
= true,
237 .instancedDrawIndirect
= true,
239 .depthBiasClamp
= false,
240 .fillModeNonSolid
= true,
241 .depthBounds
= false,
244 .textureCompressionETC2
= true,
245 .textureCompressionASTC_LDR
= true,
246 .textureCompressionBC
= true,
247 .pipelineStatisticsQuery
= true,
248 .vertexSideEffects
= false,
249 .tessellationSideEffects
= false,
250 .geometrySideEffects
= false,
251 .fragmentSideEffects
= false,
252 .shaderTessellationPointSize
= false,
253 .shaderGeometryPointSize
= true,
254 .shaderTextureGatherExtended
= true,
255 .shaderStorageImageExtendedFormats
= false,
256 .shaderStorageImageMultisample
= false,
257 .shaderStorageBufferArrayConstantIndexing
= false,
258 .shaderStorageImageArrayConstantIndexing
= false,
259 .shaderUniformBufferArrayDynamicIndexing
= true,
260 .shaderSampledImageArrayDynamicIndexing
= false,
261 .shaderStorageBufferArrayDynamicIndexing
= false,
262 .shaderStorageImageArrayDynamicIndexing
= false,
263 .shaderClipDistance
= false,
264 .shaderCullDistance
= false,
265 .shaderFloat64
= false,
266 .shaderInt64
= false,
267 .shaderFloat16
= false,
268 .shaderInt16
= false,
274 VkResult
anv_GetPhysicalDeviceLimits(
275 VkPhysicalDevice physicalDevice
,
276 VkPhysicalDeviceLimits
* pLimits
)
278 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
279 const struct brw_device_info
*devinfo
= physical_device
->info
;
281 anv_finishme("Get correct values for PhysicalDeviceLimits");
283 *pLimits
= (VkPhysicalDeviceLimits
) {
284 .maxImageDimension1D
= (1 << 14),
285 .maxImageDimension2D
= (1 << 14),
286 .maxImageDimension3D
= (1 << 10),
287 .maxImageDimensionCube
= (1 << 14),
288 .maxImageArrayLayers
= (1 << 10),
289 .maxTexelBufferSize
= (1 << 14),
290 .maxUniformBufferSize
= UINT32_MAX
,
291 .maxStorageBufferSize
= UINT32_MAX
,
292 .maxPushConstantsSize
= 128,
293 .maxMemoryAllocationCount
= UINT32_MAX
,
294 .bufferImageGranularity
= 64, /* A cache line */
295 .maxBoundDescriptorSets
= MAX_SETS
,
296 .maxDescriptorSets
= UINT32_MAX
,
297 .maxPerStageDescriptorSamplers
= 64,
298 .maxPerStageDescriptorUniformBuffers
= 64,
299 .maxPerStageDescriptorStorageBuffers
= 64,
300 .maxPerStageDescriptorSampledImages
= 64,
301 .maxPerStageDescriptorStorageImages
= 64,
302 .maxDescriptorSetSamplers
= 256,
303 .maxDescriptorSetUniformBuffers
= 256,
304 .maxDescriptorSetStorageBuffers
= 256,
305 .maxDescriptorSetSampledImages
= 256,
306 .maxDescriptorSetStorageImages
= 256,
307 .maxVertexInputAttributes
= 32,
308 .maxVertexInputAttributeOffset
= 256,
309 .maxVertexInputBindingStride
= 256,
310 .maxVertexOutputComponents
= 32,
311 .maxTessGenLevel
= 0,
312 .maxTessPatchSize
= 0,
313 .maxTessControlPerVertexInputComponents
= 0,
314 .maxTessControlPerVertexOutputComponents
= 0,
315 .maxTessControlPerPatchOutputComponents
= 0,
316 .maxTessControlTotalOutputComponents
= 0,
317 .maxTessEvaluationInputComponents
= 0,
318 .maxTessEvaluationOutputComponents
= 0,
319 .maxGeometryShaderInvocations
= 6,
320 .maxGeometryInputComponents
= 16,
321 .maxGeometryOutputComponents
= 16,
322 .maxGeometryOutputVertices
= 16,
323 .maxGeometryTotalOutputComponents
= 16,
324 .maxFragmentInputComponents
= 16,
325 .maxFragmentOutputBuffers
= 8,
326 .maxFragmentDualSourceBuffers
= 2,
327 .maxFragmentCombinedOutputResources
= 8,
328 .maxComputeSharedMemorySize
= 1024,
329 .maxComputeWorkGroupCount
= {
330 16 * devinfo
->max_cs_threads
,
331 16 * devinfo
->max_cs_threads
,
332 16 * devinfo
->max_cs_threads
,
334 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
335 .maxComputeWorkGroupSize
= {
336 16 * devinfo
->max_cs_threads
,
337 16 * devinfo
->max_cs_threads
,
338 16 * devinfo
->max_cs_threads
,
340 .subPixelPrecisionBits
= 4 /* FIXME */,
341 .subTexelPrecisionBits
= 4 /* FIXME */,
342 .mipmapPrecisionBits
= 4 /* FIXME */,
343 .maxDrawIndexedIndexValue
= UINT32_MAX
,
344 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
345 .primitiveRestartForPatches
= UINT32_MAX
,
346 .maxSamplerLodBias
= 16,
347 .maxSamplerAnisotropy
= 16,
349 .maxDynamicViewportStates
= UINT32_MAX
,
350 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
351 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
352 .viewportSubPixelBits
= 13, /* We take a float? */
353 .minMemoryMapAlignment
= 64, /* A cache line */
354 .minTexelBufferOffsetAlignment
= 1,
355 .minUniformBufferOffsetAlignment
= 1,
356 .minStorageBufferOffsetAlignment
= 1,
357 .minTexelOffset
= 0, /* FIXME */
358 .maxTexelOffset
= 0, /* FIXME */
359 .minTexelGatherOffset
= 0, /* FIXME */
360 .maxTexelGatherOffset
= 0, /* FIXME */
361 .minInterpolationOffset
= 0, /* FIXME */
362 .maxInterpolationOffset
= 0, /* FIXME */
363 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
364 .maxFramebufferWidth
= (1 << 14),
365 .maxFramebufferHeight
= (1 << 14),
366 .maxFramebufferLayers
= (1 << 10),
367 .maxFramebufferColorSamples
= 8,
368 .maxFramebufferDepthSamples
= 8,
369 .maxFramebufferStencilSamples
= 8,
370 .maxColorAttachments
= MAX_RTS
,
371 .maxSampledImageColorSamples
= 8,
372 .maxSampledImageDepthSamples
= 8,
373 .maxSampledImageIntegerSamples
= 1,
374 .maxStorageImageSamples
= 1,
375 .maxSampleMaskWords
= 1,
376 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
377 .maxClipDistances
= 0 /* FIXME */,
378 .maxCullDistances
= 0 /* FIXME */,
379 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
380 .pointSizeRange
= { 0.125, 255.875 },
381 .lineWidthRange
= { 0.0, 7.9921875 },
382 .pointSizeGranularity
= (1.0 / 8.0),
383 .lineWidthGranularity
= (1.0 / 128.0),
389 VkResult
anv_GetPhysicalDeviceProperties(
390 VkPhysicalDevice physicalDevice
,
391 VkPhysicalDeviceProperties
* pProperties
)
393 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
395 *pProperties
= (VkPhysicalDeviceProperties
) {
396 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
399 .deviceId
= pdevice
->chipset_id
,
400 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
403 strcpy(pProperties
->deviceName
, pdevice
->name
);
404 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
405 "anv-%s", MESA_GIT_SHA1
+ 4);
410 VkResult
anv_GetPhysicalDeviceQueueCount(
411 VkPhysicalDevice physicalDevice
,
419 VkResult
anv_GetPhysicalDeviceQueueProperties(
420 VkPhysicalDevice physicalDevice
,
422 VkPhysicalDeviceQueueProperties
* pQueueProperties
)
426 *pQueueProperties
= (VkPhysicalDeviceQueueProperties
) {
427 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
428 VK_QUEUE_COMPUTE_BIT
|
431 .supportsTimestamps
= true,
437 VkResult
anv_GetPhysicalDeviceMemoryProperties(
438 VkPhysicalDevice physicalDevice
,
439 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
441 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
442 VkDeviceSize heap_size
;
444 /* Reserve some wiggle room for the driver by exposing only 75% of the
445 * aperture to the heap.
447 heap_size
= 3 * physical_device
->aperture_size
/ 4;
449 /* The property flags below are valid only for llc platforms. */
450 pMemoryProperties
->memoryTypeCount
= 1;
451 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
452 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
456 pMemoryProperties
->memoryHeapCount
= 1;
457 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
459 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
465 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
469 return anv_lookup_entrypoint(pName
);
472 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
476 return anv_lookup_entrypoint(pName
);
480 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
482 queue
->device
= device
;
483 queue
->pool
= &device
->surface_state_pool
;
485 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
486 if (queue
->completed_serial
.map
== NULL
)
487 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
489 *(uint32_t *)queue
->completed_serial
.map
= 0;
490 queue
->next_serial
= 1;
/* Tear down the queue.  The backing pool is destroyed with the device, so the
 * explicit free only matters for keeping valgrind's mempool bookkeeping clean.
 */
void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}
507 anv_device_init_border_colors(struct anv_device
*device
)
509 static const VkClearColorValue border_colors
[] = {
510 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 0.0 } },
511 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 1.0 } },
512 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .f32
= { 1.0, 1.0, 1.0, 1.0 } },
513 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .u32
= { 0, 0, 0, 0 } },
514 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .u32
= { 0, 0, 0, 1 } },
515 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .u32
= { 1, 1, 1, 1 } },
518 device
->border_colors
=
519 anv_state_pool_alloc(&device
->dynamic_state_pool
,
520 sizeof(border_colors
), 32);
521 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
524 VkResult
anv_CreateDevice(
525 VkPhysicalDevice physicalDevice
,
526 const VkDeviceCreateInfo
* pCreateInfo
,
529 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
530 struct anv_instance
*instance
= physical_device
->instance
;
531 struct anv_device
*device
;
533 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
535 device
= anv_instance_alloc(instance
, sizeof(*device
), 8,
536 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
538 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
540 device
->instance
= physical_device
->instance
;
542 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
543 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
544 if (device
->fd
== -1)
547 device
->context_id
= anv_gem_create_context(device
);
548 if (device
->context_id
== -1)
551 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
553 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
555 anv_state_pool_init(&device
->dynamic_state_pool
,
556 &device
->dynamic_state_block_pool
);
558 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
559 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 2048);
561 anv_state_pool_init(&device
->surface_state_pool
,
562 &device
->surface_state_block_pool
);
564 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
566 device
->info
= *physical_device
->info
;
568 device
->compiler
= anv_compiler_create(device
);
570 pthread_mutex_init(&device
->mutex
, NULL
);
572 anv_queue_init(device
, &device
->queue
);
574 anv_device_init_meta(device
);
576 anv_device_init_border_colors(device
);
578 *pDevice
= anv_device_to_handle(device
);
585 anv_device_free(device
, device
);
587 return vk_error(VK_ERROR_UNAVAILABLE
);
590 VkResult
anv_DestroyDevice(
593 ANV_FROM_HANDLE(anv_device
, device
, _device
);
595 anv_compiler_destroy(device
->compiler
);
597 anv_queue_finish(&device
->queue
);
599 anv_device_finish_meta(device
);
602 /* We only need to free these to prevent valgrind errors. The backing
603 * BO will go away in a couple of lines so we don't actually leak.
605 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
608 anv_bo_pool_finish(&device
->batch_bo_pool
);
609 anv_state_pool_finish(&device
->dynamic_state_pool
);
610 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
611 anv_block_pool_finish(&device
->instruction_block_pool
);
612 anv_state_pool_finish(&device
->surface_state_pool
);
613 anv_block_pool_finish(&device
->surface_state_block_pool
);
614 anv_block_pool_finish(&device
->scratch_block_pool
);
618 anv_instance_free(device
->instance
, device
);
623 static const VkExtensionProperties global_extensions
[] = {
625 .extName
= "VK_WSI_LunarG",
630 VkResult
anv_GetGlobalExtensionProperties(
631 const char* pLayerName
,
633 VkExtensionProperties
* pProperties
)
635 if (pProperties
== NULL
) {
636 *pCount
= ARRAY_SIZE(global_extensions
);
640 assert(*pCount
< ARRAY_SIZE(global_extensions
));
642 *pCount
= ARRAY_SIZE(global_extensions
);
643 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
648 VkResult
anv_GetPhysicalDeviceExtensionProperties(
649 VkPhysicalDevice physicalDevice
,
650 const char* pLayerName
,
652 VkExtensionProperties
* pProperties
)
654 if (pProperties
== NULL
) {
659 /* None supported at this time */
660 return vk_error(VK_ERROR_INVALID_EXTENSION
);
663 VkResult
anv_GetGlobalLayerProperties(
665 VkLayerProperties
* pProperties
)
667 if (pProperties
== NULL
) {
672 /* None supported at this time */
673 return vk_error(VK_ERROR_INVALID_LAYER
);
676 VkResult
anv_GetPhysicalDeviceLayerProperties(
677 VkPhysicalDevice physicalDevice
,
679 VkLayerProperties
* pProperties
)
681 if (pProperties
== NULL
) {
686 /* None supported at this time */
687 return vk_error(VK_ERROR_INVALID_LAYER
);
690 VkResult
anv_GetDeviceQueue(
692 uint32_t queueNodeIndex
,
696 ANV_FROM_HANDLE(anv_device
, device
, _device
);
698 assert(queueIndex
== 0);
700 *pQueue
= anv_queue_to_handle(&device
->queue
);
705 VkResult
anv_QueueSubmit(
707 uint32_t cmdBufferCount
,
708 const VkCmdBuffer
* pCmdBuffers
,
711 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
712 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
713 struct anv_device
*device
= queue
->device
;
716 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
717 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
719 assert(cmd_buffer
->level
== VK_CMD_BUFFER_LEVEL_PRIMARY
);
721 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
723 return vk_error(VK_ERROR_UNKNOWN
);
726 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
728 return vk_error(VK_ERROR_UNKNOWN
);
731 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
732 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
738 VkResult
anv_QueueWaitIdle(
741 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
743 return vkDeviceWaitIdle(anv_device_to_handle(queue
->device
));
746 VkResult
anv_DeviceWaitIdle(
749 ANV_FROM_HANDLE(anv_device
, device
, _device
);
750 struct anv_state state
;
751 struct anv_batch batch
;
752 struct drm_i915_gem_execbuffer2 execbuf
;
753 struct drm_i915_gem_exec_object2 exec2_objects
[1];
754 struct anv_bo
*bo
= NULL
;
759 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
760 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
761 batch
.start
= batch
.next
= state
.map
;
762 batch
.end
= state
.map
+ 32;
763 anv_batch_emit(&batch
, GEN8_MI_BATCH_BUFFER_END
);
764 anv_batch_emit(&batch
, GEN8_MI_NOOP
);
766 exec2_objects
[0].handle
= bo
->gem_handle
;
767 exec2_objects
[0].relocation_count
= 0;
768 exec2_objects
[0].relocs_ptr
= 0;
769 exec2_objects
[0].alignment
= 0;
770 exec2_objects
[0].offset
= bo
->offset
;
771 exec2_objects
[0].flags
= 0;
772 exec2_objects
[0].rsvd1
= 0;
773 exec2_objects
[0].rsvd2
= 0;
775 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
776 execbuf
.buffer_count
= 1;
777 execbuf
.batch_start_offset
= state
.offset
;
778 execbuf
.batch_len
= batch
.next
- state
.map
;
779 execbuf
.cliprects_ptr
= 0;
780 execbuf
.num_cliprects
= 0;
785 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
786 execbuf
.rsvd1
= device
->context_id
;
789 ret
= anv_gem_execbuffer(device
, &execbuf
);
791 result
= vk_error(VK_ERROR_UNKNOWN
);
796 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
798 result
= vk_error(VK_ERROR_UNKNOWN
);
802 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
807 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
813 anv_device_alloc(struct anv_device
* device
,
816 VkSystemAllocType allocType
)
818 return anv_instance_alloc(device
->instance
, size
, alignment
, allocType
);
822 anv_device_free(struct anv_device
* device
,
825 anv_instance_free(device
->instance
, mem
);
829 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
831 bo
->gem_handle
= anv_gem_create(device
, size
);
833 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
843 VkResult
anv_AllocMemory(
845 const VkMemoryAllocInfo
* pAllocInfo
,
846 VkDeviceMemory
* pMem
)
848 ANV_FROM_HANDLE(anv_device
, device
, _device
);
849 struct anv_device_memory
*mem
;
852 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
854 if (pAllocInfo
->memoryTypeIndex
!= 0) {
855 /* We support exactly one memory heap. */
856 return vk_error(VK_ERROR_INVALID_VALUE
);
859 /* FINISHME: Fail if allocation request exceeds heap size. */
861 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
862 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
864 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
866 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
867 if (result
!= VK_SUCCESS
)
870 *pMem
= anv_device_memory_to_handle(mem
);
875 anv_device_free(device
, mem
);
880 VkResult
anv_FreeMemory(
884 ANV_FROM_HANDLE(anv_device
, device
, _device
);
885 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
888 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
890 if (mem
->bo
.gem_handle
!= 0)
891 anv_gem_close(device
, mem
->bo
.gem_handle
);
893 anv_device_free(device
, mem
);
898 VkResult
anv_MapMemory(
903 VkMemoryMapFlags flags
,
906 ANV_FROM_HANDLE(anv_device
, device
, _device
);
907 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
909 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
910 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
911 * at a time is valid. We could just mmap up front and return an offset
912 * pointer here, but that may exhaust virtual memory on 32 bit
915 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
916 mem
->map_size
= size
;
923 VkResult
anv_UnmapMemory(
927 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
929 anv_gem_munmap(mem
->map
, mem
->map_size
);
934 VkResult
anv_FlushMappedMemoryRanges(
936 uint32_t memRangeCount
,
937 const VkMappedMemoryRange
* pMemRanges
)
939 /* clflush here for !llc platforms */
944 VkResult
anv_InvalidateMappedMemoryRanges(
946 uint32_t memRangeCount
,
947 const VkMappedMemoryRange
* pMemRanges
)
949 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
952 VkResult
anv_GetBufferMemoryRequirements(
955 VkMemoryRequirements
* pMemoryRequirements
)
957 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
959 /* The Vulkan spec (git aaed022) says:
961 * memoryTypeBits is a bitfield and contains one bit set for every
962 * supported memory type for the resource. The bit `1<<i` is set if and
963 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
964 * structure for the physical device is supported.
966 * We support exactly one memory type.
968 pMemoryRequirements
->memoryTypeBits
= 1;
970 pMemoryRequirements
->size
= buffer
->size
;
971 pMemoryRequirements
->alignment
= 16;
976 VkResult
anv_GetImageMemoryRequirements(
979 VkMemoryRequirements
* pMemoryRequirements
)
981 ANV_FROM_HANDLE(anv_image
, image
, _image
);
983 /* The Vulkan spec (git aaed022) says:
985 * memoryTypeBits is a bitfield and contains one bit set for every
986 * supported memory type for the resource. The bit `1<<i` is set if and
987 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
988 * structure for the physical device is supported.
990 * We support exactly one memory type.
992 pMemoryRequirements
->memoryTypeBits
= 1;
994 pMemoryRequirements
->size
= image
->size
;
995 pMemoryRequirements
->alignment
= image
->alignment
;
1000 VkResult
anv_GetImageSparseMemoryRequirements(
1003 uint32_t* pNumRequirements
,
1004 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1006 return vk_error(VK_UNSUPPORTED
);
1009 VkResult
anv_GetDeviceMemoryCommitment(
1011 VkDeviceMemory memory
,
1012 VkDeviceSize
* pCommittedMemoryInBytes
)
1014 *pCommittedMemoryInBytes
= 0;
1015 stub_return(VK_SUCCESS
);
1018 VkResult
anv_BindBufferMemory(
1021 VkDeviceMemory _mem
,
1022 VkDeviceSize memOffset
)
1024 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1025 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1027 buffer
->bo
= &mem
->bo
;
1028 buffer
->offset
= memOffset
;
1033 VkResult
anv_BindImageMemory(
1036 VkDeviceMemory _mem
,
1037 VkDeviceSize memOffset
)
1039 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1040 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1042 image
->bo
= &mem
->bo
;
1043 image
->offset
= memOffset
;
1048 VkResult
anv_QueueBindSparseBufferMemory(
1051 uint32_t numBindings
,
1052 const VkSparseMemoryBindInfo
* pBindInfo
)
1054 stub_return(VK_UNSUPPORTED
);
1057 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1060 uint32_t numBindings
,
1061 const VkSparseMemoryBindInfo
* pBindInfo
)
1063 stub_return(VK_UNSUPPORTED
);
1066 VkResult
anv_QueueBindSparseImageMemory(
1069 uint32_t numBindings
,
1070 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1072 stub_return(VK_UNSUPPORTED
);
1075 VkResult
anv_CreateFence(
1077 const VkFenceCreateInfo
* pCreateInfo
,
1080 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1081 struct anv_fence
*fence
;
1082 struct anv_batch batch
;
1085 const uint32_t fence_size
= 128;
1087 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1089 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1090 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1092 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1094 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1095 if (result
!= VK_SUCCESS
)
1099 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1100 batch
.next
= batch
.start
= fence
->bo
.map
;
1101 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1102 anv_batch_emit(&batch
, GEN8_MI_BATCH_BUFFER_END
);
1103 anv_batch_emit(&batch
, GEN8_MI_NOOP
);
1105 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1106 fence
->exec2_objects
[0].relocation_count
= 0;
1107 fence
->exec2_objects
[0].relocs_ptr
= 0;
1108 fence
->exec2_objects
[0].alignment
= 0;
1109 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1110 fence
->exec2_objects
[0].flags
= 0;
1111 fence
->exec2_objects
[0].rsvd1
= 0;
1112 fence
->exec2_objects
[0].rsvd2
= 0;
1114 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1115 fence
->execbuf
.buffer_count
= 1;
1116 fence
->execbuf
.batch_start_offset
= 0;
1117 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1118 fence
->execbuf
.cliprects_ptr
= 0;
1119 fence
->execbuf
.num_cliprects
= 0;
1120 fence
->execbuf
.DR1
= 0;
1121 fence
->execbuf
.DR4
= 0;
1123 fence
->execbuf
.flags
=
1124 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1125 fence
->execbuf
.rsvd1
= device
->context_id
;
1126 fence
->execbuf
.rsvd2
= 0;
1128 *pFence
= anv_fence_to_handle(fence
);
1133 anv_device_free(device
, fence
);
1138 VkResult
anv_DestroyFence(
1142 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1143 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1145 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1146 anv_gem_close(device
, fence
->bo
.gem_handle
);
1147 anv_device_free(device
, fence
);
1152 VkResult
anv_ResetFences(
1154 uint32_t fenceCount
,
1155 const VkFence
* pFences
)
1157 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1158 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1159 fence
->ready
= false;
1165 VkResult
anv_GetFenceStatus(
1169 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1170 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1177 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1179 fence
->ready
= true;
1183 return VK_NOT_READY
;
1186 VkResult
anv_WaitForFences(
1188 uint32_t fenceCount
,
1189 const VkFence
* pFences
,
1193 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1194 int64_t t
= timeout
;
1197 /* FIXME: handle !waitAll */
1199 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1200 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1201 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1202 if (ret
== -1 && errno
== ETIME
)
1205 return vk_error(VK_ERROR_UNKNOWN
);
1211 // Queue semaphore functions
1213 VkResult
anv_CreateSemaphore(
1215 const VkSemaphoreCreateInfo
* pCreateInfo
,
1216 VkSemaphore
* pSemaphore
)
1218 stub_return(VK_UNSUPPORTED
);
1221 VkResult
anv_DestroySemaphore(
1223 VkSemaphore semaphore
)
1225 stub_return(VK_UNSUPPORTED
);
1228 VkResult
anv_QueueSignalSemaphore(
1230 VkSemaphore semaphore
)
1232 stub_return(VK_UNSUPPORTED
);
1235 VkResult
anv_QueueWaitSemaphore(
1237 VkSemaphore semaphore
)
1239 stub_return(VK_UNSUPPORTED
);
1244 VkResult
anv_CreateEvent(
1246 const VkEventCreateInfo
* pCreateInfo
,
1249 stub_return(VK_UNSUPPORTED
);
1252 VkResult
anv_DestroyEvent(
1256 stub_return(VK_UNSUPPORTED
);
1259 VkResult
anv_GetEventStatus(
1263 stub_return(VK_UNSUPPORTED
);
1266 VkResult
anv_SetEvent(
1270 stub_return(VK_UNSUPPORTED
);
1273 VkResult
anv_ResetEvent(
1277 stub_return(VK_UNSUPPORTED
);
1282 VkResult
anv_CreateBuffer(
1284 const VkBufferCreateInfo
* pCreateInfo
,
1287 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1288 struct anv_buffer
*buffer
;
1290 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1292 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1293 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1295 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1297 buffer
->size
= pCreateInfo
->size
;
1301 *pBuffer
= anv_buffer_to_handle(buffer
);
1306 VkResult
anv_DestroyBuffer(
1310 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1311 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1313 anv_device_free(device
, buffer
);
1318 // Buffer view functions
1321 anv_fill_buffer_surface_state(void *state
, const struct anv_format
*format
,
1322 uint32_t offset
, uint32_t range
)
1324 /* This assumes RGBA float format. */
1325 uint32_t stride
= 4;
1326 uint32_t num_elements
= range
/ stride
;
1328 struct GEN8_RENDER_SURFACE_STATE surface_state
= {
1329 .SurfaceType
= SURFTYPE_BUFFER
,
1330 .SurfaceArray
= false,
1331 .SurfaceFormat
= format
->surface_format
,
1332 .SurfaceVerticalAlignment
= VALIGN4
,
1333 .SurfaceHorizontalAlignment
= HALIGN4
,
1335 .VerticalLineStride
= 0,
1336 .VerticalLineStrideOffset
= 0,
1337 .SamplerL2BypassModeDisable
= true,
1338 .RenderCacheReadWriteMode
= WriteOnlyCache
,
1339 .MemoryObjectControlState
= GEN8_MOCS
,
1340 .BaseMipLevel
= 0.0,
1342 .Height
= (num_elements
>> 7) & 0x3fff,
1343 .Width
= num_elements
& 0x7f,
1344 .Depth
= (num_elements
>> 21) & 0x3f,
1345 .SurfacePitch
= stride
- 1,
1346 .MinimumArrayElement
= 0,
1347 .NumberofMultisamples
= MULTISAMPLECOUNT_1
,
1352 .AuxiliarySurfaceMode
= AUX_NONE
,
1354 .GreenClearColor
= 0,
1355 .BlueClearColor
= 0,
1356 .AlphaClearColor
= 0,
1357 .ShaderChannelSelectRed
= SCS_RED
,
1358 .ShaderChannelSelectGreen
= SCS_GREEN
,
1359 .ShaderChannelSelectBlue
= SCS_BLUE
,
1360 .ShaderChannelSelectAlpha
= SCS_ALPHA
,
1361 .ResourceMinLOD
= 0.0,
1362 /* FIXME: We assume that the image must be bound at this time. */
1363 .SurfaceBaseAddress
= { NULL
, offset
},
1366 GEN8_RENDER_SURFACE_STATE_pack(NULL
, state
, &surface_state
);
1369 VkResult
anv_CreateBufferView(
1371 const VkBufferViewCreateInfo
* pCreateInfo
,
1372 VkBufferView
* pView
)
1374 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1375 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1376 struct anv_buffer_view
*bview
;
1377 struct anv_surface_view
*view
;
1379 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1381 bview
= anv_device_alloc(device
, sizeof(*view
), 8,
1382 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1384 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1386 view
= &bview
->view
;
1387 view
->bo
= buffer
->bo
;
1388 view
->offset
= buffer
->offset
+ pCreateInfo
->offset
;
1389 view
->surface_state
=
1390 anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64);
1391 view
->format
= anv_format_for_vk_format(pCreateInfo
->format
);
1392 view
->range
= pCreateInfo
->range
;
1394 anv_fill_buffer_surface_state(view
->surface_state
.map
, view
->format
,
1395 view
->offset
, pCreateInfo
->range
);
1397 *pView
= anv_buffer_view_to_handle(bview
);
1402 VkResult
anv_DestroyBufferView(
1404 VkBufferView _bview
)
1406 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1407 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1409 anv_surface_view_fini(device
, &bview
->view
);
1410 anv_device_free(device
, bview
);
1415 // Sampler functions
1417 VkResult
anv_CreateSampler(
1419 const VkSamplerCreateInfo
* pCreateInfo
,
1420 VkSampler
* pSampler
)
1422 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1423 struct anv_sampler
*sampler
;
1424 uint32_t mag_filter
, min_filter
, max_anisotropy
;
1426 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
);
1428 sampler
= anv_device_alloc(device
, sizeof(*sampler
), 8,
1429 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1431 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1433 static const uint32_t vk_to_gen_tex_filter
[] = {
1434 [VK_TEX_FILTER_NEAREST
] = MAPFILTER_NEAREST
,
1435 [VK_TEX_FILTER_LINEAR
] = MAPFILTER_LINEAR
1438 static const uint32_t vk_to_gen_mipmap_mode
[] = {
1439 [VK_TEX_MIPMAP_MODE_BASE
] = MIPFILTER_NONE
,
1440 [VK_TEX_MIPMAP_MODE_NEAREST
] = MIPFILTER_NEAREST
,
1441 [VK_TEX_MIPMAP_MODE_LINEAR
] = MIPFILTER_LINEAR
1444 static const uint32_t vk_to_gen_tex_address
[] = {
1445 [VK_TEX_ADDRESS_WRAP
] = TCM_WRAP
,
1446 [VK_TEX_ADDRESS_MIRROR
] = TCM_MIRROR
,
1447 [VK_TEX_ADDRESS_CLAMP
] = TCM_CLAMP
,
1448 [VK_TEX_ADDRESS_MIRROR_ONCE
] = TCM_MIRROR_ONCE
,
1449 [VK_TEX_ADDRESS_CLAMP_BORDER
] = TCM_CLAMP_BORDER
,
1452 static const uint32_t vk_to_gen_compare_op
[] = {
1453 [VK_COMPARE_OP_NEVER
] = PREFILTEROPNEVER
,
1454 [VK_COMPARE_OP_LESS
] = PREFILTEROPLESS
,
1455 [VK_COMPARE_OP_EQUAL
] = PREFILTEROPEQUAL
,
1456 [VK_COMPARE_OP_LESS_EQUAL
] = PREFILTEROPLEQUAL
,
1457 [VK_COMPARE_OP_GREATER
] = PREFILTEROPGREATER
,
1458 [VK_COMPARE_OP_NOT_EQUAL
] = PREFILTEROPNOTEQUAL
,
1459 [VK_COMPARE_OP_GREATER_EQUAL
] = PREFILTEROPGEQUAL
,
1460 [VK_COMPARE_OP_ALWAYS
] = PREFILTEROPALWAYS
,
1463 if (pCreateInfo
->maxAnisotropy
> 1) {
1464 mag_filter
= MAPFILTER_ANISOTROPIC
;
1465 min_filter
= MAPFILTER_ANISOTROPIC
;
1466 max_anisotropy
= (pCreateInfo
->maxAnisotropy
- 2) / 2;
1468 mag_filter
= vk_to_gen_tex_filter
[pCreateInfo
->magFilter
];
1469 min_filter
= vk_to_gen_tex_filter
[pCreateInfo
->minFilter
];
1470 max_anisotropy
= RATIO21
;
1473 struct GEN8_SAMPLER_STATE sampler_state
= {
1474 .SamplerDisable
= false,
1475 .TextureBorderColorMode
= DX10OGL
,
1476 .LODPreClampMode
= 0,
1477 .BaseMipLevel
= 0.0,
1478 .MipModeFilter
= vk_to_gen_mipmap_mode
[pCreateInfo
->mipMode
],
1479 .MagModeFilter
= mag_filter
,
1480 .MinModeFilter
= min_filter
,
1481 .TextureLODBias
= pCreateInfo
->mipLodBias
* 256,
1482 .AnisotropicAlgorithm
= EWAApproximation
,
1483 .MinLOD
= pCreateInfo
->minLod
,
1484 .MaxLOD
= pCreateInfo
->maxLod
,
1485 .ChromaKeyEnable
= 0,
1486 .ChromaKeyIndex
= 0,
1488 .ShadowFunction
= vk_to_gen_compare_op
[pCreateInfo
->compareOp
],
1489 .CubeSurfaceControlMode
= 0,
1491 .IndirectStatePointer
=
1492 device
->border_colors
.offset
+
1493 pCreateInfo
->borderColor
* sizeof(float) * 4,
1495 .LODClampMagnificationMode
= MIPNONE
,
1496 .MaximumAnisotropy
= max_anisotropy
,
1497 .RAddressMinFilterRoundingEnable
= 0,
1498 .RAddressMagFilterRoundingEnable
= 0,
1499 .VAddressMinFilterRoundingEnable
= 0,
1500 .VAddressMagFilterRoundingEnable
= 0,
1501 .UAddressMinFilterRoundingEnable
= 0,
1502 .UAddressMagFilterRoundingEnable
= 0,
1503 .TrilinearFilterQuality
= 0,
1504 .NonnormalizedCoordinateEnable
= 0,
1505 .TCXAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressU
],
1506 .TCYAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressV
],
1507 .TCZAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressW
],
1510 GEN8_SAMPLER_STATE_pack(NULL
, sampler
->state
, &sampler_state
);
1512 *pSampler
= anv_sampler_to_handle(sampler
);
1517 VkResult
anv_DestroySampler(
1521 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1522 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1524 anv_device_free(device
, sampler
);
1529 // Descriptor set functions
1531 VkResult
anv_CreateDescriptorSetLayout(
1533 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1534 VkDescriptorSetLayout
* pSetLayout
)
1536 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1537 struct anv_descriptor_set_layout
*set_layout
;
1539 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1541 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1542 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1543 uint32_t num_dynamic_buffers
= 0;
1545 uint32_t stages
= 0;
1548 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1549 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1550 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1551 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1552 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1553 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1559 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1560 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1561 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1562 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1563 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1564 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1565 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1566 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1567 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1568 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1569 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1570 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1571 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1577 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1578 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1579 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1580 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1586 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1587 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1590 uint32_t sampler_total
= 0;
1591 uint32_t surface_total
= 0;
1592 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1593 sampler_total
+= sampler_count
[s
];
1594 surface_total
+= surface_count
[s
];
1597 size_t size
= sizeof(*set_layout
) +
1598 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1599 set_layout
= anv_device_alloc(device
, size
, 8,
1600 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1602 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1604 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1605 set_layout
->count
= count
;
1606 set_layout
->shader_stages
= stages
;
1608 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1609 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1610 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1611 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1612 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1613 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1614 p
+= surface_count
[s
];
1615 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1616 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1617 p
+= sampler_count
[s
];
1620 uint32_t descriptor
= 0;
1621 int8_t dynamic_slot
= 0;
1623 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1624 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1625 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1626 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1627 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1628 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1629 sampler
[s
]->index
= descriptor
+ j
;
1630 sampler
[s
]->dynamic_slot
= -1;
1638 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1639 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1640 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1648 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1649 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1650 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1651 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1652 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1653 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1654 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1655 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1656 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1657 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1658 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1659 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1660 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1661 surface
[s
]->index
= descriptor
+ j
;
1663 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1665 surface
[s
]->dynamic_slot
= -1;
1674 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1676 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1679 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1684 VkResult
anv_DestroyDescriptorSetLayout(
1686 VkDescriptorSetLayout _set_layout
)
1688 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1689 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1691 anv_device_free(device
, set_layout
);
1696 VkResult
anv_CreateDescriptorPool(
1698 VkDescriptorPoolUsage poolUsage
,
1700 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1701 VkDescriptorPool
* pDescriptorPool
)
1703 anv_finishme("VkDescriptorPool is a stub");
1704 pDescriptorPool
->handle
= 1;
1708 VkResult
anv_DestroyDescriptorPool(
1710 VkDescriptorPool _pool
)
1712 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1716 VkResult
anv_ResetDescriptorPool(
1718 VkDescriptorPool descriptorPool
)
1720 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1725 anv_descriptor_set_create(struct anv_device
*device
,
1726 const struct anv_descriptor_set_layout
*layout
,
1727 struct anv_descriptor_set
**out_set
)
1729 struct anv_descriptor_set
*set
;
1730 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1732 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1734 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1736 /* A descriptor set may not be 100% filled. Clear the set so we can can
1737 * later detect holes in it.
1739 memset(set
, 0, size
);
/* Free a descriptor set created by anv_descriptor_set_create(). */
static void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
1753 VkResult
anv_AllocDescriptorSets(
1755 VkDescriptorPool descriptorPool
,
1756 VkDescriptorSetUsage setUsage
,
1758 const VkDescriptorSetLayout
* pSetLayouts
,
1759 VkDescriptorSet
* pDescriptorSets
,
1762 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1765 struct anv_descriptor_set
*set
;
1767 for (uint32_t i
= 0; i
< count
; i
++) {
1768 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1770 result
= anv_descriptor_set_create(device
, layout
, &set
);
1771 if (result
!= VK_SUCCESS
) {
1776 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1784 VkResult
anv_FreeDescriptorSets(
1786 VkDescriptorPool descriptorPool
,
1788 const VkDescriptorSet
* pDescriptorSets
)
1790 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1792 for (uint32_t i
= 0; i
< count
; i
++) {
1793 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1795 anv_descriptor_set_destroy(device
, set
);
1801 VkResult
anv_UpdateDescriptorSets(
1803 uint32_t writeCount
,
1804 const VkWriteDescriptorSet
* pDescriptorWrites
,
1806 const VkCopyDescriptorSet
* pDescriptorCopies
)
1808 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1809 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1810 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1812 switch (write
->descriptorType
) {
1813 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1814 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1815 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1816 set
->descriptors
[write
->destBinding
+ j
].sampler
=
1817 anv_sampler_from_handle(write
->pDescriptors
[j
].sampler
);
1820 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1825 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1826 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1827 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1828 ANV_FROM_HANDLE(anv_image_view
, iview
,
1829 write
->pDescriptors
[j
].imageView
);
1830 set
->descriptors
[write
->destBinding
+ j
].view
= &iview
->view
;
1834 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1835 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1836 anv_finishme("texel buffers not implemented");
1839 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1840 anv_finishme("input attachments not implemented");
1843 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1844 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1845 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1846 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1847 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1848 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1849 write
->pDescriptors
[j
].bufferView
);
1850 set
->descriptors
[write
->destBinding
+ j
].view
= &bview
->view
;
1858 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1859 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1860 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1861 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1862 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1863 dest
->descriptors
[copy
->destBinding
+ j
] =
1864 src
->descriptors
[copy
->srcBinding
+ j
];
1871 // State object functions
/* Clamp x into [min, max]; assumes min <= max. */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   if (x > max)
      return max;
   return x;
}
1884 VkResult
anv_CreateDynamicViewportState(
1886 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1887 VkDynamicViewportState
* pState
)
1889 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1890 struct anv_dynamic_vp_state
*state
;
1892 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1894 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1895 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1897 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1899 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1900 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1902 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1904 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1907 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1908 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1909 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1911 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1912 .ViewportMatrixElementm00
= vp
->width
/ 2,
1913 .ViewportMatrixElementm11
= vp
->height
/ 2,
1914 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1915 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1916 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1917 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1918 .XMinClipGuardband
= -1.0f
,
1919 .XMaxClipGuardband
= 1.0f
,
1920 .YMinClipGuardband
= -1.0f
,
1921 .YMaxClipGuardband
= 1.0f
,
1922 .XMinViewPort
= vp
->originX
,
1923 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1924 .YMinViewPort
= vp
->originY
,
1925 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1928 struct GEN8_CC_VIEWPORT cc_viewport
= {
1929 .MinimumDepth
= vp
->minDepth
,
1930 .MaximumDepth
= vp
->maxDepth
1933 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1934 * ymax < ymin for empty clips. In case clip x, y, width height are all
1935 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1936 * what we want. Just special case empty clips and produce a canonical
1938 static const struct GEN8_SCISSOR_RECT empty_scissor
= {
1939 .ScissorRectangleYMin
= 1,
1940 .ScissorRectangleXMin
= 1,
1941 .ScissorRectangleYMax
= 0,
1942 .ScissorRectangleXMax
= 0
1945 const int max
= 0xffff;
1946 struct GEN8_SCISSOR_RECT scissor
= {
1947 /* Do this math using int64_t so overflow gets clamped correctly. */
1948 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1949 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1950 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1951 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1954 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1955 GEN8_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1957 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1958 GEN8_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1960 GEN8_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
1964 *pState
= anv_dynamic_vp_state_to_handle(state
);
1969 VkResult
anv_DestroyDynamicViewportState(
1971 VkDynamicViewportState _vp_state
)
1973 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1974 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
1976 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
1977 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
1978 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
1980 anv_device_free(device
, vp_state
);
1985 VkResult
anv_CreateDynamicRasterState(
1987 const VkDynamicRasterStateCreateInfo
* pCreateInfo
,
1988 VkDynamicRasterState
* pState
)
1990 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1991 struct anv_dynamic_rs_state
*state
;
1993 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO
);
1995 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1996 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1998 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2000 struct GEN8_3DSTATE_SF sf
= {
2001 GEN8_3DSTATE_SF_header
,
2002 .LineWidth
= pCreateInfo
->lineWidth
,
2005 GEN8_3DSTATE_SF_pack(NULL
, state
->state_sf
, &sf
);
2007 bool enable_bias
= pCreateInfo
->depthBias
!= 0.0f
||
2008 pCreateInfo
->slopeScaledDepthBias
!= 0.0f
;
2009 struct GEN8_3DSTATE_RASTER raster
= {
2010 .GlobalDepthOffsetEnableSolid
= enable_bias
,
2011 .GlobalDepthOffsetEnableWireframe
= enable_bias
,
2012 .GlobalDepthOffsetEnablePoint
= enable_bias
,
2013 .GlobalDepthOffsetConstant
= pCreateInfo
->depthBias
,
2014 .GlobalDepthOffsetScale
= pCreateInfo
->slopeScaledDepthBias
,
2015 .GlobalDepthOffsetClamp
= pCreateInfo
->depthBiasClamp
2018 GEN8_3DSTATE_RASTER_pack(NULL
, state
->state_raster
, &raster
);
2020 *pState
= anv_dynamic_rs_state_to_handle(state
);
2025 VkResult
anv_DestroyDynamicRasterState(
2027 VkDynamicRasterState _rs_state
)
2029 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2030 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
2032 anv_device_free(device
, rs_state
);
2037 VkResult
anv_CreateDynamicColorBlendState(
2039 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
2040 VkDynamicColorBlendState
* pState
)
2042 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2043 struct anv_dynamic_cb_state
*state
;
2045 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
2047 state
= anv_device_alloc(device
, sizeof(*state
), 8,
2048 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2050 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2052 struct GEN8_COLOR_CALC_STATE color_calc_state
= {
2053 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
2054 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
2055 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
2056 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
2059 GEN8_COLOR_CALC_STATE_pack(NULL
, state
->state_color_calc
, &color_calc_state
);
2061 *pState
= anv_dynamic_cb_state_to_handle(state
);
2066 VkResult
anv_DestroyDynamicColorBlendState(
2068 VkDynamicColorBlendState _cb_state
)
2070 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2071 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
2073 anv_device_free(device
, cb_state
);
2078 VkResult
anv_CreateDynamicDepthStencilState(
2080 const VkDynamicDepthStencilStateCreateInfo
* pCreateInfo
,
2081 VkDynamicDepthStencilState
* pState
)
2083 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2084 struct anv_dynamic_ds_state
*state
;
2086 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO
);
2088 state
= anv_device_alloc(device
, sizeof(*state
), 8,
2089 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2091 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2093 struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil
= {
2094 GEN8_3DSTATE_WM_DEPTH_STENCIL_header
,
2096 /* Is this what we need to do? */
2097 .StencilBufferWriteEnable
= pCreateInfo
->stencilWriteMask
!= 0,
2099 .StencilTestMask
= pCreateInfo
->stencilReadMask
& 0xff,
2100 .StencilWriteMask
= pCreateInfo
->stencilWriteMask
& 0xff,
2102 .BackfaceStencilTestMask
= pCreateInfo
->stencilReadMask
& 0xff,
2103 .BackfaceStencilWriteMask
= pCreateInfo
->stencilWriteMask
& 0xff,
2106 GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL
, state
->state_wm_depth_stencil
,
2109 struct GEN8_COLOR_CALC_STATE color_calc_state
= {
2110 .StencilReferenceValue
= pCreateInfo
->stencilFrontRef
,
2111 .BackFaceStencilReferenceValue
= pCreateInfo
->stencilBackRef
2114 GEN8_COLOR_CALC_STATE_pack(NULL
, state
->state_color_calc
, &color_calc_state
);
2116 *pState
= anv_dynamic_ds_state_to_handle(state
);
2121 VkResult
anv_DestroyDynamicDepthStencilState(
2123 VkDynamicDepthStencilState _ds_state
)
2125 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2126 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
2128 anv_device_free(device
, ds_state
);
2133 VkResult
anv_CreateFramebuffer(
2135 const VkFramebufferCreateInfo
* pCreateInfo
,
2136 VkFramebuffer
* pFramebuffer
)
2138 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2139 struct anv_framebuffer
*framebuffer
;
2141 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
2143 size_t size
= sizeof(*framebuffer
) +
2144 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
2145 framebuffer
= anv_device_alloc(device
, size
, 8,
2146 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2147 if (framebuffer
== NULL
)
2148 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2150 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
2151 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2152 ANV_FROM_HANDLE(anv_attachment_view
, view
,
2153 pCreateInfo
->pAttachments
[i
].view
);
2155 framebuffer
->attachments
[i
] = view
;
2158 framebuffer
->width
= pCreateInfo
->width
;
2159 framebuffer
->height
= pCreateInfo
->height
;
2160 framebuffer
->layers
= pCreateInfo
->layers
;
2162 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
2163 &(VkDynamicViewportStateCreateInfo
) {
2164 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
2165 .viewportAndScissorCount
= 1,
2166 .pViewports
= (VkViewport
[]) {
2170 .width
= pCreateInfo
->width
,
2171 .height
= pCreateInfo
->height
,
2176 .pScissors
= (VkRect2D
[]) {
2178 { pCreateInfo
->width
, pCreateInfo
->height
} },
2181 &framebuffer
->vp_state
);
2183 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
2188 VkResult
anv_DestroyFramebuffer(
2192 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2193 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
2195 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
2197 anv_device_free(device
, fb
);
2202 VkResult
anv_CreateRenderPass(
2204 const VkRenderPassCreateInfo
* pCreateInfo
,
2205 VkRenderPass
* pRenderPass
)
2207 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2208 struct anv_render_pass
*pass
;
2211 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2213 size
= sizeof(*pass
) +
2214 pCreateInfo
->subpassCount
* sizeof(pass
->subpasses
[0]);
2215 pass
= anv_device_alloc(device
, size
, 8,
2216 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2218 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2220 /* Clear the subpasses along with the parent pass. This required because
2221 * each array member of anv_subpass must be a valid pointer if not NULL.
2223 memset(pass
, 0, size
);
2225 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2226 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2228 size
= pCreateInfo
->attachmentCount
* sizeof(*pass
->attachments
);
2229 pass
->attachments
= anv_device_alloc(device
, size
, 8,
2230 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2231 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2232 pass
->attachments
[i
].format
=
2233 anv_format_for_vk_format(pCreateInfo
->pAttachments
[i
].format
);
2234 pass
->attachments
[i
].samples
= pCreateInfo
->pAttachments
[i
].samples
;
2235 pass
->attachments
[i
].load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2236 pass
->attachments
[i
].stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2237 // pass->attachments[i].store_op = pCreateInfo->pAttachments[i].storeOp;
2238 // pass->attachments[i].stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2241 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2242 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2243 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2245 subpass
->input_count
= desc
->inputCount
;
2246 subpass
->color_count
= desc
->colorCount
;
2248 if (desc
->inputCount
> 0) {
2249 subpass
->input_attachments
=
2250 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2251 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2253 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2254 subpass
->input_attachments
[j
]
2255 = desc
->inputAttachments
[j
].attachment
;
2259 if (desc
->colorCount
> 0) {
2260 subpass
->color_attachments
=
2261 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2262 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2264 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2265 subpass
->color_attachments
[j
]
2266 = desc
->colorAttachments
[j
].attachment
;
2270 if (desc
->resolveAttachments
) {
2271 subpass
->resolve_attachments
=
2272 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2273 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2275 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2276 subpass
->resolve_attachments
[j
]
2277 = desc
->resolveAttachments
[j
].attachment
;
2281 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2284 *pRenderPass
= anv_render_pass_to_handle(pass
);
2289 VkResult
anv_DestroyRenderPass(
2293 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2294 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2296 anv_device_free(device
, pass
->attachments
);
2298 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2299 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2300 * Don't free the null arrays.
2302 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2304 anv_device_free(device
, subpass
->input_attachments
);
2305 anv_device_free(device
, subpass
->color_attachments
);
2306 anv_device_free(device
, subpass
->resolve_attachments
);
2309 anv_device_free(device
, pass
);
2314 VkResult
anv_GetRenderAreaGranularity(
2316 VkRenderPass renderPass
,
2317 VkExtent2D
* pGranularity
)
2319 *pGranularity
= (VkExtent2D
) { 1, 1 };
2324 void vkCmdDbgMarkerBegin(
2325 VkCmdBuffer cmdBuffer
,
2326 const char* pMarker
)
2327 __attribute__ ((visibility ("default")));
2329 void vkCmdDbgMarkerEnd(
2330 VkCmdBuffer cmdBuffer
)
2331 __attribute__ ((visibility ("default")));
2333 void vkCmdDbgMarkerBegin(
2334 VkCmdBuffer cmdBuffer
,
2335 const char* pMarker
)
2339 void vkCmdDbgMarkerEnd(
2340 VkCmdBuffer cmdBuffer
)