2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "anv_private.h"
31 #include "mesa/main/git_sha1.h"
/**
 * Read an integer from the environment variable `name`.
 *
 * Returns 0 when the variable is unset.  The value is parsed with base 0,
 * so plain decimal, 0x-prefixed hex and 0-prefixed octal are all accepted.
 */
static int
anv_env_get_int(const char *name)
{
   const char *val = getenv(name);

   /* getenv() returns NULL for an unset variable; passing NULL to strtol()
    * is undefined behavior, so bail out early.
    */
   if (!val)
      return 0;

   return strtol(val, NULL, 0);
}
45 anv_physical_device_finish(struct anv_physical_device
*device
)
52 anv_physical_device_init(struct anv_physical_device
*device
,
53 struct anv_instance
*instance
,
56 device
->fd
= open(path
, O_RDWR
| O_CLOEXEC
);
58 return vk_error(VK_ERROR_UNAVAILABLE
);
60 device
->instance
= instance
;
63 device
->chipset_id
= anv_env_get_int("INTEL_DEVID_OVERRIDE");
64 device
->no_hw
= false;
65 if (device
->chipset_id
) {
66 /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
69 device
->chipset_id
= anv_gem_get_param(device
->fd
, I915_PARAM_CHIPSET_ID
);
71 if (!device
->chipset_id
)
74 device
->name
= brw_get_device_name(device
->chipset_id
);
75 device
->info
= brw_get_device_info(device
->chipset_id
, -1);
79 if (!anv_gem_get_param(device
->fd
, I915_PARAM_HAS_WAIT_TIMEOUT
))
82 if (!anv_gem_get_param(device
->fd
, I915_PARAM_HAS_EXECBUF2
))
85 if (!anv_gem_get_param(device
->fd
, I915_PARAM_HAS_LLC
))
88 if (!anv_gem_get_param(device
->fd
, I915_PARAM_HAS_EXEC_CONSTANTS
))
94 anv_physical_device_finish(device
);
95 return vk_error(VK_ERROR_UNAVAILABLE
);
98 static void *default_alloc(
102 VkSystemAllocType allocType
)
107 static void default_free(
114 static const VkAllocCallbacks default_alloc_callbacks
= {
116 .pfnAlloc
= default_alloc
,
117 .pfnFree
= default_free
120 VkResult
anv_CreateInstance(
121 const VkInstanceCreateInfo
* pCreateInfo
,
122 VkInstance
* pInstance
)
124 struct anv_instance
*instance
;
125 const VkAllocCallbacks
*alloc_callbacks
= &default_alloc_callbacks
;
126 void *user_data
= NULL
;
128 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
);
130 if (pCreateInfo
->pAllocCb
) {
131 alloc_callbacks
= pCreateInfo
->pAllocCb
;
132 user_data
= pCreateInfo
->pAllocCb
->pUserData
;
134 instance
= alloc_callbacks
->pfnAlloc(user_data
, sizeof(*instance
), 8,
135 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
137 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
139 instance
->pAllocUserData
= alloc_callbacks
->pUserData
;
140 instance
->pfnAlloc
= alloc_callbacks
->pfnAlloc
;
141 instance
->pfnFree
= alloc_callbacks
->pfnFree
;
142 instance
->apiVersion
= pCreateInfo
->pAppInfo
->apiVersion
;
143 instance
->physicalDeviceCount
= 0;
145 *pInstance
= anv_instance_to_handle(instance
);
150 VkResult
anv_DestroyInstance(
151 VkInstance _instance
)
153 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
155 if (instance
->physicalDeviceCount
> 0) {
156 anv_physical_device_finish(&instance
->physicalDevice
);
159 instance
->pfnFree(instance
->pAllocUserData
, instance
);
164 VkResult
anv_EnumeratePhysicalDevices(
165 VkInstance _instance
,
166 uint32_t* pPhysicalDeviceCount
,
167 VkPhysicalDevice
* pPhysicalDevices
)
169 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
172 if (instance
->physicalDeviceCount
== 0) {
173 result
= anv_physical_device_init(&instance
->physicalDevice
,
174 instance
, "/dev/dri/renderD128");
175 if (result
!= VK_SUCCESS
)
178 instance
->physicalDeviceCount
= 1;
181 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
182 * otherwise it's an inout parameter.
184 * The Vulkan spec (git aaed022) says:
186 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
187 * that is initialized with the number of devices the application is
188 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
189 * an array of at least this many VkPhysicalDevice handles [...].
191 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
192 * overwrites the contents of the variable pointed to by
193 * pPhysicalDeviceCount with the number of physical devices in in the
194 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
195 * pPhysicalDeviceCount with the number of physical handles written to
198 if (!pPhysicalDevices
) {
199 *pPhysicalDeviceCount
= instance
->physicalDeviceCount
;
200 } else if (*pPhysicalDeviceCount
>= 1) {
201 pPhysicalDevices
[0] = anv_physical_device_to_handle(&instance
->physicalDevice
);
202 *pPhysicalDeviceCount
= 1;
204 *pPhysicalDeviceCount
= 0;
210 VkResult
anv_GetPhysicalDeviceFeatures(
211 VkPhysicalDevice physicalDevice
,
212 VkPhysicalDeviceFeatures
* pFeatures
)
214 anv_finishme("Get correct values for PhysicalDeviceFeatures");
216 *pFeatures
= (VkPhysicalDeviceFeatures
) {
217 .robustBufferAccess
= false,
218 .fullDrawIndexUint32
= false,
219 .imageCubeArray
= false,
220 .independentBlend
= false,
221 .geometryShader
= true,
222 .tessellationShader
= false,
223 .sampleRateShading
= false,
224 .dualSourceBlend
= true,
226 .instancedDrawIndirect
= true,
228 .depthBiasClamp
= false,
229 .fillModeNonSolid
= true,
230 .depthBounds
= false,
233 .textureCompressionETC2
= true,
234 .textureCompressionASTC_LDR
= true,
235 .textureCompressionBC
= true,
236 .pipelineStatisticsQuery
= true,
237 .vertexSideEffects
= false,
238 .tessellationSideEffects
= false,
239 .geometrySideEffects
= false,
240 .fragmentSideEffects
= false,
241 .shaderTessellationPointSize
= false,
242 .shaderGeometryPointSize
= true,
243 .shaderTextureGatherExtended
= true,
244 .shaderStorageImageExtendedFormats
= false,
245 .shaderStorageImageMultisample
= false,
246 .shaderStorageBufferArrayConstantIndexing
= false,
247 .shaderStorageImageArrayConstantIndexing
= false,
248 .shaderUniformBufferArrayDynamicIndexing
= true,
249 .shaderSampledImageArrayDynamicIndexing
= false,
250 .shaderStorageBufferArrayDynamicIndexing
= false,
251 .shaderStorageImageArrayDynamicIndexing
= false,
252 .shaderClipDistance
= false,
253 .shaderCullDistance
= false,
254 .shaderFloat64
= false,
255 .shaderInt64
= false,
256 .shaderFloat16
= false,
257 .shaderInt16
= false,
263 VkResult
anv_GetPhysicalDeviceLimits(
264 VkPhysicalDevice physicalDevice
,
265 VkPhysicalDeviceLimits
* pLimits
)
267 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
268 const struct brw_device_info
*devinfo
= physical_device
->info
;
270 anv_finishme("Get correct values for PhysicalDeviceLimits");
272 *pLimits
= (VkPhysicalDeviceLimits
) {
273 .maxImageDimension1D
= (1 << 14),
274 .maxImageDimension2D
= (1 << 14),
275 .maxImageDimension3D
= (1 << 10),
276 .maxImageDimensionCube
= (1 << 14),
277 .maxImageArrayLayers
= (1 << 10),
278 .maxTexelBufferSize
= (1 << 14),
279 .maxUniformBufferSize
= UINT32_MAX
,
280 .maxStorageBufferSize
= UINT32_MAX
,
281 .maxPushConstantsSize
= 128,
282 .maxMemoryAllocationCount
= UINT32_MAX
,
283 .bufferImageGranularity
= 64, /* A cache line */
284 .maxBoundDescriptorSets
= MAX_SETS
,
285 .maxDescriptorSets
= UINT32_MAX
,
286 .maxPerStageDescriptorSamplers
= 64,
287 .maxPerStageDescriptorUniformBuffers
= 64,
288 .maxPerStageDescriptorStorageBuffers
= 64,
289 .maxPerStageDescriptorSampledImages
= 64,
290 .maxPerStageDescriptorStorageImages
= 64,
291 .maxDescriptorSetSamplers
= 256,
292 .maxDescriptorSetUniformBuffers
= 256,
293 .maxDescriptorSetStorageBuffers
= 256,
294 .maxDescriptorSetSampledImages
= 256,
295 .maxDescriptorSetStorageImages
= 256,
296 .maxVertexInputAttributes
= 32,
297 .maxVertexInputAttributeOffset
= 256,
298 .maxVertexInputBindingStride
= 256,
299 .maxVertexOutputComponents
= 32,
300 .maxTessGenLevel
= 0,
301 .maxTessPatchSize
= 0,
302 .maxTessControlPerVertexInputComponents
= 0,
303 .maxTessControlPerVertexOutputComponents
= 0,
304 .maxTessControlPerPatchOutputComponents
= 0,
305 .maxTessControlTotalOutputComponents
= 0,
306 .maxTessEvaluationInputComponents
= 0,
307 .maxTessEvaluationOutputComponents
= 0,
308 .maxGeometryShaderInvocations
= 6,
309 .maxGeometryInputComponents
= 16,
310 .maxGeometryOutputComponents
= 16,
311 .maxGeometryOutputVertices
= 16,
312 .maxGeometryTotalOutputComponents
= 16,
313 .maxFragmentInputComponents
= 16,
314 .maxFragmentOutputBuffers
= 8,
315 .maxFragmentDualSourceBuffers
= 2,
316 .maxFragmentCombinedOutputResources
= 8,
317 .maxComputeSharedMemorySize
= 1024,
318 .maxComputeWorkGroupCount
= {
319 16 * devinfo
->max_cs_threads
,
320 16 * devinfo
->max_cs_threads
,
321 16 * devinfo
->max_cs_threads
,
323 .maxComputeWorkGroupInvocations
= 16 * devinfo
->max_cs_threads
,
324 .maxComputeWorkGroupSize
= {
325 16 * devinfo
->max_cs_threads
,
326 16 * devinfo
->max_cs_threads
,
327 16 * devinfo
->max_cs_threads
,
329 .subPixelPrecisionBits
= 4 /* FIXME */,
330 .subTexelPrecisionBits
= 4 /* FIXME */,
331 .mipmapPrecisionBits
= 4 /* FIXME */,
332 .maxDrawIndexedIndexValue
= UINT32_MAX
,
333 .maxDrawIndirectInstanceCount
= UINT32_MAX
,
334 .primitiveRestartForPatches
= UINT32_MAX
,
335 .maxSamplerLodBias
= 16,
336 .maxSamplerAnisotropy
= 16,
338 .maxDynamicViewportStates
= UINT32_MAX
,
339 .maxViewportDimensions
= { (1 << 14), (1 << 14) },
340 .viewportBoundsRange
= { -1.0, 1.0 }, /* FIXME */
341 .viewportSubPixelBits
= 13, /* We take a float? */
342 .minMemoryMapAlignment
= 64, /* A cache line */
343 .minTexelBufferOffsetAlignment
= 1,
344 .minUniformBufferOffsetAlignment
= 1,
345 .minStorageBufferOffsetAlignment
= 1,
346 .minTexelOffset
= 0, /* FIXME */
347 .maxTexelOffset
= 0, /* FIXME */
348 .minTexelGatherOffset
= 0, /* FIXME */
349 .maxTexelGatherOffset
= 0, /* FIXME */
350 .minInterpolationOffset
= 0, /* FIXME */
351 .maxInterpolationOffset
= 0, /* FIXME */
352 .subPixelInterpolationOffsetBits
= 0, /* FIXME */
353 .maxFramebufferWidth
= (1 << 14),
354 .maxFramebufferHeight
= (1 << 14),
355 .maxFramebufferLayers
= (1 << 10),
356 .maxFramebufferColorSamples
= 8,
357 .maxFramebufferDepthSamples
= 8,
358 .maxFramebufferStencilSamples
= 8,
359 .maxColorAttachments
= MAX_RTS
,
360 .maxSampledImageColorSamples
= 8,
361 .maxSampledImageDepthSamples
= 8,
362 .maxSampledImageIntegerSamples
= 1,
363 .maxStorageImageSamples
= 1,
364 .maxSampleMaskWords
= 1,
365 .timestampFrequency
= 1000 * 1000 * 1000 / 80,
366 .maxClipDistances
= 0 /* FIXME */,
367 .maxCullDistances
= 0 /* FIXME */,
368 .maxCombinedClipAndCullDistances
= 0 /* FIXME */,
369 .pointSizeRange
= { 0.125, 255.875 },
370 .lineWidthRange
= { 0.0, 7.9921875 },
371 .pointSizeGranularity
= (1.0 / 8.0),
372 .lineWidthGranularity
= (1.0 / 128.0),
378 VkResult
anv_GetPhysicalDeviceProperties(
379 VkPhysicalDevice physicalDevice
,
380 VkPhysicalDeviceProperties
* pProperties
)
382 ANV_FROM_HANDLE(anv_physical_device
, pdevice
, physicalDevice
);
384 *pProperties
= (VkPhysicalDeviceProperties
) {
385 .apiVersion
= VK_MAKE_VERSION(0, 138, 1),
388 .deviceId
= pdevice
->chipset_id
,
389 .deviceType
= VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
,
392 strcpy(pProperties
->deviceName
, pdevice
->name
);
393 snprintf((char *)pProperties
->pipelineCacheUUID
, VK_UUID_LENGTH
,
394 "anv-%s", MESA_GIT_SHA1
+ 4);
399 VkResult
anv_GetPhysicalDeviceQueueCount(
400 VkPhysicalDevice physicalDevice
,
408 VkResult
anv_GetPhysicalDeviceQueueProperties(
409 VkPhysicalDevice physicalDevice
,
411 VkPhysicalDeviceQueueProperties
* pQueueProperties
)
415 *pQueueProperties
= (VkPhysicalDeviceQueueProperties
) {
416 .queueFlags
= VK_QUEUE_GRAPHICS_BIT
|
417 VK_QUEUE_COMPUTE_BIT
|
420 .supportsTimestamps
= true,
426 VkResult
anv_GetPhysicalDeviceMemoryProperties(
427 VkPhysicalDevice physicalDevice
,
428 VkPhysicalDeviceMemoryProperties
* pMemoryProperties
)
430 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
432 size_t aperture_size
;
435 if (anv_gem_get_aperture(physical_device
, &aperture_size
) == -1)
436 return vk_error(VK_ERROR_UNAVAILABLE
);
438 /* Reserve some wiggle room for the driver by exposing only 75% of the
439 * aperture to the heap.
441 heap_size
= 3 * aperture_size
/ 4;
443 /* The property flags below are valid only for llc platforms. */
444 pMemoryProperties
->memoryTypeCount
= 1;
445 pMemoryProperties
->memoryTypes
[0] = (VkMemoryType
) {
446 .propertyFlags
= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
,
450 pMemoryProperties
->memoryHeapCount
= 1;
451 pMemoryProperties
->memoryHeaps
[0] = (VkMemoryHeap
) {
453 .flags
= VK_MEMORY_HEAP_HOST_LOCAL
,
459 PFN_vkVoidFunction
anv_GetInstanceProcAddr(
463 return anv_lookup_entrypoint(pName
);
466 PFN_vkVoidFunction
anv_GetDeviceProcAddr(
470 return anv_lookup_entrypoint(pName
);
474 parse_debug_flags(struct anv_device
*device
)
476 const char *debug
, *p
, *end
;
478 debug
= getenv("INTEL_DEBUG");
479 device
->dump_aub
= false;
481 for (p
= debug
; *p
; p
= end
+ 1) {
482 end
= strchrnul(p
, ',');
483 if (end
- p
== 3 && memcmp(p
, "aub", 3) == 0)
484 device
->dump_aub
= true;
485 if (end
- p
== 5 && memcmp(p
, "no_hw", 5) == 0)
486 device
->no_hw
= true;
494 anv_queue_init(struct anv_device
*device
, struct anv_queue
*queue
)
496 queue
->device
= device
;
497 queue
->pool
= &device
->surface_state_pool
;
499 queue
->completed_serial
= anv_state_pool_alloc(queue
->pool
, 4, 4);
500 if (queue
->completed_serial
.map
== NULL
)
501 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
503 *(uint32_t *)queue
->completed_serial
.map
= 0;
504 queue
->next_serial
= 1;
510 anv_queue_finish(struct anv_queue
*queue
)
513 /* This gets torn down with the device so we only need to do this if
514 * valgrind is present.
516 anv_state_pool_free(queue
->pool
, queue
->completed_serial
);
521 anv_device_init_border_colors(struct anv_device
*device
)
523 static const VkClearColorValue border_colors
[] = {
524 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 0.0 } },
525 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
] = { .f32
= { 0.0, 0.0, 0.0, 1.0 } },
526 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
] = { .f32
= { 1.0, 1.0, 1.0, 1.0 } },
527 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
] = { .u32
= { 0, 0, 0, 0 } },
528 [VK_BORDER_COLOR_INT_OPAQUE_BLACK
] = { .u32
= { 0, 0, 0, 1 } },
529 [VK_BORDER_COLOR_INT_OPAQUE_WHITE
] = { .u32
= { 1, 1, 1, 1 } },
532 device
->border_colors
=
533 anv_state_pool_alloc(&device
->dynamic_state_pool
,
534 sizeof(border_colors
), 32);
535 memcpy(device
->border_colors
.map
, border_colors
, sizeof(border_colors
));
538 VkResult
anv_CreateDevice(
539 VkPhysicalDevice physicalDevice
,
540 const VkDeviceCreateInfo
* pCreateInfo
,
543 ANV_FROM_HANDLE(anv_physical_device
, physical_device
, physicalDevice
);
544 struct anv_instance
*instance
= physical_device
->instance
;
545 struct anv_device
*device
;
547 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
);
549 device
= instance
->pfnAlloc(instance
->pAllocUserData
,
551 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
553 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
555 device
->no_hw
= physical_device
->no_hw
;
556 parse_debug_flags(device
);
558 device
->instance
= physical_device
->instance
;
560 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
561 device
->fd
= open(physical_device
->path
, O_RDWR
| O_CLOEXEC
);
562 if (device
->fd
== -1)
565 device
->context_id
= anv_gem_create_context(device
);
566 if (device
->context_id
== -1)
569 anv_bo_pool_init(&device
->batch_bo_pool
, device
, ANV_CMD_BUFFER_BATCH_SIZE
);
571 anv_block_pool_init(&device
->dynamic_state_block_pool
, device
, 2048);
573 anv_state_pool_init(&device
->dynamic_state_pool
,
574 &device
->dynamic_state_block_pool
);
576 anv_block_pool_init(&device
->instruction_block_pool
, device
, 2048);
577 anv_block_pool_init(&device
->surface_state_block_pool
, device
, 2048);
579 anv_state_pool_init(&device
->surface_state_pool
,
580 &device
->surface_state_block_pool
);
582 anv_block_pool_init(&device
->scratch_block_pool
, device
, 0x10000);
584 device
->info
= *physical_device
->info
;
586 device
->compiler
= anv_compiler_create(device
);
587 device
->aub_writer
= NULL
;
589 pthread_mutex_init(&device
->mutex
, NULL
);
591 anv_queue_init(device
, &device
->queue
);
593 anv_device_init_meta(device
);
595 anv_device_init_border_colors(device
);
597 *pDevice
= anv_device_to_handle(device
);
604 anv_device_free(device
, device
);
606 return vk_error(VK_ERROR_UNAVAILABLE
);
609 VkResult
anv_DestroyDevice(
612 ANV_FROM_HANDLE(anv_device
, device
, _device
);
614 anv_compiler_destroy(device
->compiler
);
616 anv_queue_finish(&device
->queue
);
618 anv_device_finish_meta(device
);
621 /* We only need to free these to prevent valgrind errors. The backing
622 * BO will go away in a couple of lines so we don't actually leak.
624 anv_state_pool_free(&device
->dynamic_state_pool
, device
->border_colors
);
627 anv_bo_pool_finish(&device
->batch_bo_pool
);
628 anv_block_pool_finish(&device
->dynamic_state_block_pool
);
629 anv_block_pool_finish(&device
->instruction_block_pool
);
630 anv_block_pool_finish(&device
->surface_state_block_pool
);
631 anv_block_pool_finish(&device
->scratch_block_pool
);
635 if (device
->aub_writer
)
636 anv_aub_writer_destroy(device
->aub_writer
);
638 anv_device_free(device
, device
);
643 static const VkExtensionProperties global_extensions
[] = {
645 .extName
= "VK_WSI_LunarG",
650 VkResult
anv_GetGlobalExtensionProperties(
651 const char* pLayerName
,
653 VkExtensionProperties
* pProperties
)
655 if (pProperties
== NULL
) {
656 *pCount
= ARRAY_SIZE(global_extensions
);
660 assert(*pCount
< ARRAY_SIZE(global_extensions
));
662 *pCount
= ARRAY_SIZE(global_extensions
);
663 memcpy(pProperties
, global_extensions
, sizeof(global_extensions
));
668 VkResult
anv_GetPhysicalDeviceExtensionProperties(
669 VkPhysicalDevice physicalDevice
,
670 const char* pLayerName
,
672 VkExtensionProperties
* pProperties
)
674 if (pProperties
== NULL
) {
679 /* None supported at this time */
680 return vk_error(VK_ERROR_INVALID_EXTENSION
);
683 VkResult
anv_GetGlobalLayerProperties(
685 VkLayerProperties
* pProperties
)
687 if (pProperties
== NULL
) {
692 /* None supported at this time */
693 return vk_error(VK_ERROR_INVALID_LAYER
);
696 VkResult
anv_GetPhysicalDeviceLayerProperties(
697 VkPhysicalDevice physicalDevice
,
699 VkLayerProperties
* pProperties
)
701 if (pProperties
== NULL
) {
706 /* None supported at this time */
707 return vk_error(VK_ERROR_INVALID_LAYER
);
710 VkResult
anv_GetDeviceQueue(
712 uint32_t queueNodeIndex
,
716 ANV_FROM_HANDLE(anv_device
, device
, _device
);
718 assert(queueIndex
== 0);
720 *pQueue
= anv_queue_to_handle(&device
->queue
);
725 VkResult
anv_QueueSubmit(
727 uint32_t cmdBufferCount
,
728 const VkCmdBuffer
* pCmdBuffers
,
731 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
732 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
733 struct anv_device
*device
= queue
->device
;
736 for (uint32_t i
= 0; i
< cmdBufferCount
; i
++) {
737 ANV_FROM_HANDLE(anv_cmd_buffer
, cmd_buffer
, pCmdBuffers
[i
]);
739 if (device
->dump_aub
)
740 anv_cmd_buffer_dump(cmd_buffer
);
742 if (!device
->no_hw
) {
743 ret
= anv_gem_execbuffer(device
, &cmd_buffer
->execbuf2
.execbuf
);
745 return vk_error(VK_ERROR_UNKNOWN
);
748 ret
= anv_gem_execbuffer(device
, &fence
->execbuf
);
750 return vk_error(VK_ERROR_UNKNOWN
);
753 for (uint32_t i
= 0; i
< cmd_buffer
->execbuf2
.bo_count
; i
++)
754 cmd_buffer
->execbuf2
.bos
[i
]->offset
= cmd_buffer
->execbuf2
.objects
[i
].offset
;
756 *(uint32_t *)queue
->completed_serial
.map
= cmd_buffer
->serial
;
763 VkResult
anv_QueueWaitIdle(
766 ANV_FROM_HANDLE(anv_queue
, queue
, _queue
);
768 return vkDeviceWaitIdle(anv_device_to_handle(queue
->device
));
771 VkResult
anv_DeviceWaitIdle(
774 ANV_FROM_HANDLE(anv_device
, device
, _device
);
775 struct anv_state state
;
776 struct anv_batch batch
;
777 struct drm_i915_gem_execbuffer2 execbuf
;
778 struct drm_i915_gem_exec_object2 exec2_objects
[1];
779 struct anv_bo
*bo
= NULL
;
784 state
= anv_state_pool_alloc(&device
->dynamic_state_pool
, 32, 32);
785 bo
= &device
->dynamic_state_pool
.block_pool
->bo
;
786 batch
.start
= batch
.next
= state
.map
;
787 batch
.end
= state
.map
+ 32;
788 anv_batch_emit(&batch
, GEN8_MI_BATCH_BUFFER_END
);
789 anv_batch_emit(&batch
, GEN8_MI_NOOP
);
791 exec2_objects
[0].handle
= bo
->gem_handle
;
792 exec2_objects
[0].relocation_count
= 0;
793 exec2_objects
[0].relocs_ptr
= 0;
794 exec2_objects
[0].alignment
= 0;
795 exec2_objects
[0].offset
= bo
->offset
;
796 exec2_objects
[0].flags
= 0;
797 exec2_objects
[0].rsvd1
= 0;
798 exec2_objects
[0].rsvd2
= 0;
800 execbuf
.buffers_ptr
= (uintptr_t) exec2_objects
;
801 execbuf
.buffer_count
= 1;
802 execbuf
.batch_start_offset
= state
.offset
;
803 execbuf
.batch_len
= batch
.next
- state
.map
;
804 execbuf
.cliprects_ptr
= 0;
805 execbuf
.num_cliprects
= 0;
810 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
811 execbuf
.rsvd1
= device
->context_id
;
814 if (!device
->no_hw
) {
815 ret
= anv_gem_execbuffer(device
, &execbuf
);
817 result
= vk_error(VK_ERROR_UNKNOWN
);
822 ret
= anv_gem_wait(device
, bo
->gem_handle
, &timeout
);
824 result
= vk_error(VK_ERROR_UNKNOWN
);
829 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
834 anv_state_pool_free(&device
->dynamic_state_pool
, state
);
840 anv_device_alloc(struct anv_device
* device
,
843 VkSystemAllocType allocType
)
845 return device
->instance
->pfnAlloc(device
->instance
->pAllocUserData
,
852 anv_device_free(struct anv_device
* device
,
858 return device
->instance
->pfnFree(device
->instance
->pAllocUserData
,
863 anv_bo_init_new(struct anv_bo
*bo
, struct anv_device
*device
, uint64_t size
)
865 bo
->gem_handle
= anv_gem_create(device
, size
);
867 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY
);
877 VkResult
anv_AllocMemory(
879 const VkMemoryAllocInfo
* pAllocInfo
,
880 VkDeviceMemory
* pMem
)
882 ANV_FROM_HANDLE(anv_device
, device
, _device
);
883 struct anv_device_memory
*mem
;
886 assert(pAllocInfo
->sType
== VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
);
888 if (pAllocInfo
->memoryTypeIndex
!= 0) {
889 /* We support exactly one memory heap. */
890 return vk_error(VK_ERROR_INVALID_VALUE
);
893 /* FINISHME: Fail if allocation request exceeds heap size. */
895 mem
= anv_device_alloc(device
, sizeof(*mem
), 8,
896 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
898 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
900 result
= anv_bo_init_new(&mem
->bo
, device
, pAllocInfo
->allocationSize
);
901 if (result
!= VK_SUCCESS
)
904 *pMem
= anv_device_memory_to_handle(mem
);
909 anv_device_free(device
, mem
);
914 VkResult
anv_FreeMemory(
918 ANV_FROM_HANDLE(anv_device
, device
, _device
);
919 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
922 anv_gem_munmap(mem
->bo
.map
, mem
->bo
.size
);
924 if (mem
->bo
.gem_handle
!= 0)
925 anv_gem_close(device
, mem
->bo
.gem_handle
);
927 anv_device_free(device
, mem
);
932 VkResult
anv_MapMemory(
937 VkMemoryMapFlags flags
,
940 ANV_FROM_HANDLE(anv_device
, device
, _device
);
941 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
943 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
944 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
945 * at a time is valid. We could just mmap up front and return an offset
946 * pointer here, but that may exhaust virtual memory on 32 bit
949 mem
->map
= anv_gem_mmap(device
, mem
->bo
.gem_handle
, offset
, size
);
950 mem
->map_size
= size
;
957 VkResult
anv_UnmapMemory(
961 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
963 anv_gem_munmap(mem
->map
, mem
->map_size
);
968 VkResult
anv_FlushMappedMemoryRanges(
970 uint32_t memRangeCount
,
971 const VkMappedMemoryRange
* pMemRanges
)
973 /* clflush here for !llc platforms */
978 VkResult
anv_InvalidateMappedMemoryRanges(
980 uint32_t memRangeCount
,
981 const VkMappedMemoryRange
* pMemRanges
)
983 return anv_FlushMappedMemoryRanges(device
, memRangeCount
, pMemRanges
);
986 VkResult
anv_GetBufferMemoryRequirements(
989 VkMemoryRequirements
* pMemoryRequirements
)
991 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
993 /* The Vulkan spec (git aaed022) says:
995 * memoryTypeBits is a bitfield and contains one bit set for every
996 * supported memory type for the resource. The bit `1<<i` is set if and
997 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
998 * structure for the physical device is supported.
1000 * We support exactly one memory type.
1002 pMemoryRequirements
->memoryTypeBits
= 1;
1004 pMemoryRequirements
->size
= buffer
->size
;
1005 pMemoryRequirements
->alignment
= 16;
1010 VkResult
anv_GetImageMemoryRequirements(
1013 VkMemoryRequirements
* pMemoryRequirements
)
1015 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1017 /* The Vulkan spec (git aaed022) says:
1019 * memoryTypeBits is a bitfield and contains one bit set for every
1020 * supported memory type for the resource. The bit `1<<i` is set if and
1021 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1022 * structure for the physical device is supported.
1024 * We support exactly one memory type.
1026 pMemoryRequirements
->memoryTypeBits
= 1;
1028 pMemoryRequirements
->size
= image
->size
;
1029 pMemoryRequirements
->alignment
= image
->alignment
;
1034 VkResult
anv_GetImageSparseMemoryRequirements(
1037 uint32_t* pNumRequirements
,
1038 VkSparseImageMemoryRequirements
* pSparseMemoryRequirements
)
1040 return vk_error(VK_UNSUPPORTED
);
1043 VkResult
anv_GetDeviceMemoryCommitment(
1045 VkDeviceMemory memory
,
1046 VkDeviceSize
* pCommittedMemoryInBytes
)
1048 *pCommittedMemoryInBytes
= 0;
1049 stub_return(VK_SUCCESS
);
1052 VkResult
anv_BindBufferMemory(
1055 VkDeviceMemory _mem
,
1056 VkDeviceSize memOffset
)
1058 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1059 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1061 buffer
->bo
= &mem
->bo
;
1062 buffer
->offset
= memOffset
;
1067 VkResult
anv_BindImageMemory(
1070 VkDeviceMemory _mem
,
1071 VkDeviceSize memOffset
)
1073 ANV_FROM_HANDLE(anv_device_memory
, mem
, _mem
);
1074 ANV_FROM_HANDLE(anv_image
, image
, _image
);
1076 image
->bo
= &mem
->bo
;
1077 image
->offset
= memOffset
;
1082 VkResult
anv_QueueBindSparseBufferMemory(
1085 uint32_t numBindings
,
1086 const VkSparseMemoryBindInfo
* pBindInfo
)
1088 stub_return(VK_UNSUPPORTED
);
1091 VkResult
anv_QueueBindSparseImageOpaqueMemory(
1094 uint32_t numBindings
,
1095 const VkSparseMemoryBindInfo
* pBindInfo
)
1097 stub_return(VK_UNSUPPORTED
);
1100 VkResult
anv_QueueBindSparseImageMemory(
1103 uint32_t numBindings
,
1104 const VkSparseImageMemoryBindInfo
* pBindInfo
)
1106 stub_return(VK_UNSUPPORTED
);
1109 VkResult
anv_CreateFence(
1111 const VkFenceCreateInfo
* pCreateInfo
,
1114 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1115 struct anv_fence
*fence
;
1116 struct anv_batch batch
;
1119 const uint32_t fence_size
= 128;
1121 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
);
1123 fence
= anv_device_alloc(device
, sizeof(*fence
), 8,
1124 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1126 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1128 result
= anv_bo_init_new(&fence
->bo
, device
, fence_size
);
1129 if (result
!= VK_SUCCESS
)
1133 anv_gem_mmap(device
, fence
->bo
.gem_handle
, 0, fence
->bo
.size
);
1134 batch
.next
= batch
.start
= fence
->bo
.map
;
1135 batch
.end
= fence
->bo
.map
+ fence
->bo
.size
;
1136 anv_batch_emit(&batch
, GEN8_MI_BATCH_BUFFER_END
);
1137 anv_batch_emit(&batch
, GEN8_MI_NOOP
);
1139 fence
->exec2_objects
[0].handle
= fence
->bo
.gem_handle
;
1140 fence
->exec2_objects
[0].relocation_count
= 0;
1141 fence
->exec2_objects
[0].relocs_ptr
= 0;
1142 fence
->exec2_objects
[0].alignment
= 0;
1143 fence
->exec2_objects
[0].offset
= fence
->bo
.offset
;
1144 fence
->exec2_objects
[0].flags
= 0;
1145 fence
->exec2_objects
[0].rsvd1
= 0;
1146 fence
->exec2_objects
[0].rsvd2
= 0;
1148 fence
->execbuf
.buffers_ptr
= (uintptr_t) fence
->exec2_objects
;
1149 fence
->execbuf
.buffer_count
= 1;
1150 fence
->execbuf
.batch_start_offset
= 0;
1151 fence
->execbuf
.batch_len
= batch
.next
- fence
->bo
.map
;
1152 fence
->execbuf
.cliprects_ptr
= 0;
1153 fence
->execbuf
.num_cliprects
= 0;
1154 fence
->execbuf
.DR1
= 0;
1155 fence
->execbuf
.DR4
= 0;
1157 fence
->execbuf
.flags
=
1158 I915_EXEC_HANDLE_LUT
| I915_EXEC_NO_RELOC
| I915_EXEC_RENDER
;
1159 fence
->execbuf
.rsvd1
= device
->context_id
;
1160 fence
->execbuf
.rsvd2
= 0;
1162 *pFence
= anv_fence_to_handle(fence
);
1167 anv_device_free(device
, fence
);
1172 VkResult
anv_DestroyFence(
1176 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1177 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1179 anv_gem_munmap(fence
->bo
.map
, fence
->bo
.size
);
1180 anv_gem_close(device
, fence
->bo
.gem_handle
);
1181 anv_device_free(device
, fence
);
1186 VkResult
anv_ResetFences(
1188 uint32_t fenceCount
,
1189 const VkFence
* pFences
)
1191 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1192 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1193 fence
->ready
= false;
1199 VkResult
anv_GetFenceStatus(
1203 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1204 ANV_FROM_HANDLE(anv_fence
, fence
, _fence
);
1211 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1213 fence
->ready
= true;
1217 return VK_NOT_READY
;
1220 VkResult
anv_WaitForFences(
1222 uint32_t fenceCount
,
1223 const VkFence
* pFences
,
1227 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1228 int64_t t
= timeout
;
1231 /* FIXME: handle !waitAll */
1233 for (uint32_t i
= 0; i
< fenceCount
; i
++) {
1234 ANV_FROM_HANDLE(anv_fence
, fence
, pFences
[i
]);
1235 ret
= anv_gem_wait(device
, fence
->bo
.gem_handle
, &t
);
1236 if (ret
== -1 && errno
== ETIME
)
1239 return vk_error(VK_ERROR_UNKNOWN
);
1245 // Queue semaphore functions
1247 VkResult
anv_CreateSemaphore(
1249 const VkSemaphoreCreateInfo
* pCreateInfo
,
1250 VkSemaphore
* pSemaphore
)
1252 stub_return(VK_UNSUPPORTED
);
1255 VkResult
anv_DestroySemaphore(
1257 VkSemaphore semaphore
)
1259 stub_return(VK_UNSUPPORTED
);
1262 VkResult
anv_QueueSignalSemaphore(
1264 VkSemaphore semaphore
)
1266 stub_return(VK_UNSUPPORTED
);
1269 VkResult
anv_QueueWaitSemaphore(
1271 VkSemaphore semaphore
)
1273 stub_return(VK_UNSUPPORTED
);
1278 VkResult
anv_CreateEvent(
1280 const VkEventCreateInfo
* pCreateInfo
,
1283 stub_return(VK_UNSUPPORTED
);
1286 VkResult
anv_DestroyEvent(
1290 stub_return(VK_UNSUPPORTED
);
1293 VkResult
anv_GetEventStatus(
1297 stub_return(VK_UNSUPPORTED
);
1300 VkResult
anv_SetEvent(
1304 stub_return(VK_UNSUPPORTED
);
1307 VkResult
anv_ResetEvent(
1311 stub_return(VK_UNSUPPORTED
);
1316 VkResult
anv_CreateBuffer(
1318 const VkBufferCreateInfo
* pCreateInfo
,
1321 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1322 struct anv_buffer
*buffer
;
1324 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
);
1326 buffer
= anv_device_alloc(device
, sizeof(*buffer
), 8,
1327 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1329 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1331 buffer
->size
= pCreateInfo
->size
;
1335 *pBuffer
= anv_buffer_to_handle(buffer
);
1340 VkResult
anv_DestroyBuffer(
1344 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1345 ANV_FROM_HANDLE(anv_buffer
, buffer
, _buffer
);
1347 anv_device_free(device
, buffer
);
1352 // Buffer view functions
1355 anv_fill_buffer_surface_state(void *state
, VkFormat format
,
1356 uint32_t offset
, uint32_t range
)
1358 const struct anv_format
*info
;
1360 info
= anv_format_for_vk_format(format
);
1361 /* This assumes RGBA float format. */
1362 uint32_t stride
= 4;
1363 uint32_t num_elements
= range
/ stride
;
1365 struct GEN8_RENDER_SURFACE_STATE surface_state
= {
1366 .SurfaceType
= SURFTYPE_BUFFER
,
1367 .SurfaceArray
= false,
1368 .SurfaceFormat
= info
->surface_format
,
1369 .SurfaceVerticalAlignment
= VALIGN4
,
1370 .SurfaceHorizontalAlignment
= HALIGN4
,
1372 .VerticalLineStride
= 0,
1373 .VerticalLineStrideOffset
= 0,
1374 .SamplerL2BypassModeDisable
= true,
1375 .RenderCacheReadWriteMode
= WriteOnlyCache
,
1376 .MemoryObjectControlState
= GEN8_MOCS
,
1377 .BaseMipLevel
= 0.0,
1379 .Height
= (num_elements
>> 7) & 0x3fff,
1380 .Width
= num_elements
& 0x7f,
1381 .Depth
= (num_elements
>> 21) & 0x3f,
1382 .SurfacePitch
= stride
- 1,
1383 .MinimumArrayElement
= 0,
1384 .NumberofMultisamples
= MULTISAMPLECOUNT_1
,
1389 .AuxiliarySurfaceMode
= AUX_NONE
,
1391 .GreenClearColor
= 0,
1392 .BlueClearColor
= 0,
1393 .AlphaClearColor
= 0,
1394 .ShaderChannelSelectRed
= SCS_RED
,
1395 .ShaderChannelSelectGreen
= SCS_GREEN
,
1396 .ShaderChannelSelectBlue
= SCS_BLUE
,
1397 .ShaderChannelSelectAlpha
= SCS_ALPHA
,
1398 .ResourceMinLOD
= 0.0,
1399 /* FIXME: We assume that the image must be bound at this time. */
1400 .SurfaceBaseAddress
= { NULL
, offset
},
1403 GEN8_RENDER_SURFACE_STATE_pack(NULL
, state
, &surface_state
);
1406 VkResult
anv_CreateBufferView(
1408 const VkBufferViewCreateInfo
* pCreateInfo
,
1409 VkBufferView
* pView
)
1411 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1412 ANV_FROM_HANDLE(anv_buffer
, buffer
, pCreateInfo
->buffer
);
1413 struct anv_buffer_view
*bview
;
1414 struct anv_surface_view
*view
;
1416 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
);
1418 bview
= anv_device_alloc(device
, sizeof(*view
), 8,
1419 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1421 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1423 view
= &bview
->view
;
1424 view
->bo
= buffer
->bo
;
1425 view
->offset
= buffer
->offset
+ pCreateInfo
->offset
;
1426 view
->surface_state
=
1427 anv_state_pool_alloc(&device
->surface_state_pool
, 64, 64);
1428 view
->format
= pCreateInfo
->format
;
1429 view
->range
= pCreateInfo
->range
;
1431 anv_fill_buffer_surface_state(view
->surface_state
.map
,
1432 pCreateInfo
->format
,
1433 view
->offset
, pCreateInfo
->range
);
1435 *pView
= anv_buffer_view_to_handle(bview
);
1440 VkResult
anv_DestroyBufferView(
1442 VkBufferView _bview
)
1444 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1445 ANV_FROM_HANDLE(anv_buffer_view
, bview
, _bview
);
1447 anv_surface_view_fini(device
, &bview
->view
);
1448 anv_device_free(device
, bview
);
1453 // Sampler functions
1455 VkResult
anv_CreateSampler(
1457 const VkSamplerCreateInfo
* pCreateInfo
,
1458 VkSampler
* pSampler
)
1460 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1461 struct anv_sampler
*sampler
;
1462 uint32_t mag_filter
, min_filter
, max_anisotropy
;
1464 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
);
1466 sampler
= anv_device_alloc(device
, sizeof(*sampler
), 8,
1467 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1469 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1471 static const uint32_t vk_to_gen_tex_filter
[] = {
1472 [VK_TEX_FILTER_NEAREST
] = MAPFILTER_NEAREST
,
1473 [VK_TEX_FILTER_LINEAR
] = MAPFILTER_LINEAR
1476 static const uint32_t vk_to_gen_mipmap_mode
[] = {
1477 [VK_TEX_MIPMAP_MODE_BASE
] = MIPFILTER_NONE
,
1478 [VK_TEX_MIPMAP_MODE_NEAREST
] = MIPFILTER_NEAREST
,
1479 [VK_TEX_MIPMAP_MODE_LINEAR
] = MIPFILTER_LINEAR
1482 static const uint32_t vk_to_gen_tex_address
[] = {
1483 [VK_TEX_ADDRESS_WRAP
] = TCM_WRAP
,
1484 [VK_TEX_ADDRESS_MIRROR
] = TCM_MIRROR
,
1485 [VK_TEX_ADDRESS_CLAMP
] = TCM_CLAMP
,
1486 [VK_TEX_ADDRESS_MIRROR_ONCE
] = TCM_MIRROR_ONCE
,
1487 [VK_TEX_ADDRESS_CLAMP_BORDER
] = TCM_CLAMP_BORDER
,
1490 static const uint32_t vk_to_gen_compare_op
[] = {
1491 [VK_COMPARE_OP_NEVER
] = PREFILTEROPNEVER
,
1492 [VK_COMPARE_OP_LESS
] = PREFILTEROPLESS
,
1493 [VK_COMPARE_OP_EQUAL
] = PREFILTEROPEQUAL
,
1494 [VK_COMPARE_OP_LESS_EQUAL
] = PREFILTEROPLEQUAL
,
1495 [VK_COMPARE_OP_GREATER
] = PREFILTEROPGREATER
,
1496 [VK_COMPARE_OP_NOT_EQUAL
] = PREFILTEROPNOTEQUAL
,
1497 [VK_COMPARE_OP_GREATER_EQUAL
] = PREFILTEROPGEQUAL
,
1498 [VK_COMPARE_OP_ALWAYS
] = PREFILTEROPALWAYS
,
1501 if (pCreateInfo
->maxAnisotropy
> 1) {
1502 mag_filter
= MAPFILTER_ANISOTROPIC
;
1503 min_filter
= MAPFILTER_ANISOTROPIC
;
1504 max_anisotropy
= (pCreateInfo
->maxAnisotropy
- 2) / 2;
1506 mag_filter
= vk_to_gen_tex_filter
[pCreateInfo
->magFilter
];
1507 min_filter
= vk_to_gen_tex_filter
[pCreateInfo
->minFilter
];
1508 max_anisotropy
= RATIO21
;
1511 struct GEN8_SAMPLER_STATE sampler_state
= {
1512 .SamplerDisable
= false,
1513 .TextureBorderColorMode
= DX10OGL
,
1514 .LODPreClampMode
= 0,
1515 .BaseMipLevel
= 0.0,
1516 .MipModeFilter
= vk_to_gen_mipmap_mode
[pCreateInfo
->mipMode
],
1517 .MagModeFilter
= mag_filter
,
1518 .MinModeFilter
= min_filter
,
1519 .TextureLODBias
= pCreateInfo
->mipLodBias
* 256,
1520 .AnisotropicAlgorithm
= EWAApproximation
,
1521 .MinLOD
= pCreateInfo
->minLod
,
1522 .MaxLOD
= pCreateInfo
->maxLod
,
1523 .ChromaKeyEnable
= 0,
1524 .ChromaKeyIndex
= 0,
1526 .ShadowFunction
= vk_to_gen_compare_op
[pCreateInfo
->compareOp
],
1527 .CubeSurfaceControlMode
= 0,
1529 .IndirectStatePointer
=
1530 device
->border_colors
.offset
+
1531 pCreateInfo
->borderColor
* sizeof(float) * 4,
1533 .LODClampMagnificationMode
= MIPNONE
,
1534 .MaximumAnisotropy
= max_anisotropy
,
1535 .RAddressMinFilterRoundingEnable
= 0,
1536 .RAddressMagFilterRoundingEnable
= 0,
1537 .VAddressMinFilterRoundingEnable
= 0,
1538 .VAddressMagFilterRoundingEnable
= 0,
1539 .UAddressMinFilterRoundingEnable
= 0,
1540 .UAddressMagFilterRoundingEnable
= 0,
1541 .TrilinearFilterQuality
= 0,
1542 .NonnormalizedCoordinateEnable
= 0,
1543 .TCXAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressU
],
1544 .TCYAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressV
],
1545 .TCZAddressControlMode
= vk_to_gen_tex_address
[pCreateInfo
->addressW
],
1548 GEN8_SAMPLER_STATE_pack(NULL
, sampler
->state
, &sampler_state
);
1550 *pSampler
= anv_sampler_to_handle(sampler
);
1555 VkResult
anv_DestroySampler(
1559 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1560 ANV_FROM_HANDLE(anv_sampler
, sampler
, _sampler
);
1562 anv_device_free(device
, sampler
);
1567 // Descriptor set functions
1569 VkResult
anv_CreateDescriptorSetLayout(
1571 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
1572 VkDescriptorSetLayout
* pSetLayout
)
1574 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1575 struct anv_descriptor_set_layout
*set_layout
;
1577 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
1579 uint32_t sampler_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1580 uint32_t surface_count
[VK_SHADER_STAGE_NUM
] = { 0, };
1581 uint32_t num_dynamic_buffers
= 0;
1583 uint32_t stages
= 0;
1586 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1587 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1588 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1589 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1590 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1591 sampler_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1597 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1598 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1599 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1600 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1601 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1602 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1603 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1604 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1605 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1606 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1607 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1608 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1609 surface_count
[s
] += pCreateInfo
->pBinding
[i
].arraySize
;
1615 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1616 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1617 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1618 num_dynamic_buffers
+= pCreateInfo
->pBinding
[i
].arraySize
;
1624 stages
|= pCreateInfo
->pBinding
[i
].stageFlags
;
1625 count
+= pCreateInfo
->pBinding
[i
].arraySize
;
1628 uint32_t sampler_total
= 0;
1629 uint32_t surface_total
= 0;
1630 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1631 sampler_total
+= sampler_count
[s
];
1632 surface_total
+= surface_count
[s
];
1635 size_t size
= sizeof(*set_layout
) +
1636 (sampler_total
+ surface_total
) * sizeof(set_layout
->entries
[0]);
1637 set_layout
= anv_device_alloc(device
, size
, 8,
1638 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1640 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1642 set_layout
->num_dynamic_buffers
= num_dynamic_buffers
;
1643 set_layout
->count
= count
;
1644 set_layout
->shader_stages
= stages
;
1646 struct anv_descriptor_slot
*p
= set_layout
->entries
;
1647 struct anv_descriptor_slot
*sampler
[VK_SHADER_STAGE_NUM
];
1648 struct anv_descriptor_slot
*surface
[VK_SHADER_STAGE_NUM
];
1649 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
1650 set_layout
->stage
[s
].surface_count
= surface_count
[s
];
1651 set_layout
->stage
[s
].surface_start
= surface
[s
] = p
;
1652 p
+= surface_count
[s
];
1653 set_layout
->stage
[s
].sampler_count
= sampler_count
[s
];
1654 set_layout
->stage
[s
].sampler_start
= sampler
[s
] = p
;
1655 p
+= sampler_count
[s
];
1658 uint32_t descriptor
= 0;
1659 int8_t dynamic_slot
= 0;
1661 for (uint32_t i
= 0; i
< pCreateInfo
->count
; i
++) {
1662 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1663 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1664 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1665 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1666 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1667 sampler
[s
]->index
= descriptor
+ j
;
1668 sampler
[s
]->dynamic_slot
= -1;
1676 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1677 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1678 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1686 switch (pCreateInfo
->pBinding
[i
].descriptorType
) {
1687 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1688 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1689 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1690 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1691 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1692 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1693 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1694 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1695 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1696 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1697 for_each_bit(s
, pCreateInfo
->pBinding
[i
].stageFlags
)
1698 for (uint32_t j
= 0; j
< pCreateInfo
->pBinding
[i
].arraySize
; j
++) {
1699 surface
[s
]->index
= descriptor
+ j
;
1701 surface
[s
]->dynamic_slot
= dynamic_slot
+ j
;
1703 surface
[s
]->dynamic_slot
= -1;
1712 dynamic_slot
+= pCreateInfo
->pBinding
[i
].arraySize
;
1714 descriptor
+= pCreateInfo
->pBinding
[i
].arraySize
;
1717 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
1722 VkResult
anv_DestroyDescriptorSetLayout(
1724 VkDescriptorSetLayout _set_layout
)
1726 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1727 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
1729 anv_device_free(device
, set_layout
);
1734 VkResult
anv_CreateDescriptorPool(
1736 VkDescriptorPoolUsage poolUsage
,
1738 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
1739 VkDescriptorPool
* pDescriptorPool
)
1741 anv_finishme("VkDescriptorPool is a stub");
1742 pDescriptorPool
->handle
= 1;
1746 VkResult
anv_DestroyDescriptorPool(
1748 VkDescriptorPool _pool
)
1750 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1754 VkResult
anv_ResetDescriptorPool(
1756 VkDescriptorPool descriptorPool
)
1758 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
1763 anv_descriptor_set_create(struct anv_device
*device
,
1764 const struct anv_descriptor_set_layout
*layout
,
1765 struct anv_descriptor_set
**out_set
)
1767 struct anv_descriptor_set
*set
;
1768 size_t size
= sizeof(*set
) + layout
->count
* sizeof(set
->descriptors
[0]);
1770 set
= anv_device_alloc(device
, size
, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1772 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1774 /* A descriptor set may not be 100% filled. Clear the set so we can can
1775 * later detect holes in it.
1777 memset(set
, 0, size
);
1785 anv_descriptor_set_destroy(struct anv_device
*device
,
1786 struct anv_descriptor_set
*set
)
1788 anv_device_free(device
, set
);
1791 VkResult
anv_AllocDescriptorSets(
1793 VkDescriptorPool descriptorPool
,
1794 VkDescriptorSetUsage setUsage
,
1796 const VkDescriptorSetLayout
* pSetLayouts
,
1797 VkDescriptorSet
* pDescriptorSets
,
1800 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1803 struct anv_descriptor_set
*set
;
1805 for (uint32_t i
= 0; i
< count
; i
++) {
1806 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
, pSetLayouts
[i
]);
1808 result
= anv_descriptor_set_create(device
, layout
, &set
);
1809 if (result
!= VK_SUCCESS
) {
1814 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
1822 VkResult
anv_FreeDescriptorSets(
1824 VkDescriptorPool descriptorPool
,
1826 const VkDescriptorSet
* pDescriptorSets
)
1828 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1830 for (uint32_t i
= 0; i
< count
; i
++) {
1831 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
1833 anv_descriptor_set_destroy(device
, set
);
1839 VkResult
anv_UpdateDescriptorSets(
1841 uint32_t writeCount
,
1842 const VkWriteDescriptorSet
* pDescriptorWrites
,
1844 const VkCopyDescriptorSet
* pDescriptorCopies
)
1846 for (uint32_t i
= 0; i
< writeCount
; i
++) {
1847 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
1848 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->destSet
);
1850 switch (write
->descriptorType
) {
1851 case VK_DESCRIPTOR_TYPE_SAMPLER
:
1852 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
1853 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1854 set
->descriptors
[write
->destBinding
+ j
].sampler
=
1855 anv_sampler_from_handle(write
->pDescriptors
[j
].sampler
);
1858 if (write
->descriptorType
== VK_DESCRIPTOR_TYPE_SAMPLER
)
1863 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
1864 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
1865 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1866 ANV_FROM_HANDLE(anv_image_view
, iview
,
1867 write
->pDescriptors
[j
].imageView
);
1868 set
->descriptors
[write
->destBinding
+ j
].view
= &iview
->view
;
1872 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
1873 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
1874 anv_finishme("texel buffers not implemented");
1877 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
1878 anv_finishme("input attachments not implemented");
1881 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
1882 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
1883 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
1884 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
1885 for (uint32_t j
= 0; j
< write
->count
; j
++) {
1886 ANV_FROM_HANDLE(anv_buffer_view
, bview
,
1887 write
->pDescriptors
[j
].bufferView
);
1888 set
->descriptors
[write
->destBinding
+ j
].view
= &bview
->view
;
1896 for (uint32_t i
= 0; i
< copyCount
; i
++) {
1897 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
1898 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->destSet
);
1899 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->destSet
);
1900 for (uint32_t j
= 0; j
< copy
->count
; j
++) {
1901 dest
->descriptors
[copy
->destBinding
+ j
] =
1902 src
->descriptors
[copy
->srcBinding
+ j
];
1909 // State object functions
1911 static inline int64_t
1912 clamp_int64(int64_t x
, int64_t min
, int64_t max
)
1922 VkResult
anv_CreateDynamicViewportState(
1924 const VkDynamicViewportStateCreateInfo
* pCreateInfo
,
1925 VkDynamicViewportState
* pState
)
1927 ANV_FROM_HANDLE(anv_device
, device
, _device
);
1928 struct anv_dynamic_vp_state
*state
;
1930 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
);
1932 state
= anv_device_alloc(device
, sizeof(*state
), 8,
1933 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
1935 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
1937 unsigned count
= pCreateInfo
->viewportAndScissorCount
;
1938 state
->sf_clip_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1940 state
->cc_vp
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1942 state
->scissor
= anv_state_pool_alloc(&device
->dynamic_state_pool
,
1945 for (uint32_t i
= 0; i
< pCreateInfo
->viewportAndScissorCount
; i
++) {
1946 const VkViewport
*vp
= &pCreateInfo
->pViewports
[i
];
1947 const VkRect2D
*s
= &pCreateInfo
->pScissors
[i
];
1949 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport
= {
1950 .ViewportMatrixElementm00
= vp
->width
/ 2,
1951 .ViewportMatrixElementm11
= vp
->height
/ 2,
1952 .ViewportMatrixElementm22
= (vp
->maxDepth
- vp
->minDepth
) / 2,
1953 .ViewportMatrixElementm30
= vp
->originX
+ vp
->width
/ 2,
1954 .ViewportMatrixElementm31
= vp
->originY
+ vp
->height
/ 2,
1955 .ViewportMatrixElementm32
= (vp
->maxDepth
+ vp
->minDepth
) / 2,
1956 .XMinClipGuardband
= -1.0f
,
1957 .XMaxClipGuardband
= 1.0f
,
1958 .YMinClipGuardband
= -1.0f
,
1959 .YMaxClipGuardband
= 1.0f
,
1960 .XMinViewPort
= vp
->originX
,
1961 .XMaxViewPort
= vp
->originX
+ vp
->width
- 1,
1962 .YMinViewPort
= vp
->originY
,
1963 .YMaxViewPort
= vp
->originY
+ vp
->height
- 1,
1966 struct GEN8_CC_VIEWPORT cc_viewport
= {
1967 .MinimumDepth
= vp
->minDepth
,
1968 .MaximumDepth
= vp
->maxDepth
1971 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1972 * ymax < ymin for empty clips. In case clip x, y, width height are all
1973 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1974 * what we want. Just special case empty clips and produce a canonical
1976 static const struct GEN8_SCISSOR_RECT empty_scissor
= {
1977 .ScissorRectangleYMin
= 1,
1978 .ScissorRectangleXMin
= 1,
1979 .ScissorRectangleYMax
= 0,
1980 .ScissorRectangleXMax
= 0
1983 const int max
= 0xffff;
1984 struct GEN8_SCISSOR_RECT scissor
= {
1985 /* Do this math using int64_t so overflow gets clamped correctly. */
1986 .ScissorRectangleYMin
= clamp_int64(s
->offset
.y
, 0, max
),
1987 .ScissorRectangleXMin
= clamp_int64(s
->offset
.x
, 0, max
),
1988 .ScissorRectangleYMax
= clamp_int64((uint64_t) s
->offset
.y
+ s
->extent
.height
- 1, 0, max
),
1989 .ScissorRectangleXMax
= clamp_int64((uint64_t) s
->offset
.x
+ s
->extent
.width
- 1, 0, max
)
1992 GEN8_SF_CLIP_VIEWPORT_pack(NULL
, state
->sf_clip_vp
.map
+ i
* 64, &sf_clip_viewport
);
1993 GEN8_CC_VIEWPORT_pack(NULL
, state
->cc_vp
.map
+ i
* 32, &cc_viewport
);
1995 if (s
->extent
.width
<= 0 || s
->extent
.height
<= 0) {
1996 GEN8_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &empty_scissor
);
1998 GEN8_SCISSOR_RECT_pack(NULL
, state
->scissor
.map
+ i
* 32, &scissor
);
2002 *pState
= anv_dynamic_vp_state_to_handle(state
);
2007 VkResult
anv_DestroyDynamicViewportState(
2009 VkDynamicViewportState _vp_state
)
2011 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2012 ANV_FROM_HANDLE(anv_dynamic_vp_state
, vp_state
, _vp_state
);
2014 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->sf_clip_vp
);
2015 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->cc_vp
);
2016 anv_state_pool_free(&device
->dynamic_state_pool
, vp_state
->scissor
);
2018 anv_device_free(device
, vp_state
);
2023 VkResult
anv_CreateDynamicRasterState(
2025 const VkDynamicRasterStateCreateInfo
* pCreateInfo
,
2026 VkDynamicRasterState
* pState
)
2028 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2029 struct anv_dynamic_rs_state
*state
;
2031 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_RASTER_STATE_CREATE_INFO
);
2033 state
= anv_device_alloc(device
, sizeof(*state
), 8,
2034 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2036 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2038 struct GEN8_3DSTATE_SF sf
= {
2039 GEN8_3DSTATE_SF_header
,
2040 .LineWidth
= pCreateInfo
->lineWidth
,
2043 GEN8_3DSTATE_SF_pack(NULL
, state
->state_sf
, &sf
);
2045 bool enable_bias
= pCreateInfo
->depthBias
!= 0.0f
||
2046 pCreateInfo
->slopeScaledDepthBias
!= 0.0f
;
2047 struct GEN8_3DSTATE_RASTER raster
= {
2048 .GlobalDepthOffsetEnableSolid
= enable_bias
,
2049 .GlobalDepthOffsetEnableWireframe
= enable_bias
,
2050 .GlobalDepthOffsetEnablePoint
= enable_bias
,
2051 .GlobalDepthOffsetConstant
= pCreateInfo
->depthBias
,
2052 .GlobalDepthOffsetScale
= pCreateInfo
->slopeScaledDepthBias
,
2053 .GlobalDepthOffsetClamp
= pCreateInfo
->depthBiasClamp
2056 GEN8_3DSTATE_RASTER_pack(NULL
, state
->state_raster
, &raster
);
2058 *pState
= anv_dynamic_rs_state_to_handle(state
);
2063 VkResult
anv_DestroyDynamicRasterState(
2065 VkDynamicRasterState _rs_state
)
2067 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2068 ANV_FROM_HANDLE(anv_dynamic_rs_state
, rs_state
, _rs_state
);
2070 anv_device_free(device
, rs_state
);
2075 VkResult
anv_CreateDynamicColorBlendState(
2077 const VkDynamicColorBlendStateCreateInfo
* pCreateInfo
,
2078 VkDynamicColorBlendState
* pState
)
2080 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2081 struct anv_dynamic_cb_state
*state
;
2083 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO
);
2085 state
= anv_device_alloc(device
, sizeof(*state
), 8,
2086 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2088 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2090 struct GEN8_COLOR_CALC_STATE color_calc_state
= {
2091 .BlendConstantColorRed
= pCreateInfo
->blendConst
[0],
2092 .BlendConstantColorGreen
= pCreateInfo
->blendConst
[1],
2093 .BlendConstantColorBlue
= pCreateInfo
->blendConst
[2],
2094 .BlendConstantColorAlpha
= pCreateInfo
->blendConst
[3]
2097 GEN8_COLOR_CALC_STATE_pack(NULL
, state
->state_color_calc
, &color_calc_state
);
2099 *pState
= anv_dynamic_cb_state_to_handle(state
);
2104 VkResult
anv_DestroyDynamicColorBlendState(
2106 VkDynamicColorBlendState _cb_state
)
2108 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2109 ANV_FROM_HANDLE(anv_dynamic_cb_state
, cb_state
, _cb_state
);
2111 anv_device_free(device
, cb_state
);
2116 VkResult
anv_CreateDynamicDepthStencilState(
2118 const VkDynamicDepthStencilStateCreateInfo
* pCreateInfo
,
2119 VkDynamicDepthStencilState
* pState
)
2121 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2122 struct anv_dynamic_ds_state
*state
;
2124 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DYNAMIC_DEPTH_STENCIL_STATE_CREATE_INFO
);
2126 state
= anv_device_alloc(device
, sizeof(*state
), 8,
2127 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2129 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2131 struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil
= {
2132 GEN8_3DSTATE_WM_DEPTH_STENCIL_header
,
2134 /* Is this what we need to do? */
2135 .StencilBufferWriteEnable
= pCreateInfo
->stencilWriteMask
!= 0,
2137 .StencilTestMask
= pCreateInfo
->stencilReadMask
& 0xff,
2138 .StencilWriteMask
= pCreateInfo
->stencilWriteMask
& 0xff,
2140 .BackfaceStencilTestMask
= pCreateInfo
->stencilReadMask
& 0xff,
2141 .BackfaceStencilWriteMask
= pCreateInfo
->stencilWriteMask
& 0xff,
2144 GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL
, state
->state_wm_depth_stencil
,
2147 struct GEN8_COLOR_CALC_STATE color_calc_state
= {
2148 .StencilReferenceValue
= pCreateInfo
->stencilFrontRef
,
2149 .BackFaceStencilReferenceValue
= pCreateInfo
->stencilBackRef
2152 GEN8_COLOR_CALC_STATE_pack(NULL
, state
->state_color_calc
, &color_calc_state
);
2154 *pState
= anv_dynamic_ds_state_to_handle(state
);
2159 VkResult
anv_DestroyDynamicDepthStencilState(
2161 VkDynamicDepthStencilState _ds_state
)
2163 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2164 ANV_FROM_HANDLE(anv_dynamic_ds_state
, ds_state
, _ds_state
);
2166 anv_device_free(device
, ds_state
);
2171 // Command buffer functions
2173 VkResult
anv_CreateCommandPool(
2175 const VkCmdPoolCreateInfo
* pCreateInfo
,
2176 VkCmdPool
* pCmdPool
)
2178 pCmdPool
->handle
= 7;
2180 stub_return(VK_SUCCESS
);
2183 VkResult
anv_DestroyCommandPool(
2187 stub_return(VK_SUCCESS
);
2190 VkResult
anv_ResetCommandPool(
2193 VkCmdPoolResetFlags flags
)
2195 stub_return(VK_UNSUPPORTED
);
2198 VkResult
anv_CreateFramebuffer(
2200 const VkFramebufferCreateInfo
* pCreateInfo
,
2201 VkFramebuffer
* pFramebuffer
)
2203 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2204 struct anv_framebuffer
*framebuffer
;
2206 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
);
2208 size_t size
= sizeof(*framebuffer
) +
2209 sizeof(struct anv_attachment_view
*) * pCreateInfo
->attachmentCount
;
2210 framebuffer
= anv_device_alloc(device
, size
, 8,
2211 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2212 if (framebuffer
== NULL
)
2213 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2215 framebuffer
->attachment_count
= pCreateInfo
->attachmentCount
;
2216 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2217 ANV_FROM_HANDLE(anv_attachment_view
, view
,
2218 pCreateInfo
->pAttachments
[i
].view
);
2220 framebuffer
->attachments
[i
] = view
;
2223 framebuffer
->width
= pCreateInfo
->width
;
2224 framebuffer
->height
= pCreateInfo
->height
;
2225 framebuffer
->layers
= pCreateInfo
->layers
;
2227 anv_CreateDynamicViewportState(anv_device_to_handle(device
),
2228 &(VkDynamicViewportStateCreateInfo
) {
2229 .sType
= VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO
,
2230 .viewportAndScissorCount
= 1,
2231 .pViewports
= (VkViewport
[]) {
2235 .width
= pCreateInfo
->width
,
2236 .height
= pCreateInfo
->height
,
2241 .pScissors
= (VkRect2D
[]) {
2243 { pCreateInfo
->width
, pCreateInfo
->height
} },
2246 &framebuffer
->vp_state
);
2248 *pFramebuffer
= anv_framebuffer_to_handle(framebuffer
);
2253 VkResult
anv_DestroyFramebuffer(
2257 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2258 ANV_FROM_HANDLE(anv_framebuffer
, fb
, _fb
);
2260 anv_DestroyDynamicViewportState(anv_device_to_handle(device
),
2262 anv_device_free(device
, fb
);
2267 VkResult
anv_CreateRenderPass(
2269 const VkRenderPassCreateInfo
* pCreateInfo
,
2270 VkRenderPass
* pRenderPass
)
2272 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2273 struct anv_render_pass
*pass
;
2276 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
);
2278 size
= sizeof(*pass
) +
2279 pCreateInfo
->subpassCount
* sizeof(struct anv_subpass
);
2280 pass
= anv_device_alloc(device
, size
, 8,
2281 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2283 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
2285 /* Clear the subpasses along with the parent pass. This required because
2286 * each array member of anv_subpass must be a valid pointer if not NULL.
2288 memset(pass
, 0, size
);
2290 pass
->attachment_count
= pCreateInfo
->attachmentCount
;
2291 pass
->subpass_count
= pCreateInfo
->subpassCount
;
2293 size
= pCreateInfo
->attachmentCount
* sizeof(*pass
->attachments
);
2294 pass
->attachments
= anv_device_alloc(device
, size
, 8,
2295 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2296 for (uint32_t i
= 0; i
< pCreateInfo
->attachmentCount
; i
++) {
2297 pass
->attachments
[i
].format
= pCreateInfo
->pAttachments
[i
].format
;
2298 pass
->attachments
[i
].samples
= pCreateInfo
->pAttachments
[i
].samples
;
2299 pass
->attachments
[i
].load_op
= pCreateInfo
->pAttachments
[i
].loadOp
;
2300 pass
->attachments
[i
].stencil_load_op
= pCreateInfo
->pAttachments
[i
].stencilLoadOp
;
2301 // pass->attachments[i].store_op = pCreateInfo->pAttachments[i].storeOp;
2302 // pass->attachments[i].stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2305 for (uint32_t i
= 0; i
< pCreateInfo
->subpassCount
; i
++) {
2306 const VkSubpassDescription
*desc
= &pCreateInfo
->pSubpasses
[i
];
2307 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2309 subpass
->input_count
= desc
->inputCount
;
2310 subpass
->color_count
= desc
->colorCount
;
2312 if (desc
->inputCount
> 0) {
2313 subpass
->input_attachments
=
2314 anv_device_alloc(device
, desc
->inputCount
* sizeof(uint32_t),
2315 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2317 for (uint32_t j
= 0; j
< desc
->inputCount
; j
++) {
2318 subpass
->input_attachments
[j
]
2319 = desc
->inputAttachments
[j
].attachment
;
2323 if (desc
->colorCount
> 0) {
2324 subpass
->color_attachments
=
2325 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2326 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2328 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2329 subpass
->color_attachments
[j
]
2330 = desc
->colorAttachments
[j
].attachment
;
2334 if (desc
->resolveAttachments
) {
2335 subpass
->resolve_attachments
=
2336 anv_device_alloc(device
, desc
->colorCount
* sizeof(uint32_t),
2337 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
2339 for (uint32_t j
= 0; j
< desc
->colorCount
; j
++) {
2340 subpass
->resolve_attachments
[j
]
2341 = desc
->resolveAttachments
[j
].attachment
;
2345 subpass
->depth_stencil_attachment
= desc
->depthStencilAttachment
.attachment
;
2348 *pRenderPass
= anv_render_pass_to_handle(pass
);
2353 VkResult
anv_DestroyRenderPass(
2357 ANV_FROM_HANDLE(anv_device
, device
, _device
);
2358 ANV_FROM_HANDLE(anv_render_pass
, pass
, _pass
);
2360 anv_device_free(device
, pass
->attachments
);
2362 for (uint32_t i
= 0; i
< pass
->subpass_count
; i
++) {
2363 /* In VkSubpassCreateInfo, each of the attachment arrays may be null.
2364 * Don't free the null arrays.
2366 struct anv_subpass
*subpass
= &pass
->subpasses
[i
];
2368 anv_device_free(device
, subpass
->input_attachments
);
2369 anv_device_free(device
, subpass
->color_attachments
);
2370 anv_device_free(device
, subpass
->resolve_attachments
);
2373 anv_device_free(device
, pass
);
2378 VkResult
anv_GetRenderAreaGranularity(
2380 VkRenderPass renderPass
,
2381 VkExtent2D
* pGranularity
)
2383 *pGranularity
= (VkExtent2D
) { 1, 1 };
2388 void vkCmdDbgMarkerBegin(
2389 VkCmdBuffer cmdBuffer
,
2390 const char* pMarker
)
2391 __attribute__ ((visibility ("default")));
2393 void vkCmdDbgMarkerEnd(
2394 VkCmdBuffer cmdBuffer
)
2395 __attribute__ ((visibility ("default")));
2397 void vkCmdDbgMarkerBegin(
2398 VkCmdBuffer cmdBuffer
,
2399 const char* pMarker
)
2403 void vkCmdDbgMarkerEnd(
2404 VkCmdBuffer cmdBuffer
)