vk/0.170.2: Rename fields in VkClearColorValue
[mesa.git] / src / vulkan / anv_device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
#include <assert.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"
33
/* Driver-wide dispatch table.  Defined here; not written to in this file. */
struct anv_dispatch_table dtable;
35
36 static VkResult
37 anv_physical_device_init(struct anv_physical_device *device,
38 struct anv_instance *instance,
39 const char *path)
40 {
41 VkResult result;
42 int fd;
43
44 fd = open(path, O_RDWR | O_CLOEXEC);
45 if (fd < 0)
46 return vk_errorf(VK_ERROR_UNAVAILABLE, "failed to open %s: %m", path);
47
48 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
49 device->instance = instance;
50 device->path = path;
51
52 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
53 if (!device->chipset_id) {
54 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get chipset id: %m");
55 goto fail;
56 }
57
58 device->name = brw_get_device_name(device->chipset_id);
59 device->info = brw_get_device_info(device->chipset_id, -1);
60 if (!device->info) {
61 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get device info");
62 goto fail;
63 }
64
65 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
66 result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get aperture size: %m");
67 goto fail;
68 }
69
70 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
71 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing gem wait");
72 goto fail;
73 }
74
75 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
76 result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing execbuf2");
77 goto fail;
78 }
79
80 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
81 result = vk_errorf(VK_ERROR_UNAVAILABLE, "non-llc gpu");
82 goto fail;
83 }
84
85 close(fd);
86
87 return VK_SUCCESS;
88
89 fail:
90 close(fd);
91 return result;
92 }
93
94 static void *default_alloc(
95 void* pUserData,
96 size_t size,
97 size_t alignment,
98 VkSystemAllocType allocType)
99 {
100 return malloc(size);
101 }
102
/* Default VkAllocCallbacks release hook; the user-data pointer is unused. */
static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   (void)pUserData;
   free(pMem);
}
109
/* Fallback allocator wiring, used when VkInstanceCreateInfo::pAllocCb is
 * NULL in anv_CreateInstance().
 */
static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};
115
/* Instance-level extensions this driver advertises and accepts. */
static const VkExtensionProperties global_extensions[] = {
   {
      .extName = "VK_WSI_swapchain",
      .specVersion = 12
   },
};
122
/* Device-level extensions this driver advertises and accepts. */
static const VkExtensionProperties device_extensions[] = {
   {
      .extName = "VK_WSI_device_swapchain",
      .specVersion = 12
   },
};
129
130
131 VkResult anv_CreateInstance(
132 const VkInstanceCreateInfo* pCreateInfo,
133 VkInstance* pInstance)
134 {
135 struct anv_instance *instance;
136 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
137 void *user_data = NULL;
138
139 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
140
141 for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
142 bool found = false;
143 for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
144 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
145 global_extensions[j].extName) == 0) {
146 found = true;
147 break;
148 }
149 }
150 if (!found)
151 return vk_error(VK_ERROR_INVALID_EXTENSION);
152 }
153
154 if (pCreateInfo->pAllocCb) {
155 alloc_callbacks = pCreateInfo->pAllocCb;
156 user_data = pCreateInfo->pAllocCb->pUserData;
157 }
158 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
159 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
160 if (!instance)
161 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
162
163 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
164 instance->pAllocUserData = alloc_callbacks->pUserData;
165 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
166 instance->pfnFree = alloc_callbacks->pfnFree;
167 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
168 instance->physicalDeviceCount = 0;
169
170 _mesa_locale_init();
171
172 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
173
174 anv_init_wsi(instance);
175
176 *pInstance = anv_instance_to_handle(instance);
177
178 return VK_SUCCESS;
179 }
180
/* Destroy an instance created by anv_CreateInstance().  Teardown runs in
 * reverse creation order: WSI, then the valgrind mempool tracking instance
 * allocations, then locale state, and finally the instance memory itself
 * via the stored free callback.
 */
void anv_DestroyInstance(
    VkInstance                                  _instance)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   anv_finish_wsi(instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   instance->pfnFree(instance->pAllocUserData, instance);
}
194
195 void *
196 anv_instance_alloc(struct anv_instance *instance, size_t size,
197 size_t alignment, VkSystemAllocType allocType)
198 {
199 void *mem = instance->pfnAlloc(instance->pAllocUserData,
200 size, alignment, allocType);
201 if (mem) {
202 VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
203 VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
204 }
205 return mem;
206 }
207
208 void
209 anv_instance_free(struct anv_instance *instance, void *mem)
210 {
211 if (mem == NULL)
212 return;
213
214 VG(VALGRIND_MEMPOOL_FREE(instance, mem));
215
216 instance->pfnFree(instance->pAllocUserData, mem);
217 }
218
219 VkResult anv_EnumeratePhysicalDevices(
220 VkInstance _instance,
221 uint32_t* pPhysicalDeviceCount,
222 VkPhysicalDevice* pPhysicalDevices)
223 {
224 ANV_FROM_HANDLE(anv_instance, instance, _instance);
225 VkResult result;
226
227 if (instance->physicalDeviceCount == 0) {
228 result = anv_physical_device_init(&instance->physicalDevice,
229 instance, "/dev/dri/renderD128");
230 if (result != VK_SUCCESS)
231 return result;
232
233 instance->physicalDeviceCount = 1;
234 }
235
236 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
237 * otherwise it's an inout parameter.
238 *
239 * The Vulkan spec (git aaed022) says:
240 *
241 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
242 * that is initialized with the number of devices the application is
243 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
244 * an array of at least this many VkPhysicalDevice handles [...].
245 *
246 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
247 * overwrites the contents of the variable pointed to by
248 * pPhysicalDeviceCount with the number of physical devices in in the
249 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
250 * pPhysicalDeviceCount with the number of physical handles written to
251 * pPhysicalDevices.
252 */
253 if (!pPhysicalDevices) {
254 *pPhysicalDeviceCount = instance->physicalDeviceCount;
255 } else if (*pPhysicalDeviceCount >= 1) {
256 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
257 *pPhysicalDeviceCount = 1;
258 } else {
259 *pPhysicalDeviceCount = 0;
260 }
261
262 return VK_SUCCESS;
263 }
264
/* Report supported device features.  These are placeholder values (see the
 * finishme below); the physical-device handle is not consulted yet.
 */
VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .logicOp = true,
      .instancedDrawIndirect = true,
      .depthClip = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderTextureGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderStorageBufferArrayConstantIndexing = false,
      .shaderStorageImageArrayConstantIndexing = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderFloat16 = false,
      .shaderInt16 = false,
   };

   return VK_SUCCESS;
}
317
/* Report device limits.  Many values are placeholders (see the finishme and
 * FIXME markers); the compute limits are derived from the per-generation
 * max_cs_threads in brw_device_info.
 */
VkResult anv_GetPhysicalDeviceLimits(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceLimits*                     pLimits)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   const struct brw_device_info *devinfo = physical_device->info;

   anv_finishme("Get correct values for PhysicalDeviceLimits");

   *pLimits = (VkPhysicalDeviceLimits) {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),
      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      /* Tessellation is not supported; all tess limits are zero. */
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      /* Compute limits scale with the hardware's thread count. */
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = 16,
      .maxDynamicViewportStates = UINT32_MAX,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
   };

   return VK_SUCCESS;
}
432
433 VkResult anv_GetPhysicalDeviceProperties(
434 VkPhysicalDevice physicalDevice,
435 VkPhysicalDeviceProperties* pProperties)
436 {
437 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
438
439 *pProperties = (VkPhysicalDeviceProperties) {
440 .apiVersion = VK_MAKE_VERSION(0, 138, 1),
441 .driverVersion = 1,
442 .vendorId = 0x8086,
443 .deviceId = pdevice->chipset_id,
444 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
445 };
446
447 strcpy(pProperties->deviceName, pdevice->name);
448 snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
449 "anv-%s", MESA_GIT_SHA1 + 4);
450
451 return VK_SUCCESS;
452 }
453
454 VkResult anv_GetPhysicalDeviceQueueFamilyProperties(
455 VkPhysicalDevice physicalDevice,
456 uint32_t* pCount,
457 VkQueueFamilyProperties* pQueueFamilyProperties)
458 {
459 if (pQueueFamilyProperties == NULL) {
460 *pCount = 1;
461 }
462
463 assert(*pCount >= 1);
464
465 *pQueueFamilyProperties = (VkQueueFamilyProperties) {
466 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
467 VK_QUEUE_COMPUTE_BIT |
468 VK_QUEUE_DMA_BIT,
469 .queueCount = 1,
470 .supportsTimestamps = true,
471 };
472
473 return VK_SUCCESS;
474 }
475
476 VkResult anv_GetPhysicalDeviceMemoryProperties(
477 VkPhysicalDevice physicalDevice,
478 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
479 {
480 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
481 VkDeviceSize heap_size;
482
483 /* Reserve some wiggle room for the driver by exposing only 75% of the
484 * aperture to the heap.
485 */
486 heap_size = 3 * physical_device->aperture_size / 4;
487
488 /* The property flags below are valid only for llc platforms. */
489 pMemoryProperties->memoryTypeCount = 1;
490 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
491 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
492 .heapIndex = 1,
493 };
494
495 pMemoryProperties->memoryHeapCount = 1;
496 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
497 .size = heap_size,
498 .flags = VK_MEMORY_HEAP_HOST_LOCAL,
499 };
500
501 return VK_SUCCESS;
502 }
503
504 PFN_vkVoidFunction anv_GetInstanceProcAddr(
505 VkInstance instance,
506 const char* pName)
507 {
508 return anv_lookup_entrypoint(pName);
509 }
510
511 PFN_vkVoidFunction anv_GetDeviceProcAddr(
512 VkDevice device,
513 const char* pName)
514 {
515 return anv_lookup_entrypoint(pName);
516 }
517
/* Initialize the device's single queue.  A 4-byte slot is allocated from
 * the surface state pool to track the last completed serial; it starts at
 * 0 while serial numbering starts at 1.
 */
static VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   if (queue->completed_serial.map == NULL)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   return VK_SUCCESS;
}
534
/* Tear down a queue initialized by anv_queue_init(). */
static void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}
545
/* Copy the six standard Vulkan border colors into a dynamic-state
 * allocation kept in device->border_colors.
 * NOTE(review): the state-pool allocation result is not checked before the
 * memcpy — confirm the pool cannot fail here.
 */
static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =      { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =      { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =   { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =        { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =        { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
}
563
/* Create a logical device: validate requested device extensions, open a
 * private fd on the render node, create a GEM context, and initialize the
 * various BO/block/state pools, the compiler, the queue, meta state and
 * border colors.
 * NOTE(review): failures after context creation (pool/compiler/queue init)
 * are not checked or unwound — confirm those calls cannot fail here.
 */
VkResult anv_CreateDevice(
    VkPhysicalDevice                            physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   /* Reject the call if any requested extension is unknown. */
   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_INVALID_EXTENSION);
   }

   anv_set_dispatch_gen(physical_device->info->gen);

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;

   device->compiler = anv_compiler_create(device);

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}
645
/* Destroy a logical device.  Teardown order matters: the queue and meta
 * state release pool allocations, so they go before the pools; the pools
 * go before the fd they allocate GEM objects through.
 */
void anv_DestroyDevice(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_compiler_destroy(device->compiler);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors.  The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   close(device->fd);

   anv_instance_free(device->instance, device);
}
676
677 VkResult anv_EnumerateInstanceExtensionProperties(
678 const char* pLayerName,
679 uint32_t* pCount,
680 VkExtensionProperties* pProperties)
681 {
682 if (pProperties == NULL) {
683 *pCount = ARRAY_SIZE(global_extensions);
684 return VK_SUCCESS;
685 }
686
687 assert(*pCount >= ARRAY_SIZE(global_extensions));
688
689 *pCount = ARRAY_SIZE(global_extensions);
690 memcpy(pProperties, global_extensions, sizeof(global_extensions));
691
692 return VK_SUCCESS;
693 }
694
695 VkResult anv_EnumerateDeviceExtensionProperties(
696 VkPhysicalDevice physicalDevice,
697 const char* pLayerName,
698 uint32_t* pCount,
699 VkExtensionProperties* pProperties)
700 {
701 if (pProperties == NULL) {
702 *pCount = ARRAY_SIZE(device_extensions);
703 return VK_SUCCESS;
704 }
705
706 assert(*pCount >= ARRAY_SIZE(device_extensions));
707
708 *pCount = ARRAY_SIZE(device_extensions);
709 memcpy(pProperties, device_extensions, sizeof(device_extensions));
710
711 return VK_SUCCESS;
712 }
713
714 VkResult anv_EnumerateInstanceLayerProperties(
715 uint32_t* pCount,
716 VkLayerProperties* pProperties)
717 {
718 if (pProperties == NULL) {
719 *pCount = 0;
720 return VK_SUCCESS;
721 }
722
723 /* None supported at this time */
724 return vk_error(VK_ERROR_INVALID_LAYER);
725 }
726
727 VkResult anv_EnumerateDeviceLayerProperties(
728 VkPhysicalDevice physicalDevice,
729 uint32_t* pCount,
730 VkLayerProperties* pProperties)
731 {
732 if (pProperties == NULL) {
733 *pCount = 0;
734 return VK_SUCCESS;
735 }
736
737 /* None supported at this time */
738 return vk_error(VK_ERROR_INVALID_LAYER);
739 }
740
741 VkResult anv_GetDeviceQueue(
742 VkDevice _device,
743 uint32_t queueNodeIndex,
744 uint32_t queueIndex,
745 VkQueue* pQueue)
746 {
747 ANV_FROM_HANDLE(anv_device, device, _device);
748
749 assert(queueIndex == 0);
750
751 *pQueue = anv_queue_to_handle(&device->queue);
752
753 return VK_SUCCESS;
754 }
755
756 VkResult anv_QueueSubmit(
757 VkQueue _queue,
758 uint32_t cmdBufferCount,
759 const VkCmdBuffer* pCmdBuffers,
760 VkFence _fence)
761 {
762 ANV_FROM_HANDLE(anv_queue, queue, _queue);
763 ANV_FROM_HANDLE(anv_fence, fence, _fence);
764 struct anv_device *device = queue->device;
765 int ret;
766
767 for (uint32_t i = 0; i < cmdBufferCount; i++) {
768 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);
769
770 assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);
771
772 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
773 if (ret != 0)
774 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
775
776 if (fence) {
777 ret = anv_gem_execbuffer(device, &fence->execbuf);
778 if (ret != 0)
779 return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
780 }
781
782 for (uint32_t i = 0; i < cmd_buffer->execbuf2.bo_count; i++)
783 cmd_buffer->execbuf2.bos[i]->offset = cmd_buffer->execbuf2.objects[i].offset;
784 }
785
786 return VK_SUCCESS;
787 }
788
789 VkResult anv_QueueWaitIdle(
790 VkQueue _queue)
791 {
792 ANV_FROM_HANDLE(anv_queue, queue, _queue);
793
794 return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
795 }
796
/* Wait for the GPU to go idle by submitting a trivial 32-byte batch
 * (MI_BATCH_BUFFER_END + MI_NOOP) in the device's context and then waiting
 * on its BO with an effectively infinite timeout.
 * NOTE(review): the anv_state_pool_alloc() result is not checked before
 * use — confirm the pool cannot fail here.
 */
VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   /* Build the tiny batch in dynamic state; its backing BO is what we
    * ultimately wait on.
    */
   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   /* Single exec object, no relocations. */
   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
      goto fail;
   }

   /* Block until the batch — and hence all prior work in the context —
    * has retired.
    */
   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret != 0) {
      result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
      goto fail;
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

 fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}
862
863 void *
864 anv_device_alloc(struct anv_device * device,
865 size_t size,
866 size_t alignment,
867 VkSystemAllocType allocType)
868 {
869 return anv_instance_alloc(device->instance, size, alignment, allocType);
870 }
871
872 void
873 anv_device_free(struct anv_device * device,
874 void * mem)
875 {
876 anv_instance_free(device->instance, mem);
877 }
878
879 VkResult
880 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
881 {
882 bo->gem_handle = anv_gem_create(device, size);
883 if (!bo->gem_handle)
884 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
885
886 bo->map = NULL;
887 bo->index = 0;
888 bo->offset = 0;
889 bo->size = size;
890
891 return VK_SUCCESS;
892 }
893
/* Allocate device memory: validate the (single) memory type index, then
 * back the allocation with a new GEM buffer object.
 */
VkResult anv_AllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   if (pAllocInfo->memoryTypeIndex != 0) {
      /* We support exactly one memory heap. */
      return vk_error(VK_ERROR_INVALID_VALUE);
   }

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

 fail:
   anv_device_free(device, mem);

   return result;
}
930
/* Free device memory: drop any live CPU mapping, close the GEM handle,
 * then release the wrapper struct.
 * NOTE(review): anv_MapMemory() stores its mapping in mem->map, not
 * mem->bo.map — confirm which mapping this munmap is meant to release.
 */
void anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);
}
946
/* Map `size` bytes of a memory object at `offset` into the CPU address
 * space via a GEM mmap and return the pointer.  `flags` is unused.
 * NOTE(review): an existing mapping is overwritten without being unmapped
 * first, so repeated maps would leak the previous mapping — confirm.
 */
VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}
971
/* Unmap the mapping recorded by the last anv_MapMemory() call. */
void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);
}
980
/* Flushing is currently a no-op; memory is coherent on the llc platforms
 * this driver requires (see the HAS_LLC check in anv_physical_device_init).
 */
VkResult anv_FlushMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}
990
/* Invalidate is implemented in terms of flush; both are no-ops today. */
VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice                                    device,
    uint32_t                                    memRangeCount,
    const VkMappedMemoryRange*                  pMemRanges)
{
   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
998
999 VkResult anv_GetBufferMemoryRequirements(
1000 VkDevice device,
1001 VkBuffer _buffer,
1002 VkMemoryRequirements* pMemoryRequirements)
1003 {
1004 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1005
1006 /* The Vulkan spec (git aaed022) says:
1007 *
1008 * memoryTypeBits is a bitfield and contains one bit set for every
1009 * supported memory type for the resource. The bit `1<<i` is set if and
1010 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1011 * structure for the physical device is supported.
1012 *
1013 * We support exactly one memory type.
1014 */
1015 pMemoryRequirements->memoryTypeBits = 1;
1016
1017 pMemoryRequirements->size = buffer->size;
1018 pMemoryRequirements->alignment = 16;
1019
1020 return VK_SUCCESS;
1021 }
1022
1023 VkResult anv_GetImageMemoryRequirements(
1024 VkDevice device,
1025 VkImage _image,
1026 VkMemoryRequirements* pMemoryRequirements)
1027 {
1028 ANV_FROM_HANDLE(anv_image, image, _image);
1029
1030 /* The Vulkan spec (git aaed022) says:
1031 *
1032 * memoryTypeBits is a bitfield and contains one bit set for every
1033 * supported memory type for the resource. The bit `1<<i` is set if and
1034 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1035 * structure for the physical device is supported.
1036 *
1037 * We support exactly one memory type.
1038 */
1039 pMemoryRequirements->memoryTypeBits = 1;
1040
1041 pMemoryRequirements->size = image->size;
1042 pMemoryRequirements->alignment = image->alignment;
1043
1044 return VK_SUCCESS;
1045 }
1046
/* Sparse images are not supported. */
VkResult anv_GetImageSparseMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     image,
    uint32_t*                                   pNumRequirements,
    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
{
   return vk_error(VK_UNSUPPORTED);
}
1055
/* Stub: commitment tracking is not implemented, so report zero bytes. */
VkResult anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
}
1064
1065 VkResult anv_BindBufferMemory(
1066 VkDevice device,
1067 VkBuffer _buffer,
1068 VkDeviceMemory _mem,
1069 VkDeviceSize memOffset)
1070 {
1071 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1072 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1073
1074 buffer->bo = &mem->bo;
1075 buffer->offset = memOffset;
1076
1077 return VK_SUCCESS;
1078 }
1079
1080 VkResult anv_BindImageMemory(
1081 VkDevice device,
1082 VkImage _image,
1083 VkDeviceMemory _mem,
1084 VkDeviceSize memOffset)
1085 {
1086 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1087 ANV_FROM_HANDLE(anv_image, image, _image);
1088
1089 image->bo = &mem->bo;
1090 image->offset = memOffset;
1091
1092 return VK_SUCCESS;
1093 }
1094
/* Sparse binding is not supported by this driver yet. */
VkResult anv_QueueBindSparseBufferMemory(
    VkQueue                                     queue,
    VkBuffer                                    buffer,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}
1103
/* Sparse binding is not supported by this driver yet. */
VkResult anv_QueueBindSparseImageOpaqueMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseMemoryBindInfo*               pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}
1112
/* Sparse binding is not supported by this driver yet. */
VkResult anv_QueueBindSparseImageMemory(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    numBindings,
    const VkSparseImageMemoryBindInfo*          pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}
1121
1122 VkResult anv_CreateFence(
1123 VkDevice _device,
1124 const VkFenceCreateInfo* pCreateInfo,
1125 VkFence* pFence)
1126 {
1127 ANV_FROM_HANDLE(anv_device, device, _device);
1128 struct anv_fence *fence;
1129 struct anv_batch batch;
1130 VkResult result;
1131
1132 const uint32_t fence_size = 128;
1133
1134 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1135
1136 fence = anv_device_alloc(device, sizeof(*fence), 8,
1137 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1138 if (fence == NULL)
1139 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1140
1141 result = anv_bo_init_new(&fence->bo, device, fence_size);
1142 if (result != VK_SUCCESS)
1143 goto fail;
1144
1145 fence->bo.map =
1146 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1147 batch.next = batch.start = fence->bo.map;
1148 batch.end = fence->bo.map + fence->bo.size;
1149 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
1150 anv_batch_emit(&batch, GEN7_MI_NOOP);
1151
1152 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1153 fence->exec2_objects[0].relocation_count = 0;
1154 fence->exec2_objects[0].relocs_ptr = 0;
1155 fence->exec2_objects[0].alignment = 0;
1156 fence->exec2_objects[0].offset = fence->bo.offset;
1157 fence->exec2_objects[0].flags = 0;
1158 fence->exec2_objects[0].rsvd1 = 0;
1159 fence->exec2_objects[0].rsvd2 = 0;
1160
1161 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1162 fence->execbuf.buffer_count = 1;
1163 fence->execbuf.batch_start_offset = 0;
1164 fence->execbuf.batch_len = batch.next - fence->bo.map;
1165 fence->execbuf.cliprects_ptr = 0;
1166 fence->execbuf.num_cliprects = 0;
1167 fence->execbuf.DR1 = 0;
1168 fence->execbuf.DR4 = 0;
1169
1170 fence->execbuf.flags =
1171 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1172 fence->execbuf.rsvd1 = device->context_id;
1173 fence->execbuf.rsvd2 = 0;
1174
1175 *pFence = anv_fence_to_handle(fence);
1176
1177 return VK_SUCCESS;
1178
1179 fail:
1180 anv_device_free(device, fence);
1181
1182 return result;
1183 }
1184
/* Tear down a fence: unmap and close its batch BO, then free the wrapper. */
void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);
}
1196
/* Return each fence to the unsignaled state.  The cached `ready` flag is
 * cleared; the next GetFenceStatus/WaitForFences will query the kernel
 * again.
 */
VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}
1209
/* Poll a fence without blocking: a gem_wait with a zero timeout succeeds
 * only if the fence's batch BO is already idle.  The result is cached in
 * fence->ready so subsequent polls skip the ioctl.
 */
VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;   /* zero timeout => non-blocking poll */
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   /* Any failure (including ETIME) is reported as not-ready. */
   return VK_NOT_READY;
}
1230
/* Block until all fences signal or the timeout expires.
 *
 * NOTE(review): `t` is passed by pointer to every gem_wait call, and the
 * kernel updates it with the remaining time, so the timeout is a single
 * budget shared across all fences rather than per-fence — confirm this is
 * the intended semantic.  Also, a uint64_t timeout above INT64_MAX goes
 * negative here; presumably the kernel treats that as "wait forever" —
 * verify.
 */
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int64_t t = timeout;
   int ret;

   /* FIXME: handle !waitAll — we currently always wait for every fence. */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME)
         return VK_TIMEOUT;
      else if (ret == -1)
         return vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
   }

   return VK_SUCCESS;
}
1255
1256 // Queue semaphore functions
1257
/* Queue semaphores are not implemented yet. */
VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   stub_return(VK_UNSUPPORTED);
}
1265
/* Queue semaphores are not implemented yet; nothing to destroy. */
void anv_DestroySemaphore(
    VkDevice                                    device,
    VkSemaphore                                 semaphore)
{
   stub();
}
1272
/* Queue semaphores are not implemented yet. */
VkResult anv_QueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}
1279
/* Queue semaphores are not implemented yet. */
VkResult anv_QueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   stub_return(VK_UNSUPPORTED);
}
1286
1287 // Event functions
1288
/* Events are not implemented yet. */
VkResult anv_CreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   stub_return(VK_UNSUPPORTED);
}
1296
/* Events are not implemented yet; nothing to destroy. */
void anv_DestroyEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub();
}
1303
/* Events are not implemented yet. */
VkResult anv_GetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}
1310
/* Events are not implemented yet. */
VkResult anv_SetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}
1317
/* Events are not implemented yet. */
VkResult anv_ResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   stub_return(VK_UNSUPPORTED);
}
1324
1325 // Buffer functions
1326
1327 VkResult anv_CreateBuffer(
1328 VkDevice _device,
1329 const VkBufferCreateInfo* pCreateInfo,
1330 VkBuffer* pBuffer)
1331 {
1332 ANV_FROM_HANDLE(anv_device, device, _device);
1333 struct anv_buffer *buffer;
1334
1335 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1336
1337 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1338 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1339 if (buffer == NULL)
1340 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1341
1342 buffer->size = pCreateInfo->size;
1343 buffer->bo = NULL;
1344 buffer->offset = 0;
1345
1346 *pBuffer = anv_buffer_to_handle(buffer);
1347
1348 return VK_SUCCESS;
1349 }
1350
/* Free the buffer wrapper.  The bound memory object is owned by the
 * application and is not touched here.
 */
void anv_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);
}
1360
1361 void
1362 anv_fill_buffer_surface_state(struct anv_device *device, void *state,
1363 const struct anv_format *format,
1364 uint32_t offset, uint32_t range)
1365 {
1366 switch (device->info.gen) {
1367 case 7:
1368 gen7_fill_buffer_surface_state(state, format, offset, range);
1369 break;
1370 case 8:
1371 gen8_fill_buffer_surface_state(state, format, offset, range);
1372 break;
1373 default:
1374 unreachable("unsupported gen\n");
1375 }
1376 }
1377
/* Shared helper behind vkCreateBufferView: allocate the view object and a
 * 64-byte surface-state slot for it.  The surface state itself is packed
 * later by the gen-specific code.
 *
 * NOTE(review): the surface_state pool allocation is assumed to succeed —
 * confirm anv_state_pool_alloc cannot fail here.
 */
VkResult
anv_buffer_view_create(
   struct anv_device *                          device,
   const VkBufferViewCreateInfo*                pCreateInfo,
   struct anv_buffer_view **                    bview_out)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
   struct anv_buffer_view *bview;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   bview = anv_device_alloc(device, sizeof(*bview), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (bview == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* The view's offset is relative to the underlying BO, so fold in the
    * buffer's own bind offset.
    */
   *bview = (struct anv_buffer_view) {
      .bo = buffer->bo,
      .offset = buffer->offset + pCreateInfo->offset,
      .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
      .format = anv_format_for_vk_format(pCreateInfo->format),
      .range = pCreateInfo->range,
   };

   *bview_out = bview;

   return VK_SUCCESS;
}
1406
/* Release the view's surface-state slot back to the pool, then free the
 * view object.
 */
void anv_DestroyBufferView(
    VkDevice                                    _device,
    VkBufferView                                _bview)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);

   anv_state_pool_free(&device->surface_state_pool, bview->surface_state);
   anv_device_free(device, bview);
}
1417
/* Free the sampler object; its packed state lives inline, so no pool
 * release is needed.
 */
void anv_DestroySampler(
    VkDevice                                    _device,
    VkSampler                                   _sampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);
}
1427
1428 // Descriptor set functions
1429
/* Build a descriptor set layout.
 *
 * The layout stores, per shader stage, contiguous arrays of
 * anv_descriptor_slot entries: first the stage's surface slots, then its
 * sampler slots.  The function makes two passes over the bindings:
 *
 *   Pass 1 counts samplers/surfaces per stage (and dynamic buffers) so the
 *   single trailing `entries[]` array can be sized and carved up.
 *
 *   Pass 2 walks the bindings again and fills each stage's slot arrays
 *   with the flat descriptor index (and dynamic-buffer slot, if any) for
 *   every array element of every binding.
 */
VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t num_dynamic_buffers = 0;
   uint32_t count = 0;
   VkShaderStageFlags stages = 0;
   uint32_t s;

   /* Pass 1: per-stage slot counts. */
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      /* Samplers (alone or combined with an image) consume sampler slots. */
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      /* Everything except a bare sampler consumes a surface slot; a
       * combined image/sampler consumes one of each.
       */
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            surface_count[s] += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      stages |= pCreateInfo->pBinding[i].stageFlags;
      count += pCreateInfo->pBinding[i].arraySize;
   }

   uint32_t sampler_total = 0;
   uint32_t surface_total = 0;
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      sampler_total += sampler_count[s];
      surface_total += surface_count[s];
   }

   /* One allocation: the layout struct plus the flexible entries array. */
   size_t size = sizeof(*set_layout) +
      (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set_layout->num_dynamic_buffers = num_dynamic_buffers;
   set_layout->count = count;
   set_layout->shader_stages = stages;

   /* Carve entries[] into per-stage surface and sampler sub-arrays; the
    * sampler[]/surface[] cursors below advance through them in pass 2.
    */
   struct anv_descriptor_slot *p = set_layout->entries;
   struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
   struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      set_layout->stage[s].surface_count = surface_count[s];
      set_layout->stage[s].surface_start = surface[s] = p;
      p += surface_count[s];
      set_layout->stage[s].sampler_count = sampler_count[s];
      set_layout->stage[s].sampler_start = sampler[s] = p;
      p += sampler_count[s];
   }

   /* Pass 2: fill the slots.  `descriptor` is the flat index into the set;
    * `dynamic_slot` numbers the dynamic buffers in binding order.
    */
   uint32_t descriptor = 0;
   int8_t dynamic_slot = 0;
   bool is_dynamic;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               sampler[s]->index = descriptor + j;
               sampler[s]->dynamic_slot = -1;
               sampler[s]++;
            }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         is_dynamic = true;
         break;
      default:
         is_dynamic = false;
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               surface[s]->index = descriptor + j;
               if (is_dynamic)
                  surface[s]->dynamic_slot = dynamic_slot + j;
               else
                  surface[s]->dynamic_slot = -1;
               surface[s]++;
            }
         break;
      default:
         break;
      }

      if (is_dynamic)
         dynamic_slot += pCreateInfo->pBinding[i].arraySize;

      descriptor += pCreateInfo->pBinding[i].arraySize;
   }

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}
1582
/* The layout and its trailing entries array are one allocation, so a
 * single free suffices.
 */
void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);
}
1592
/* VkDescriptorPool is a stub: no pool storage exists yet; descriptor sets
 * are allocated straight from the device.  Return a non-zero dummy handle
 * so callers can treat it as a valid object.
 */
VkResult anv_CreateDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPoolUsage                       poolUsage,
    uint32_t                                    maxSets,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    VkDescriptorPool*                           pDescriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;
   return VK_SUCCESS;
}
1604
/* Stub: since the pool owns nothing yet, there is nothing to free here. */
void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
}
1611
/* Stub: since the pool owns nothing yet, reset is a no-op. */
VkResult anv_ResetDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}
1619
/* Allocate a descriptor set sized for `layout` (struct plus one
 * anv_descriptor per layout entry).  The caller owns the returned set and
 * frees it with anv_descriptor_set_destroy().
 */
VkResult
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   *out_set = set;

   return VK_SUCCESS;
}
1641
/* Free a set created by anv_descriptor_set_create(); the descriptors it
 * references (samplers, views) are owned elsewhere.
 */
void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}
1648
/* Allocate `count` descriptor sets.  On partial failure, the sets created
 * so far are released again (note the free call uses `i`, the number of
 * successfully created sets), and the first failing result is returned.
 * The pool is a stub, so sets come straight from the device allocator.
 */
VkResult anv_AllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, descriptorPool, i, pDescriptorSets);

   return result;
}
1678
/* Free each listed descriptor set.  The pool is a stub, so this simply
 * destroys the individual allocations.
 */
VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
   }

   return VK_SUCCESS;
}
1695
1696 VkResult anv_UpdateDescriptorSets(
1697 VkDevice device,
1698 uint32_t writeCount,
1699 const VkWriteDescriptorSet* pDescriptorWrites,
1700 uint32_t copyCount,
1701 const VkCopyDescriptorSet* pDescriptorCopies)
1702 {
1703 for (uint32_t i = 0; i < writeCount; i++) {
1704 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1705 ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);
1706
1707 switch (write->descriptorType) {
1708 case VK_DESCRIPTOR_TYPE_SAMPLER:
1709 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1710 for (uint32_t j = 0; j < write->count; j++) {
1711 ANV_FROM_HANDLE(anv_sampler, sampler,
1712 write->pDescriptors[j].sampler);
1713
1714 set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
1715 .type = ANV_DESCRIPTOR_TYPE_SAMPLER,
1716 .sampler = sampler,
1717 };
1718 }
1719
1720 if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
1721 break;
1722
1723 /* fallthrough */
1724
1725 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1726 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1727 for (uint32_t j = 0; j < write->count; j++) {
1728 ANV_FROM_HANDLE(anv_image_view, iview,
1729 write->pDescriptors[j].imageView);
1730
1731 set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
1732 .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
1733 .image_view = iview,
1734 };
1735 }
1736 break;
1737
1738 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1739 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1740 anv_finishme("texel buffers not implemented");
1741 break;
1742
1743 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1744 anv_finishme("input attachments not implemented");
1745 break;
1746
1747 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1748 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1749 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1750 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1751 for (uint32_t j = 0; j < write->count; j++) {
1752 ANV_FROM_HANDLE(anv_buffer_view, bview,
1753 write->pDescriptors[j].bufferView);
1754
1755 set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
1756 .type = ANV_DESCRIPTOR_TYPE_BUFFER_VIEW,
1757 .buffer_view = bview,
1758 };
1759 }
1760
1761 default:
1762 break;
1763 }
1764 }
1765
1766 for (uint32_t i = 0; i < copyCount; i++) {
1767 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1768 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->destSet);
1769 ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
1770 for (uint32_t j = 0; j < copy->count; j++) {
1771 dest->descriptors[copy->destBinding + j] =
1772 src->descriptors[copy->srcBinding + j];
1773 }
1774 }
1775
1776 return VK_SUCCESS;
1777 }
1778
1779 // State object functions
1780
/* Clamp x to [min, max].  Mirrors the original's check order exactly:
 * min wins when the bounds are inverted.
 */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   return (x < min) ? min : ((x < max) ? x : max);
}
1791
1792 VkResult anv_CreateDynamicViewportState(
1793 VkDevice _device,
1794 const VkDynamicViewportStateCreateInfo* pCreateInfo,
1795 VkDynamicViewportState* pState)
1796 {
1797 ANV_FROM_HANDLE(anv_device, device, _device);
1798 struct anv_dynamic_vp_state *state;
1799
1800 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);
1801
1802 state = anv_device_alloc(device, sizeof(*state), 8,
1803 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1804 if (state == NULL)
1805 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1806
1807 unsigned count = pCreateInfo->viewportAndScissorCount;
1808 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1809 count * 64, 64);
1810 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
1811 count * 8, 32);
1812 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
1813 count * 32, 32);
1814
1815 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
1816 const VkViewport *vp = &pCreateInfo->pViewports[i];
1817 const VkRect2D *s = &pCreateInfo->pScissors[i];
1818
1819 /* The gen7 state struct has just the matrix and guardband fields, the
1820 * gen8 struct adds the min/max viewport fields. */
1821 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
1822 .ViewportMatrixElementm00 = vp->width / 2,
1823 .ViewportMatrixElementm11 = vp->height / 2,
1824 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
1825 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
1826 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
1827 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
1828 .XMinClipGuardband = -1.0f,
1829 .XMaxClipGuardband = 1.0f,
1830 .YMinClipGuardband = -1.0f,
1831 .YMaxClipGuardband = 1.0f,
1832 .XMinViewPort = vp->originX,
1833 .XMaxViewPort = vp->originX + vp->width - 1,
1834 .YMinViewPort = vp->originY,
1835 .YMaxViewPort = vp->originY + vp->height - 1,
1836 };
1837
1838 struct GEN7_CC_VIEWPORT cc_viewport = {
1839 .MinimumDepth = vp->minDepth,
1840 .MaximumDepth = vp->maxDepth
1841 };
1842
1843 /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
1844 * ymax < ymin for empty clips. In case clip x, y, width height are all
1845 * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
1846 * what we want. Just special case empty clips and produce a canonical
1847 * empty clip. */
1848 static const struct GEN7_SCISSOR_RECT empty_scissor = {
1849 .ScissorRectangleYMin = 1,
1850 .ScissorRectangleXMin = 1,
1851 .ScissorRectangleYMax = 0,
1852 .ScissorRectangleXMax = 0
1853 };
1854
1855 const int max = 0xffff;
1856 struct GEN7_SCISSOR_RECT scissor = {
1857 /* Do this math using int64_t so overflow gets clamped correctly. */
1858 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
1859 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
1860 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
1861 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
1862 };
1863
1864 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
1865 GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);
1866
1867 if (s->extent.width <= 0 || s->extent.height <= 0) {
1868 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
1869 } else {
1870 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
1871 }
1872 }
1873
1874 *pState = anv_dynamic_vp_state_to_handle(state);
1875
1876 return VK_SUCCESS;
1877 }
1878
/* Return the three packed state allocations to the dynamic-state pool,
 * then free the wrapper object.
 */
void anv_DestroyDynamicViewportState(
    VkDevice                                    _device,
    VkDynamicViewportState                      _vp_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);

   anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
   anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
   anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);

   anv_device_free(device, vp_state);
}
1892
/* Raster state is stored inline in the object; a single free suffices. */
void anv_DestroyDynamicRasterState(
    VkDevice                                    _device,
    VkDynamicRasterState                        _rs_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);

   anv_device_free(device, rs_state);
}
1902
/* Pack the blend constant color into a COLOR_CALC_STATE stored inline in
 * the object.  The GEN7 layout is used; presumably it matches gen8 for
 * these fields — confirm.
 */
VkResult anv_CreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicColorBlendStateCreateInfo*   pCreateInfo,
    VkDynamicColorBlendState*                   pState)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN7_COLOR_CALC_STATE color_calc_state = {
      .BlendConstantColorRed = pCreateInfo->blendConst[0],
      .BlendConstantColorGreen = pCreateInfo->blendConst[1],
      .BlendConstantColorBlue = pCreateInfo->blendConst[2],
      .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
   };

   GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);

   *pState = anv_dynamic_cb_state_to_handle(state);

   return VK_SUCCESS;
}
1931
/* Color-blend state is stored inline in the object; a single free
 * suffices.
 */
void anv_DestroyDynamicColorBlendState(
    VkDevice                                    _device,
    VkDynamicColorBlendState                    _cb_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);

   anv_device_free(device, cb_state);
}
1941
/* Depth/stencil state is stored inline in the object; a single free
 * suffices.
 */
void anv_DestroyDynamicDepthStencilState(
    VkDevice                                    _device,
    VkDynamicDepthStencilState                  _ds_state)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);

   anv_device_free(device, ds_state);
}
1951
1952 VkResult anv_CreateFramebuffer(
1953 VkDevice _device,
1954 const VkFramebufferCreateInfo* pCreateInfo,
1955 VkFramebuffer* pFramebuffer)
1956 {
1957 ANV_FROM_HANDLE(anv_device, device, _device);
1958 struct anv_framebuffer *framebuffer;
1959
1960 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1961
1962 size_t size = sizeof(*framebuffer) +
1963 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
1964 framebuffer = anv_device_alloc(device, size, 8,
1965 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1966 if (framebuffer == NULL)
1967 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1968
1969 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1970 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1971 ANV_FROM_HANDLE(anv_attachment_view, aview,
1972 pCreateInfo->pAttachments[i].view);
1973
1974 framebuffer->attachments[i] = aview;
1975 }
1976
1977 framebuffer->width = pCreateInfo->width;
1978 framebuffer->height = pCreateInfo->height;
1979 framebuffer->layers = pCreateInfo->layers;
1980
1981 anv_CreateDynamicViewportState(anv_device_to_handle(device),
1982 &(VkDynamicViewportStateCreateInfo) {
1983 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
1984 .viewportAndScissorCount = 1,
1985 .pViewports = (VkViewport[]) {
1986 {
1987 .originX = 0,
1988 .originY = 0,
1989 .width = pCreateInfo->width,
1990 .height = pCreateInfo->height,
1991 .minDepth = 0,
1992 .maxDepth = 1
1993 },
1994 },
1995 .pScissors = (VkRect2D[]) {
1996 { { 0, 0 },
1997 { pCreateInfo->width, pCreateInfo->height } },
1998 }
1999 },
2000 &framebuffer->vp_state);
2001
2002 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2003
2004 return VK_SUCCESS;
2005 }
2006
/* Destroy the framebuffer's canned viewport state, then free the object
 * (the trailing attachment array is part of the same allocation).
 */
void anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_DestroyDynamicViewportState(anv_device_to_handle(device),
                                   fb->vp_state);
   anv_device_free(device, fb);
}
2018
2019 VkResult anv_CreateRenderPass(
2020 VkDevice _device,
2021 const VkRenderPassCreateInfo* pCreateInfo,
2022 VkRenderPass* pRenderPass)
2023 {
2024 ANV_FROM_HANDLE(anv_device, device, _device);
2025 struct anv_render_pass *pass;
2026 size_t size;
2027 size_t attachments_offset;
2028
2029 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
2030
2031 size = sizeof(*pass);
2032 size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
2033 attachments_offset = size;
2034 size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
2035
2036 pass = anv_device_alloc(device, size, 8,
2037 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2038 if (pass == NULL)
2039 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2040
2041 /* Clear the subpasses along with the parent pass. This required because
2042 * each array member of anv_subpass must be a valid pointer if not NULL.
2043 */
2044 memset(pass, 0, size);
2045 pass->attachment_count = pCreateInfo->attachmentCount;
2046 pass->subpass_count = pCreateInfo->subpassCount;
2047 pass->attachments = (void *) pass + attachments_offset;
2048
2049 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2050 struct anv_render_pass_attachment *att = &pass->attachments[i];
2051
2052 att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
2053 att->samples = pCreateInfo->pAttachments[i].samples;
2054 att->load_op = pCreateInfo->pAttachments[i].loadOp;
2055 att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
2056 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2057 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2058
2059 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2060 if (anv_format_is_color(att->format)) {
2061 ++pass->num_color_clear_attachments;
2062 } else if (att->format->depth_format) {
2063 pass->has_depth_clear_attachment = true;
2064 }
2065 } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2066 assert(att->format->has_stencil);
2067 pass->has_stencil_clear_attachment = true;
2068 }
2069 }
2070
2071 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
2072 const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
2073 struct anv_subpass *subpass = &pass->subpasses[i];
2074
2075 subpass->input_count = desc->inputCount;
2076 subpass->color_count = desc->colorCount;
2077
2078 if (desc->inputCount > 0) {
2079 subpass->input_attachments =
2080 anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
2081 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2082
2083 for (uint32_t j = 0; j < desc->inputCount; j++) {
2084 subpass->input_attachments[j]
2085 = desc->pInputAttachments[j].attachment;
2086 }
2087 }
2088
2089 if (desc->colorCount > 0) {
2090 subpass->color_attachments =
2091 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2092 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2093
2094 for (uint32_t j = 0; j < desc->colorCount; j++) {
2095 subpass->color_attachments[j]
2096 = desc->pColorAttachments[j].attachment;
2097 }
2098 }
2099
2100 if (desc->pResolveAttachments) {
2101 subpass->resolve_attachments =
2102 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2103 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2104
2105 for (uint32_t j = 0; j < desc->colorCount; j++) {
2106 subpass->resolve_attachments[j]
2107 = desc->pResolveAttachments[j].attachment;
2108 }
2109 }
2110
2111 subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
2112 }
2113
2114 *pRenderPass = anv_render_pass_to_handle(pass);
2115
2116 return VK_SUCCESS;
2117 }
2118
/* Free each subpass's attachment-index arrays and then the pass itself
 * (the subpass and attachment arrays trail the pass in one allocation).
 */
void anv_DestroyRenderPass(
    VkDevice                                    _device,
    VkRenderPass                                _pass)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_render_pass, pass, _pass);

   for (uint32_t i = 0; i < pass->subpass_count; i++) {
      /* In VkSubpassCreateInfo, each of the attachment arrays may be null,
       * in which case the pointer here is NULL (the pass was
       * zero-initialized at creation) and anv_device_free is expected to
       * treat NULL as a no-op.
       */
      struct anv_subpass *subpass = &pass->subpasses[i];

      anv_device_free(device, subpass->input_attachments);
      anv_device_free(device, subpass->color_attachments);
      anv_device_free(device, subpass->resolve_attachments);
   }

   anv_device_free(device, pass);
}
2139
2140 VkResult anv_GetRenderAreaGranularity(
2141 VkDevice device,
2142 VkRenderPass renderPass,
2143 VkExtent2D* pGranularity)
2144 {
2145 *pGranularity = (VkExtent2D) { 1, 1 };
2146
2147 return VK_SUCCESS;
2148 }
2149
/* Debug-marker entry points.  They are exported with default visibility so
 * the loader can resolve them by name, and are intentionally no-ops in
 * this driver.
 */
void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerEnd(
   VkCmdBuffer                                  cmdBuffer)
   __attribute__ ((visibility ("default")));

void vkCmdDbgMarkerBegin(
    VkCmdBuffer                                 cmdBuffer,
    const char*                                 pMarker)
{
}

void vkCmdDbgMarkerEnd(
    VkCmdBuffer                                 cmdBuffer)
{
}