/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "mesa/main/git_sha1.h"
#include "util/strtod.h"

struct anv_dispatch_table dtable;

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(VK_ERROR_UNAVAILABLE, "failed to open %s: %m", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   device->path = path;

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get chipset id: %m");
      goto fail;
   }

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get device info");
      goto fail;
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "kernel missing execbuf2");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC)) {
      result = vk_errorf(VK_ERROR_UNAVAILABLE, "non-llc gpu");
      goto fail;
   }

   close(fd);

   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}

static void *default_alloc(
    void* pUserData,
    size_t size,
    size_t alignment,
    VkSystemAllocType allocType)
{
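   /* Note (assumption, not from the original source): the alignment
    * parameter is ignored here. malloc() returns storage suitably aligned
    * for any fundamental type, which is assumed to cover the small
    * alignments (typically 8) the driver requests through this default
    * callback.
    */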
   return malloc(size);
}

static void default_free(
    void* pUserData,
    void* pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};

static const VkExtensionProperties global_extensions[] = {
   {
      .extName = "VK_WSI_swapchain",
      .specVersion = 12
   },
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extName = "VK_WSI_device_swapchain",
      .specVersion = 12
   },
};


VkResult anv_CreateInstance(
    const VkInstanceCreateInfo* pCreateInfo,
    VkInstance* pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_INVALID_EXTENSION);
   }

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
   instance->physicalDeviceCount = 0;

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   anv_init_wsi(instance);

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

VkResult anv_DestroyInstance(
    VkInstance _instance)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   anv_finish_wsi(instance);

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   instance->pfnFree(instance->pAllocUserData, instance);

   return VK_SUCCESS;
}

void *
anv_instance_alloc(struct anv_instance *instance, size_t size,
                   size_t alignment, VkSystemAllocType allocType)
{
   void *mem = instance->pfnAlloc(instance->pAllocUserData,
                                  size, alignment, allocType);
   if (mem) {
      VG(VALGRIND_MEMPOOL_ALLOC(instance, mem, size));
      VG(VALGRIND_MAKE_MEM_UNDEFINED(mem, size));
   }
   return mem;
}

void
anv_instance_free(struct anv_instance *instance, void *mem)
{
   if (mem == NULL)
      return;

   VG(VALGRIND_MEMPOOL_FREE(instance, mem));

   instance->pfnFree(instance->pAllocUserData, mem);
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance _instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount == 0) {
      result = anv_physical_device_init(&instance->physicalDevice,
                                        instance, "/dev/dri/renderD128");
      if (result != VK_SUCCESS)
         return result;

      instance->physicalDeviceCount = 1;
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is a pointer
    *    to an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
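
/* Illustrative only (application-side, not part of this driver): the
 * two-call idiom implied by the protocol above. Names prefixed "app_"
 * are hypothetical.
 *
 *    uint32_t app_count = 0;
 *    vkEnumeratePhysicalDevices(app_instance, &app_count, NULL);
 *    VkPhysicalDevice *app_devs = malloc(app_count * sizeof(*app_devs));
 *    vkEnumeratePhysicalDevices(app_instance, &app_count, app_devs);
 */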

VkResult anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceFeatures* pFeatures)
{
   anv_finishme("Get correct values for PhysicalDeviceFeatures");

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSourceBlend = true,
      .logicOp = true,
      .instancedDrawIndirect = true,
      .depthClip = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .textureCompressionETC2 = true,
      .textureCompressionASTC_LDR = true,
      .textureCompressionBC = true,
      .pipelineStatisticsQuery = true,
      .vertexSideEffects = false,
      .tessellationSideEffects = false,
      .geometrySideEffects = false,
      .fragmentSideEffects = false,
      .shaderTessellationPointSize = false,
      .shaderGeometryPointSize = true,
      .shaderTextureGatherExtended = true,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderStorageBufferArrayConstantIndexing = false,
      .shaderStorageImageArrayConstantIndexing = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderFloat16 = false,
      .shaderInt16 = false,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceLimits(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceLimits* pLimits)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   const struct brw_device_info *devinfo = physical_device->info;

   anv_finishme("Get correct values for PhysicalDeviceLimits");

   *pLimits = (VkPhysicalDeviceLimits) {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 10),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 10),
      .maxTexelBufferSize = (1 << 14),
      .maxUniformBufferSize = UINT32_MAX,
      .maxStorageBufferSize = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .bufferImageGranularity = 64, /* A cache line */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxDescriptorSets = UINT32_MAX,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputAttributeOffset = 256,
      .maxVertexInputBindingStride = 256,
      .maxVertexOutputComponents = 32,
      .maxTessGenLevel = 0,
      .maxTessPatchSize = 0,
      .maxTessControlPerVertexInputComponents = 0,
      .maxTessControlPerVertexOutputComponents = 0,
      .maxTessControlPerPatchOutputComponents = 0,
      .maxTessControlTotalOutputComponents = 0,
      .maxTessEvaluationInputComponents = 0,
      .maxTessEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 6,
      .maxGeometryInputComponents = 16,
      .maxGeometryOutputComponents = 16,
      .maxGeometryOutputVertices = 16,
      .maxGeometryTotalOutputComponents = 16,
      .maxFragmentInputComponents = 16,
      .maxFragmentOutputBuffers = 8,
      .maxFragmentDualSourceBuffers = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 1024,
      .maxComputeWorkGroupCount = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectInstanceCount = UINT32_MAX,
      .primitiveRestartForPatches = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = 16,
      .maxDynamicViewportStates = UINT32_MAX,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { -1.0, 1.0 }, /* FIXME */
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 64, /* A cache line */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = 0, /* FIXME */
      .maxTexelOffset = 0, /* FIXME */
      .minTexelGatherOffset = 0, /* FIXME */
      .maxTexelGatherOffset = 0, /* FIXME */
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .maxFramebufferColorSamples = 8,
      .maxFramebufferDepthSamples = 8,
      .maxFramebufferStencilSamples = 8,
      .maxColorAttachments = MAX_RTS,
      .maxSampledImageColorSamples = 8,
      .maxSampledImageDepthSamples = 8,
      .maxSampledImageIntegerSamples = 1,
      .maxStorageImageSamples = 1,
      .maxSampleMaskWords = 1,
      .timestampFrequency = 1000 * 1000 * 1000 / 80,
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceProperties* pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(0, 138, 1),
      .driverVersion = 1,
      .vendorId = 0x8086,
      .deviceId = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
   };

   strcpy(pProperties->deviceName, pdevice->name);
   snprintf((char *)pProperties->pipelineCacheUUID, VK_UUID_LENGTH,
            "anv-%s", MESA_GIT_SHA1 + 4);

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceQueueCount(
    VkPhysicalDevice physicalDevice,
    uint32_t* pCount)
{
   *pCount = 1;

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t count,
    VkPhysicalDeviceQueueProperties* pQueueProperties)
{
   assert(count == 1);

   *pQueueProperties = (VkPhysicalDeviceQueueProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_DMA_BIT,
      .queueCount = 1,
      .supportsTimestamps = true,
   };

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;

   /* The property flags below are valid only for llc platforms. */
   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
      .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
      .heapIndex = 0, /* The sole heap below; index 1 would be out of range. */
   };

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_HOST_LOCAL,
   };

   return VK_SUCCESS;
}
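
/* Illustrative only (application-side sketch; "app_find_memory_type" is a
 * hypothetical name, not part of this driver): how these properties are
 * typically consumed. An application scans the types permitted by a
 * resource's memoryTypeBits for one whose propertyFlags contain the bits it
 * needs; with the single type exposed above, index 0 is the only answer.
 *
 *    static uint32_t
 *    app_find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
 *                         uint32_t type_bits, VkMemoryPropertyFlags required)
 *    {
 *       for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
 *          if ((type_bits & (1u << i)) &&
 *              (props->memoryTypes[i].propertyFlags & required) == required)
 *             return i;
 *       }
 *       return UINT32_MAX; // no suitable type
 *    }
 */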

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance instance,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice device,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}

static VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   if (queue->completed_serial.map == NULL)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   return VK_SUCCESS;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
#ifdef HAVE_VALGRIND
   /* This gets torn down with the device so we only need to do this if
    * valgrind is present.
    */
   anv_state_pool_free(queue->pool, queue->completed_serial);
#endif
}

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const VkClearColorValue border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .f32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .f32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .u32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .u32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .u32 = { 1, 1, 1, 1 } },
   };

   device->border_colors =
      anv_state_pool_alloc(&device->dynamic_state_pool,
                           sizeof(border_colors), 32);
   memcpy(device->border_colors.map, border_colors, sizeof(border_colors));
}

VkResult anv_CreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    VkDevice* pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   struct anv_instance *instance = physical_device->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->extensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_INVALID_EXTENSION);
   }

   anv_set_dispatch_gen(physical_device->info->gen);

   device = anv_instance_alloc(instance, sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device, ANV_CMD_BUFFER_BATCH_SIZE);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_block_pool_init(&device->scratch_block_pool, device, 0x10000);

   device->info = *physical_device->info;

   device->compiler = anv_compiler_create(device);

   anv_queue_init(device, &device->queue);

   anv_device_init_meta(device);

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

fail_fd:
   close(device->fd);
fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}

VkResult anv_DestroyDevice(
    VkDevice _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_compiler_destroy(device->compiler);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_block_pool_finish(&device->scratch_block_pool);

   close(device->fd);

   anv_instance_free(device->instance, device);

   return VK_SUCCESS;
}

VkResult anv_GetGlobalExtensionProperties(
    const char* pLayerName,
    uint32_t* pCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(global_extensions));

   *pCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}

VkResult anv_GetPhysicalDeviceExtensionProperties(
    VkPhysicalDevice physicalDevice,
    const char* pLayerName,
    uint32_t* pCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pCount >= ARRAY_SIZE(device_extensions));

   *pCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}

VkResult anv_GetGlobalLayerProperties(
    uint32_t* pCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_INVALID_LAYER);
}

VkResult anv_GetPhysicalDeviceLayerProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_INVALID_LAYER);
}

VkResult anv_GetDeviceQueue(
    VkDevice _device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue* pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);

   return VK_SUCCESS;
}

VkResult anv_QueueSubmit(
    VkQueue _queue,
    uint32_t cmdBufferCount,
    const VkCmdBuffer* pCmdBuffers,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < cmdBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCmdBuffers[i]);

      assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
      if (ret != 0)
         return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");

      if (fence) {
         ret = anv_gem_execbuffer(device, &fence->execbuf);
         if (ret != 0)
            return vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
      }

      /* Write back the offsets the kernel chose so future relocations start
       * from up-to-date presumed addresses. Use a separate counter to avoid
       * shadowing the command-buffer index of the enclosing loop.
       */
      for (uint32_t j = 0; j < cmd_buffer->execbuf2.bo_count; j++)
         cmd_buffer->execbuf2.bos[j]->offset = cmd_buffer->execbuf2.objects[j].offset;
   }

   return VK_SUCCESS;
}

VkResult anv_QueueWaitIdle(
    VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}

VkResult anv_DeviceWaitIdle(
    VkDevice _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

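   /* Idle-wait technique: submit a minimal batch (MI_BATCH_BUFFER_END padded
    * with a NOOP) to the render ring of this device's context, then block on
    * the batch's BO with I915_GEM_WAIT. Batches submitted to the same context
    * and ring execute in order, so this one retires only after all earlier
    * work has.
    */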
   state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
   bo = &device->dynamic_state_pool.block_pool->bo;
   batch.start = batch.next = state.map;
   batch.end = state.map + 32;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      result = vk_errorf(VK_ERROR_UNKNOWN, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo->gem_handle, &timeout);
   if (ret != 0) {
      result = vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
      goto fail;
   }

   anv_state_pool_free(&device->dynamic_state_pool, state);

   return VK_SUCCESS;

fail:
   anv_state_pool_free(&device->dynamic_state_pool, state);

   return result;
}

void *
anv_device_alloc(struct anv_device * device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType)
{
   return anv_instance_alloc(device->instance, size, alignment, allocType);
}

void
anv_device_free(struct anv_device * device,
                void * mem)
{
   anv_instance_free(device->instance, mem);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}

VkResult anv_AllocMemory(
    VkDevice _device,
    const VkMemoryAllocInfo* pAllocInfo,
    VkDeviceMemory* pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   if (pAllocInfo->memoryTypeIndex != 0) {
      /* We support exactly one memory heap. */
      return vk_error(VK_ERROR_INVALID_VALUE);
   }

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

fail:
   anv_device_free(device, mem);

   return result;
}

VkResult anv_FreeMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);

   return VK_SUCCESS;
}

VkResult anv_MapMemory(
    VkDevice _device,
    VkDeviceMemory _mem,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkMemoryMapFlags flags,
    void** ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}

VkResult anv_UnmapMemory(
    VkDevice _device,
    VkDeviceMemory _mem)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   anv_gem_munmap(mem->map, mem->map_size);

   return VK_SUCCESS;
}

VkResult anv_FlushMappedMemoryRanges(
    VkDevice device,
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice device,
    uint32_t memRangeCount,
    const VkMappedMemoryRange* pMemRanges)
{
   return anv_FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}

VkResult anv_GetBufferMemoryRequirements(
    VkDevice device,
    VkBuffer _buffer,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
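   /* Worked example (hypothetical device, not this driver): supporting
    * memory types 0 and 2 for a resource would give
    * memoryTypeBits = (1 << 0) | (1 << 2) = 0x5; here the answer is
    * always (1 << 0) = 0x1.
    */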
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;

   return VK_SUCCESS;
}

VkResult anv_GetImageMemoryRequirements(
    VkDevice device,
    VkImage _image,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;

   return VK_SUCCESS;
}

VkResult anv_GetImageSparseMemoryRequirements(
    VkDevice device,
    VkImage image,
    uint32_t* pNumRequirements,
    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
   return vk_error(VK_UNSUPPORTED);
}

VkResult anv_GetDeviceMemoryCommitment(
    VkDevice device,
    VkDeviceMemory memory,
    VkDeviceSize* pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
   stub_return(VK_SUCCESS);
}

VkResult anv_BindBufferMemory(
    VkDevice device,
    VkBuffer _buffer,
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   buffer->bo = &mem->bo;
   buffer->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_BindImageMemory(
    VkDevice device,
    VkImage _image,
    VkDeviceMemory _mem,
    VkDeviceSize memOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
   ANV_FROM_HANDLE(anv_image, image, _image);

   image->bo = &mem->bo;
   image->offset = memOffset;

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparseBufferMemory(
    VkQueue queue,
    VkBuffer buffer,
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageOpaqueMemory(
    VkQueue queue,
    VkImage image,
    uint32_t numBindings,
    const VkSparseMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueBindSparseImageMemory(
    VkQueue queue,
    VkImage image,
    uint32_t numBindings,
    const VkSparseImageMemoryBindInfo* pBindInfo)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_CreateFence(
    VkDevice _device,
    const VkFenceCreateInfo* pCreateInfo,
    VkFence* pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   const uint32_t fence_size = 128;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = anv_device_alloc(device, sizeof(*fence), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&fence->bo, device, fence_size);
   if (result != VK_SUCCESS)
      goto fail;

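   /* Fence mechanism: the fence is a tiny BO holding just an
    * MI_BATCH_BUFFER_END. anv_QueueSubmit() submits this batch right after
    * the command buffers it covers, so the kernel considers the BO busy
    * until that work retires; fence queries and waits then reduce to
    * I915_GEM_WAIT on this BO (see anv_GetFenceStatus/anv_WaitForFences).
    */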
   fence->bo.map =
      anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
   batch.next = batch.start = fence->bo.map;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN7_MI_NOOP);

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = 0;
   fence->execbuf.batch_len = batch.next - fence->bo.map;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;

fail:
   anv_device_free(device, fence);

   return result;
}

VkResult anv_DestroyFence(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   anv_gem_munmap(fence->bo.map, fence->bo.size);
   anv_gem_close(device, fence->bo.gem_handle);
   anv_device_free(device, fence);

   return VK_SUCCESS;
}

VkResult anv_ResetFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}

VkResult anv_WaitForFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   int64_t t = timeout;
   int ret;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME)
         return VK_TIMEOUT;
      else if (ret == -1)
         return vk_errorf(VK_ERROR_UNKNOWN, "gem wait failed: %m");
   }

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice device,
    const VkSemaphoreCreateInfo* pCreateInfo,
    VkSemaphore* pSemaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_DestroySemaphore(
    VkDevice device,
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueSignalSemaphore(
    VkQueue queue,
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_QueueWaitSemaphore(
    VkQueue queue,
    VkSemaphore semaphore)
{
   stub_return(VK_UNSUPPORTED);
}

// Event functions

VkResult anv_CreateEvent(
    VkDevice device,
    const VkEventCreateInfo* pCreateInfo,
    VkEvent* pEvent)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_DestroyEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_GetEventStatus(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_SetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

VkResult anv_ResetEvent(
    VkDevice device,
    VkEvent event)
{
   stub_return(VK_UNSUPPORTED);
}

// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice _device,
    const VkBufferCreateInfo* pCreateInfo,
    VkBuffer* pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

VkResult anv_DestroyBuffer(
    VkDevice _device,
    VkBuffer _buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_device_free(device, buffer);

   return VK_SUCCESS;
}

void
anv_fill_buffer_surface_state(struct anv_device *device, void *state,
                              const struct anv_format *format,
                              uint32_t offset, uint32_t range)
{
   switch (device->info.gen) {
   case 7:
      gen7_fill_buffer_surface_state(state, format, offset, range);
      break;
   case 8:
      gen8_fill_buffer_surface_state(state, format, offset, range);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult
anv_buffer_view_create(
   struct anv_device * device,
   const VkBufferViewCreateInfo* pCreateInfo,
   struct anv_buffer_view ** bview_out)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, pCreateInfo->buffer);
   struct anv_buffer_view *bview;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   bview = anv_device_alloc(device, sizeof(*bview), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (bview == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *bview = (struct anv_buffer_view) {
      .bo = buffer->bo,
      .offset = buffer->offset + pCreateInfo->offset,
      .surface_state = anv_state_pool_alloc(&device->surface_state_pool, 64, 64),
      .format = anv_format_for_vk_format(pCreateInfo->format),
      .range = pCreateInfo->range,
   };

   *bview_out = bview;

   return VK_SUCCESS;
}

VkResult anv_DestroyBufferView(
    VkDevice _device,
    VkBufferView _bview)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer_view, bview, _bview);

   anv_state_pool_free(&device->surface_state_pool, bview->surface_state);
   anv_device_free(device, bview);

   return VK_SUCCESS;
}

VkResult anv_DestroySampler(
    VkDevice _device,
    VkSampler _sampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_device_free(device, sampler);

   return VK_SUCCESS;
}

// Descriptor set functions

VkResult anv_CreateDescriptorSetLayout(
    VkDevice _device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    VkDescriptorSetLayout* pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t sampler_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t surface_count[VK_SHADER_STAGE_NUM] = { 0, };
   uint32_t num_dynamic_buffers = 0;
   uint32_t count = 0;
   VkShaderStageFlags stages = 0;
   uint32_t s;

   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            sampler_count[s] += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            surface_count[s] += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         num_dynamic_buffers += pCreateInfo->pBinding[i].arraySize;
         break;
      default:
         break;
      }

      stages |= pCreateInfo->pBinding[i].stageFlags;
      count += pCreateInfo->pBinding[i].arraySize;
   }

   uint32_t sampler_total = 0;
   uint32_t surface_total = 0;
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      sampler_total += sampler_count[s];
      surface_total += surface_count[s];
   }

   size_t size = sizeof(*set_layout) +
      (sampler_total + surface_total) * sizeof(set_layout->entries[0]);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set_layout->num_dynamic_buffers = num_dynamic_buffers;
   set_layout->count = count;
   set_layout->shader_stages = stages;

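   /* Carve the trailing entries[] storage into per-stage sub-arrays: for
    * each stage, its surface slots come first, then its sampler slots. The
    * surface[]/sampler[] cursors saved here are advanced by the second pass
    * below as it fills the slots in.
    */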
   struct anv_descriptor_slot *p = set_layout->entries;
   struct anv_descriptor_slot *sampler[VK_SHADER_STAGE_NUM];
   struct anv_descriptor_slot *surface[VK_SHADER_STAGE_NUM];
   for (uint32_t s = 0; s < VK_SHADER_STAGE_NUM; s++) {
      set_layout->stage[s].surface_count = surface_count[s];
      set_layout->stage[s].surface_start = surface[s] = p;
      p += surface_count[s];
      set_layout->stage[s].sampler_count = sampler_count[s];
      set_layout->stage[s].sampler_start = sampler[s] = p;
      p += sampler_count[s];
   }

   uint32_t descriptor = 0;
   int8_t dynamic_slot = 0;
   bool is_dynamic;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               sampler[s]->index = descriptor + j;
               sampler[s]->dynamic_slot = -1;
               sampler[s]++;
            }
         break;
      default:
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         is_dynamic = true;
         break;
      default:
         is_dynamic = false;
         break;
      }

      switch (pCreateInfo->pBinding[i].descriptorType) {
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
            for (uint32_t j = 0; j < pCreateInfo->pBinding[i].arraySize; j++) {
               surface[s]->index = descriptor + j;
               if (is_dynamic)
                  surface[s]->dynamic_slot = dynamic_slot + j;
               else
                  surface[s]->dynamic_slot = -1;
               surface[s]++;
            }
         break;
      default:
         break;
      }

      if (is_dynamic)
         dynamic_slot += pCreateInfo->pBinding[i].arraySize;

      descriptor += pCreateInfo->pBinding[i].arraySize;
   }

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

VkResult anv_DestroyDescriptorSetLayout(
    VkDevice _device,
    VkDescriptorSetLayout _set_layout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_device_free(device, set_layout);

   return VK_SUCCESS;
}

VkResult anv_CreateDescriptorPool(
    VkDevice device,
    VkDescriptorPoolUsage poolUsage,
    uint32_t maxSets,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    VkDescriptorPool* pDescriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub");
   pDescriptorPool->handle = 1;
   return VK_SUCCESS;
}

VkResult anv_DestroyDescriptorPool(
    VkDevice _device,
    VkDescriptorPool _pool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}

VkResult anv_ResetDescriptorPool(
    VkDevice device,
    VkDescriptorPool descriptorPool)
{
   anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
   return VK_SUCCESS;
}

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   size_t size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);

   set = anv_device_alloc(device, size, 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_set *set)
{
   anv_device_free(device, set);
}

VkResult anv_AllocDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    VkDescriptorSetUsage setUsage,
    uint32_t count,
    const VkDescriptorSetLayout* pSetLayouts,
    VkDescriptorSet* pDescriptorSets,
    uint32_t* pCount)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result;
   struct anv_descriptor_set *set;

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout, pSetLayouts[i]);

      result = anv_descriptor_set_create(device, layout, &set);
      if (result != VK_SUCCESS) {
         *pCount = i;
         return result;
      }

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

   *pCount = count;

   return VK_SUCCESS;
}

VkResult anv_FreeDescriptorSets(
    VkDevice _device,
    VkDescriptorPool descriptorPool,
    uint32_t count,
    const VkDescriptorSet* pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, set);
   }

   return VK_SUCCESS;
}

VkResult anv_UpdateDescriptorSets(
    VkDevice device,
    uint32_t writeCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t copyCount,
    const VkCopyDescriptorSet* pDescriptorCopies)
{
   for (uint32_t i = 0; i < writeCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->destSet);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pDescriptors[j].sampler);

            set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }

         if (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)
            break;

         /* fallthrough */

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pDescriptors[j].imageView);

            set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_finishme("texel buffers not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->count; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pDescriptors[j].bufferView);

            set->descriptors[write->destBinding + j] = (struct anv_descriptor) {
               .type = ANV_DESCRIPTOR_TYPE_BUFFER_VIEW,
               .buffer_view = bview,
            };
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < copyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dest, copy->destSet);
      for (uint32_t j = 0; j < copy->count; j++) {
         dest->descriptors[copy->destBinding + j] =
            src->descriptors[copy->srcBinding + j];
      }
   }

   return VK_SUCCESS;
}

// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

VkResult anv_CreateDynamicViewportState(
    VkDevice _device,
    const VkDynamicViewportStateCreateInfo* pCreateInfo,
    VkDynamicViewportState* pState)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                            count * 64, 64);
   state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
                                       count * 8, 32);
   state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
                                         count * 32, 32);

   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect2D *s = &pCreateInfo->pScissors[i];

      /* The gen7 state struct has just the matrix and guardband fields, the
       * gen8 struct adds the min/max viewport fields. */
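      /* Viewport transform refresher: the hardware computes
       * x_screen = m00 * x_ndc + m30 (likewise for y and z), so mapping NDC
       * [-1,1] onto [originX, originX + width] gives m00 = width/2 and
       * m30 = originX + width/2, which is what the values below encode.
       */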
      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN7_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width, and height
       * are all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax,
       * which isn't what we want. Just special-case empty clips and produce
       * a canonical empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      /* CC_VIEWPORT entries are 8 bytes each; use a matching stride so we
       * stay within the count * 8 allocation above (a stride of 32 would
       * overrun it for more than one viewport).
       */
      GEN7_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);
1891
1892 if (s->extent.width <= 0 || s->extent.height <= 0) {
1893 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
1894 } else {
1895 GEN7_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
1896 }
1897 }
1898
1899 *pState = anv_dynamic_vp_state_to_handle(state);
1900
1901 return VK_SUCCESS;
1902 }
1903
1904 VkResult anv_DestroyDynamicViewportState(
1905 VkDevice _device,
1906 VkDynamicViewportState _vp_state)
1907 {
1908 ANV_FROM_HANDLE(anv_device, device, _device);
1909 ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, _vp_state);
1910
1911 anv_state_pool_free(&device->dynamic_state_pool, vp_state->sf_clip_vp);
1912 anv_state_pool_free(&device->dynamic_state_pool, vp_state->cc_vp);
1913 anv_state_pool_free(&device->dynamic_state_pool, vp_state->scissor);
1914
1915 anv_device_free(device, vp_state);
1916
1917 return VK_SUCCESS;
1918 }
1919
1920 VkResult anv_DestroyDynamicRasterState(
1921 VkDevice _device,
1922 VkDynamicRasterState _rs_state)
1923 {
1924 ANV_FROM_HANDLE(anv_device, device, _device);
1925 ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, _rs_state);
1926
1927 anv_device_free(device, rs_state);
1928
1929 return VK_SUCCESS;
1930 }
1931
1932 VkResult anv_CreateDynamicColorBlendState(
1933 VkDevice _device,
1934 const VkDynamicColorBlendStateCreateInfo* pCreateInfo,
1935 VkDynamicColorBlendState* pState)
1936 {
1937 ANV_FROM_HANDLE(anv_device, device, _device);
1938 struct anv_dynamic_cb_state *state;
1939
1940 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_COLOR_BLEND_STATE_CREATE_INFO);
1941
1942 state = anv_device_alloc(device, sizeof(*state), 8,
1943 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1944 if (state == NULL)
1945 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1946
1947 struct GEN7_COLOR_CALC_STATE color_calc_state = {
1948 .BlendConstantColorRed = pCreateInfo->blendConst[0],
1949 .BlendConstantColorGreen = pCreateInfo->blendConst[1],
1950 .BlendConstantColorBlue = pCreateInfo->blendConst[2],
1951 .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
1952 };
1953
1954 GEN7_COLOR_CALC_STATE_pack(NULL, state->color_calc_state, &color_calc_state);
1955
1956 *pState = anv_dynamic_cb_state_to_handle(state);
1957
1958 return VK_SUCCESS;
1959 }
1960
1961 VkResult anv_DestroyDynamicColorBlendState(
1962 VkDevice _device,
1963 VkDynamicColorBlendState _cb_state)
1964 {
1965 ANV_FROM_HANDLE(anv_device, device, _device);
1966 ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, _cb_state);
1967
1968 anv_device_free(device, cb_state);
1969
1970 return VK_SUCCESS;
1971 }
1972
1973 VkResult anv_DestroyDynamicDepthStencilState(
1974 VkDevice _device,
1975 VkDynamicDepthStencilState _ds_state)
1976 {
1977 ANV_FROM_HANDLE(anv_device, device, _device);
1978 ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, _ds_state);
1979
1980 anv_device_free(device, ds_state);
1981
1982 return VK_SUCCESS;
1983 }
1984
1985 VkResult anv_CreateFramebuffer(
1986 VkDevice _device,
1987 const VkFramebufferCreateInfo* pCreateInfo,
1988 VkFramebuffer* pFramebuffer)
1989 {
1990 ANV_FROM_HANDLE(anv_device, device, _device);
1991 struct anv_framebuffer *framebuffer;
1992
1993 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1994
1995 size_t size = sizeof(*framebuffer) +
1996 sizeof(struct anv_attachment_view *) * pCreateInfo->attachmentCount;
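/* anv_framebuffer is assumed to end in a trailing array of attachment
 * pointers (that is what this sizing implies), so one allocation holds
 * both the struct and its attachment list:
 *
 *    [ anv_framebuffer | attachments[attachmentCount] ]
 */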
1997 framebuffer = anv_device_alloc(device, size, 8,
1998 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1999 if (framebuffer == NULL)
2000 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2001
2002 framebuffer->attachment_count = pCreateInfo->attachmentCount;
2003 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2004 ANV_FROM_HANDLE(anv_attachment_view, aview,
2005 pCreateInfo->pAttachments[i].view);
2006
2007 framebuffer->attachments[i] = aview;
2008 }
2009
2010 framebuffer->width = pCreateInfo->width;
2011 framebuffer->height = pCreateInfo->height;
2012 framebuffer->layers = pCreateInfo->layers;
2013
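/* Also bake a default viewport/scissor state covering the entire
 * framebuffer, so driver-internal paths can bind fb->vp_state without
 * the application creating one. (That internal use is an assumption;
 * what the code below guarantees is a full width x height viewport
 * with a matching scissor and a [0, 1] depth range.)
 */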
2014 anv_CreateDynamicViewportState(anv_device_to_handle(device),
2015 &(VkDynamicViewportStateCreateInfo) {
2016 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VIEWPORT_STATE_CREATE_INFO,
2017 .viewportAndScissorCount = 1,
2018 .pViewports = (VkViewport[]) {
2019 {
2020 .originX = 0,
2021 .originY = 0,
2022 .width = pCreateInfo->width,
2023 .height = pCreateInfo->height,
2024 .minDepth = 0,
2025 .maxDepth = 1
2026 },
2027 },
2028 .pScissors = (VkRect2D[]) {
2029 { { 0, 0 },
2030 { pCreateInfo->width, pCreateInfo->height } },
2031 }
2032 },
2033 &framebuffer->vp_state);
2034
2035 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2036
2037 return VK_SUCCESS;
2038 }
2039
2040 VkResult anv_DestroyFramebuffer(
2041 VkDevice _device,
2042 VkFramebuffer _fb)
2043 {
2044 ANV_FROM_HANDLE(anv_device, device, _device);
2045 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2046
2047 anv_DestroyDynamicViewportState(anv_device_to_handle(device),
2048 fb->vp_state);
2049 anv_device_free(device, fb);
2050
2051 return VK_SUCCESS;
2052 }
2053
2054 VkResult anv_CreateRenderPass(
2055 VkDevice _device,
2056 const VkRenderPassCreateInfo* pCreateInfo,
2057 VkRenderPass* pRenderPass)
2058 {
2059 ANV_FROM_HANDLE(anv_device, device, _device);
2060 struct anv_render_pass *pass;
2061 size_t size;
2062 size_t attachments_offset;
2063
2064 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
2065
2066 size = sizeof(*pass);
2067 size += pCreateInfo->subpassCount * sizeof(pass->subpasses[0]);
2068 attachments_offset = size;
2069 size += pCreateInfo->attachmentCount * sizeof(pass->attachments[0]);
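/* One allocation, laid out as (assuming subpasses[] is the struct's
 * trailing array, which is what the sizing above implies):
 *
 *    [ anv_render_pass | subpasses[subpassCount] | attachments[attachmentCount] ]
 *      ^ pass            ^ pass->subpasses          ^ pass + attachments_offset
 */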
2070
2071 pass = anv_device_alloc(device, size, 8,
2072 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2073 if (pass == NULL)
2074 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2075
2076 /* Clear the subpasses along with the parent pass. This is required
2077 * because each attachment-array pointer in anv_subpass must be either
2078 * NULL or a valid pointer; zeroing the whole block provides the NULLs. */
2079 memset(pass, 0, size);
2080 pass->attachment_count = pCreateInfo->attachmentCount;
2081 pass->subpass_count = pCreateInfo->subpassCount;
2082 pass->attachments = (void *) pass + attachments_offset;
2083
2084 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
2085 struct anv_render_pass_attachment *att = &pass->attachments[i];
2086
2087 att->format = anv_format_for_vk_format(pCreateInfo->pAttachments[i].format);
2088 att->samples = pCreateInfo->pAttachments[i].samples;
2089 att->load_op = pCreateInfo->pAttachments[i].loadOp;
2090 att->stencil_load_op = pCreateInfo->pAttachments[i].stencilLoadOp;
2091 // att->store_op = pCreateInfo->pAttachments[i].storeOp;
2092 // att->stencil_store_op = pCreateInfo->pAttachments[i].stencilStoreOp;
2093
2094 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2095 if (anv_format_is_color(att->format)) {
2096 ++pass->num_color_clear_attachments;
2097 } else if (att->format->depth_format) {
2098 pass->has_depth_clear_attachment = true;
2099 }
2100 } else if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2101 assert(att->format->has_stencil);
2102 pass->has_stencil_clear_attachment = true;
2103 }
2104 }
2105
2106 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
2107 const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
2108 struct anv_subpass *subpass = &pass->subpasses[i];
2109
2110 subpass->input_count = desc->inputCount;
2111 subpass->color_count = desc->colorCount;
2112
2113 if (desc->inputCount > 0) {
2114 subpass->input_attachments =
2115 anv_device_alloc(device, desc->inputCount * sizeof(uint32_t),
2116 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2117
2118 for (uint32_t j = 0; j < desc->inputCount; j++) {
2119 subpass->input_attachments[j]
2120 = desc->pInputAttachments[j].attachment;
2121 }
2122 }
2123
2124 if (desc->colorCount > 0) {
2125 subpass->color_attachments =
2126 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2127 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2128
2129 for (uint32_t j = 0; j < desc->colorCount; j++) {
2130 subpass->color_attachments[j]
2131 = desc->pColorAttachments[j].attachment;
2132 }
2133 }
2134
2135 if (desc->pResolveAttachments) {
2136 subpass->resolve_attachments =
2137 anv_device_alloc(device, desc->colorCount * sizeof(uint32_t),
2138 8, VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2139
2140 for (uint32_t j = 0; j < desc->colorCount; j++) {
2141 subpass->resolve_attachments[j]
2142 = desc->pResolveAttachments[j].attachment;
2143 }
2144 }
2145
2146 subpass->depth_stencil_attachment = desc->depthStencilAttachment.attachment;
2147 }
2148
2149 *pRenderPass = anv_render_pass_to_handle(pass);
2150
2151 return VK_SUCCESS;
2152 }
2153
2154 VkResult anv_DestroyRenderPass(
2155 VkDevice _device,
2156 VkRenderPass _pass)
2157 {
2158 ANV_FROM_HANDLE(anv_device, device, _device);
2159 ANV_FROM_HANDLE(anv_render_pass, pass, _pass);
2160
2161 for (uint32_t i = 0; i < pass->subpass_count; i++) {
2162 /* In VkSubpassDescription, each of the attachment arrays may be NULL;
2163 * freeing a NULL pointer is a no-op, so the arrays can be freed
2164 * unconditionally. */
2165 struct anv_subpass *subpass = &pass->subpasses[i];
2166
2167 anv_device_free(device, subpass->input_attachments);
2168 anv_device_free(device, subpass->color_attachments);
2169 anv_device_free(device, subpass->resolve_attachments);
2170 }
2171
2172 anv_device_free(device, pass);
2173
2174 return VK_SUCCESS;
2175 }
2176
2177 VkResult anv_GetRenderAreaGranularity(
2178 VkDevice device,
2179 VkRenderPass renderPass,
2180 VkExtent2D* pGranularity)
2181 {
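/* A granularity of (1, 1) tells the application that any render area
 * offset and extent are already optimally aligned for this driver. */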
2182 *pGranularity = (VkExtent2D) { 1, 1 };
2183
2184 return VK_SUCCESS;
2185 }
2186
2187 void vkCmdDbgMarkerBegin(
2188 VkCmdBuffer cmdBuffer,
2189 const char* pMarker)
2190 __attribute__ ((visibility ("default")));
2191
2192 void vkCmdDbgMarkerEnd(
2193 VkCmdBuffer cmdBuffer)
2194 __attribute__ ((visibility ("default")));
2195
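/* Debug-marker entrypoints: declared with default visibility above so
 * the loader can resolve them, while the definitions below are
 * intentionally empty no-ops. */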
2196 void vkCmdDbgMarkerBegin(
2197 VkCmdBuffer cmdBuffer,
2198 const char* pMarker)
2199 {
2200 }
2201
2202 void vkCmdDbgMarkerEnd(
2203 VkCmdBuffer cmdBuffer)
2204 {
2205 }