anv: Rework fences
src/intel/vulkan/anv_device.c (mesa.git)
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31 #include "anv_timestamp.h"
32 #include "util/strtod.h"
33 #include "util/debug.h"
34
35 #include "genxml/gen7_pack.h"
36
37 struct anv_dispatch_table dtable;
38
39 static void
40 compiler_debug_log(void *data, const char *fmt, ...)
41 { }
42
43 static void
44 compiler_perf_log(void *data, const char *fmt, ...)
45 {
46 va_list args;
47 va_start(args, fmt);
48
49 if (unlikely(INTEL_DEBUG & DEBUG_PERF))
50 vfprintf(stderr, fmt, args);
51
52 va_end(args);
53 }
54
55 static VkResult
56 anv_physical_device_init(struct anv_physical_device *device,
57 struct anv_instance *instance,
58 const char *path)
59 {
60 VkResult result;
61 int fd;
62
63 fd = open(path, O_RDWR | O_CLOEXEC);
64 if (fd < 0)
65 return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
66
67 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
68 device->instance = instance;
69
70 assert(strlen(path) < ARRAY_SIZE(device->path));
71 strncpy(device->path, path, ARRAY_SIZE(device->path));
72
73 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
74 if (!device->chipset_id) {
75 result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
76 goto fail;
77 }
78
79 device->name = gen_get_device_name(device->chipset_id);
80 if (!gen_get_device_info(device->chipset_id, &device->info)) {
81 result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
82 goto fail;
83 }
84
85 if (device->info.is_haswell) {
86 fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
87 } else if (device->info.gen == 7 && !device->info.is_baytrail) {
88 fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
89 } else if (device->info.gen == 7 && device->info.is_baytrail) {
90 fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
91 } else if (device->info.gen >= 8) {
 92    /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
 93     * supported as anything */
94 } else {
95 result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
96 "Vulkan not yet supported on %s", device->name);
97 goto fail;
98 }
99
100 device->cmd_parser_version = -1;
101 if (device->info.gen == 7) {
102 device->cmd_parser_version =
103 anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
104 if (device->cmd_parser_version == -1) {
105 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
106 "failed to get command parser version");
107 goto fail;
108 }
109 }
110
111 if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
112 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
113 "failed to get aperture size: %m");
114 goto fail;
115 }
116
117 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
118 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
119 "kernel missing gem wait");
120 goto fail;
121 }
122
123 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
124 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
125 "kernel missing execbuf2");
126 goto fail;
127 }
128
129 if (!device->info.has_llc &&
130 anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
131 result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
132 "kernel missing wc mmap");
133 goto fail;
134 }
135
136 bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
137
138 /* GENs prior to 8 do not support EU/Subslice info */
139 if (device->info.gen >= 8) {
140 device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
141 device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
142
143 /* Without this information, we cannot get the right Braswell
 144        * brand strings, and we have to use conservative numbers for GPGPU on
145 * many platforms, but otherwise, things will just work.
146 */
147 if (device->subslice_total < 1 || device->eu_total < 1) {
148 fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
149 " query GPU properties.\n");
150 }
151 } else if (device->info.gen == 7) {
152 device->subslice_total = 1 << (device->info.gt - 1);
153 }
154
155 if (device->info.is_cherryview &&
156 device->subslice_total > 0 && device->eu_total > 0) {
157 /* Logical CS threads = EUs per subslice * 7 threads per EU */
158 uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
159
160 /* Fuse configurations may give more threads than expected, never less. */
161 if (max_cs_threads > device->info.max_cs_threads)
162 device->info.max_cs_threads = max_cs_threads;
163 }
164
165 close(fd);
166
167 brw_process_intel_debug_variable();
168
169 device->compiler = brw_compiler_create(NULL, &device->info);
170 if (device->compiler == NULL) {
171 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
172 goto fail;
173 }
174 device->compiler->shader_debug_log = compiler_debug_log;
175 device->compiler->shader_perf_log = compiler_perf_log;
176
177 result = anv_init_wsi(device);
178 if (result != VK_SUCCESS)
179 goto fail;
180
181 /* XXX: Actually detect bit6 swizzling */
182 isl_device_init(&device->isl_dev, &device->info, swizzled);
183
184 return VK_SUCCESS;
185
186 fail:
187 close(fd);
188 return result;
189 }
190
191 static void
192 anv_physical_device_finish(struct anv_physical_device *device)
193 {
194 anv_finish_wsi(device);
195 ralloc_free(device->compiler);
196 }
197
198 static const VkExtensionProperties global_extensions[] = {
199 {
200 .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
201 .specVersion = 25,
202 },
203 #ifdef VK_USE_PLATFORM_XCB_KHR
204 {
205 .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
206 .specVersion = 6,
207 },
208 #endif
209 #ifdef VK_USE_PLATFORM_XLIB_KHR
210 {
211 .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
212 .specVersion = 6,
213 },
214 #endif
215 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
216 {
217 .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
218 .specVersion = 5,
219 },
220 #endif
221 };
222
223 static const VkExtensionProperties device_extensions[] = {
224 {
225 .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
226 .specVersion = 68,
227 },
228 };
229
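/* Default allocation callbacks, used when the application does not supply
 * its own.  Note that the requested alignment is not applied explicitly
 * here; malloc's default alignment is assumed to be sufficient for the
 * 8-byte alignments requested throughout this file.
 */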
230 static void *
231 default_alloc_func(void *pUserData, size_t size, size_t align,
232 VkSystemAllocationScope allocationScope)
233 {
234 return malloc(size);
235 }
236
237 static void *
238 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
239 size_t align, VkSystemAllocationScope allocationScope)
240 {
241 return realloc(pOriginal, size);
242 }
243
244 static void
245 default_free_func(void *pUserData, void *pMemory)
246 {
247 free(pMemory);
248 }
249
250 static const VkAllocationCallbacks default_alloc = {
251 .pUserData = NULL,
252 .pfnAllocation = default_alloc_func,
253 .pfnReallocation = default_realloc_func,
254 .pfnFree = default_free_func,
255 };
256
257 VkResult anv_CreateInstance(
258 const VkInstanceCreateInfo* pCreateInfo,
259 const VkAllocationCallbacks* pAllocator,
260 VkInstance* pInstance)
261 {
262 struct anv_instance *instance;
263
264 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
265
266 uint32_t client_version;
267 if (pCreateInfo->pApplicationInfo &&
268 pCreateInfo->pApplicationInfo->apiVersion != 0) {
269 client_version = pCreateInfo->pApplicationInfo->apiVersion;
270 } else {
271 client_version = VK_MAKE_VERSION(1, 0, 0);
272 }
273
274 if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
275 client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
276 return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
277 "Client requested version %d.%d.%d",
278 VK_VERSION_MAJOR(client_version),
279 VK_VERSION_MINOR(client_version),
280 VK_VERSION_PATCH(client_version));
281 }
282
283 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
284 bool found = false;
285 for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
286 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
287 global_extensions[j].extensionName) == 0) {
288 found = true;
289 break;
290 }
291 }
292 if (!found)
293 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
294 }
295
296 instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
297 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
298 if (!instance)
299 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
300
301 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
302
303 if (pAllocator)
304 instance->alloc = *pAllocator;
305 else
306 instance->alloc = default_alloc;
307
308 instance->apiVersion = client_version;
309 instance->physicalDeviceCount = -1;
310
311 _mesa_locale_init();
312
313 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
314
315 *pInstance = anv_instance_to_handle(instance);
316
317 return VK_SUCCESS;
318 }
319
320 void anv_DestroyInstance(
321 VkInstance _instance,
322 const VkAllocationCallbacks* pAllocator)
323 {
324 ANV_FROM_HANDLE(anv_instance, instance, _instance);
325
326 if (instance->physicalDeviceCount > 0) {
327 /* We support at most one physical device. */
328 assert(instance->physicalDeviceCount == 1);
329 anv_physical_device_finish(&instance->physicalDevice);
330 }
331
332 VG(VALGRIND_DESTROY_MEMPOOL(instance));
333
334 _mesa_locale_fini();
335
336 vk_free(&instance->alloc, instance);
337 }
338
339 VkResult anv_EnumeratePhysicalDevices(
340 VkInstance _instance,
341 uint32_t* pPhysicalDeviceCount,
342 VkPhysicalDevice* pPhysicalDevices)
343 {
344 ANV_FROM_HANDLE(anv_instance, instance, _instance);
345 VkResult result;
346
347 if (instance->physicalDeviceCount < 0) {
348 char path[20];
349 for (unsigned i = 0; i < 8; i++) {
350 snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
351 result = anv_physical_device_init(&instance->physicalDevice,
352 instance, path);
353 if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
354 break;
355 }
356
357 if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
358 instance->physicalDeviceCount = 0;
359 } else if (result == VK_SUCCESS) {
360 instance->physicalDeviceCount = 1;
361 } else {
362 return result;
363 }
364 }
365
366 /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
367 * otherwise it's an inout parameter.
368 *
369 * The Vulkan spec (git aaed022) says:
370 *
371 * pPhysicalDeviceCount is a pointer to an unsigned integer variable
372 * that is initialized with the number of devices the application is
373 * prepared to receive handles to. pname:pPhysicalDevices is pointer to
374 * an array of at least this many VkPhysicalDevice handles [...].
375 *
376 * Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
377 * overwrites the contents of the variable pointed to by
 378  *    pPhysicalDeviceCount with the number of physical devices in the
379 * instance; otherwise, vkEnumeratePhysicalDevices overwrites
380 * pPhysicalDeviceCount with the number of physical handles written to
381 * pPhysicalDevices.
382 */
383 if (!pPhysicalDevices) {
384 *pPhysicalDeviceCount = instance->physicalDeviceCount;
385 } else if (*pPhysicalDeviceCount >= 1) {
386 pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
387 *pPhysicalDeviceCount = 1;
388 } else if (*pPhysicalDeviceCount < instance->physicalDeviceCount) {
389 return VK_INCOMPLETE;
390 } else {
391 *pPhysicalDeviceCount = 0;
392 }
393
394 return VK_SUCCESS;
395 }
396
397 void anv_GetPhysicalDeviceFeatures(
398 VkPhysicalDevice physicalDevice,
399 VkPhysicalDeviceFeatures* pFeatures)
400 {
401 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
402
403 *pFeatures = (VkPhysicalDeviceFeatures) {
404 .robustBufferAccess = true,
405 .fullDrawIndexUint32 = true,
406 .imageCubeArray = false,
407 .independentBlend = true,
408 .geometryShader = true,
409 .tessellationShader = false,
410 .sampleRateShading = true,
411 .dualSrcBlend = true,
412 .logicOp = true,
413 .multiDrawIndirect = false,
414 .drawIndirectFirstInstance = false,
415 .depthClamp = true,
416 .depthBiasClamp = false,
417 .fillModeNonSolid = true,
418 .depthBounds = false,
419 .wideLines = true,
420 .largePoints = true,
421 .alphaToOne = true,
422 .multiViewport = true,
423 .samplerAnisotropy = true,
424 .textureCompressionETC2 = pdevice->info.gen >= 8 ||
425 pdevice->info.is_baytrail,
426 .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
427 .textureCompressionBC = true,
428 .occlusionQueryPrecise = true,
429 .pipelineStatisticsQuery = false,
430 .fragmentStoresAndAtomics = true,
431 .shaderTessellationAndGeometryPointSize = true,
432 .shaderImageGatherExtended = false,
433 .shaderStorageImageExtendedFormats = false,
434 .shaderStorageImageMultisample = false,
435 .shaderUniformBufferArrayDynamicIndexing = true,
436 .shaderSampledImageArrayDynamicIndexing = true,
437 .shaderStorageBufferArrayDynamicIndexing = true,
438 .shaderStorageImageArrayDynamicIndexing = true,
439 .shaderStorageImageReadWithoutFormat = false,
440 .shaderStorageImageWriteWithoutFormat = true,
441 .shaderClipDistance = false,
442 .shaderCullDistance = false,
443 .shaderFloat64 = false,
444 .shaderInt64 = false,
445 .shaderInt16 = false,
447 .variableMultisampleRate = false,
448 .inheritedQueries = false,
449 };
450
451 /* We can't do image stores in vec4 shaders */
452 pFeatures->vertexPipelineStoresAndAtomics =
453 pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
454 pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
455 }
456
457 void
458 anv_device_get_cache_uuid(void *uuid)
459 {
460 memset(uuid, 0, VK_UUID_SIZE);
461 snprintf(uuid, VK_UUID_SIZE, "anv-%s", ANV_TIMESTAMP);
462 }
463
464 void anv_GetPhysicalDeviceProperties(
465 VkPhysicalDevice physicalDevice,
466 VkPhysicalDeviceProperties* pProperties)
467 {
468 ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
469 const struct gen_device_info *devinfo = &pdevice->info;
470
471 const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
472
473 /* See assertions made when programming the buffer surface state. */
474 const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
475 (1ul << 30) : (1ul << 27);
476
477 VkSampleCountFlags sample_counts =
478 isl_device_get_sample_counts(&pdevice->isl_dev);
479
480 VkPhysicalDeviceLimits limits = {
481 .maxImageDimension1D = (1 << 14),
482 .maxImageDimension2D = (1 << 14),
483 .maxImageDimension3D = (1 << 11),
484 .maxImageDimensionCube = (1 << 14),
485 .maxImageArrayLayers = (1 << 11),
486 .maxTexelBufferElements = 128 * 1024 * 1024,
487 .maxUniformBufferRange = (1ul << 27),
488 .maxStorageBufferRange = max_raw_buffer_sz,
489 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
490 .maxMemoryAllocationCount = UINT32_MAX,
491 .maxSamplerAllocationCount = 64 * 1024,
492 .bufferImageGranularity = 64, /* A cache line */
493 .sparseAddressSpaceSize = 0,
494 .maxBoundDescriptorSets = MAX_SETS,
495 .maxPerStageDescriptorSamplers = 64,
496 .maxPerStageDescriptorUniformBuffers = 64,
497 .maxPerStageDescriptorStorageBuffers = 64,
498 .maxPerStageDescriptorSampledImages = 64,
499 .maxPerStageDescriptorStorageImages = 64,
500 .maxPerStageDescriptorInputAttachments = 64,
501 .maxPerStageResources = 128,
502 .maxDescriptorSetSamplers = 256,
503 .maxDescriptorSetUniformBuffers = 256,
504 .maxDescriptorSetUniformBuffersDynamic = 256,
505 .maxDescriptorSetStorageBuffers = 256,
506 .maxDescriptorSetStorageBuffersDynamic = 256,
507 .maxDescriptorSetSampledImages = 256,
508 .maxDescriptorSetStorageImages = 256,
509 .maxDescriptorSetInputAttachments = 256,
510 .maxVertexInputAttributes = 32,
511 .maxVertexInputBindings = 32,
512 .maxVertexInputAttributeOffset = 2047,
513 .maxVertexInputBindingStride = 2048,
514 .maxVertexOutputComponents = 128,
515 .maxTessellationGenerationLevel = 0,
516 .maxTessellationPatchSize = 0,
517 .maxTessellationControlPerVertexInputComponents = 0,
518 .maxTessellationControlPerVertexOutputComponents = 0,
519 .maxTessellationControlPerPatchOutputComponents = 0,
520 .maxTessellationControlTotalOutputComponents = 0,
521 .maxTessellationEvaluationInputComponents = 0,
522 .maxTessellationEvaluationOutputComponents = 0,
523 .maxGeometryShaderInvocations = 32,
524 .maxGeometryInputComponents = 64,
525 .maxGeometryOutputComponents = 128,
526 .maxGeometryOutputVertices = 256,
527 .maxGeometryTotalOutputComponents = 1024,
528 .maxFragmentInputComponents = 128,
529 .maxFragmentOutputAttachments = 8,
530 .maxFragmentDualSrcAttachments = 2,
531 .maxFragmentCombinedOutputResources = 8,
532 .maxComputeSharedMemorySize = 32768,
533 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
534 .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
535 .maxComputeWorkGroupSize = {
536 16 * devinfo->max_cs_threads,
537 16 * devinfo->max_cs_threads,
538 16 * devinfo->max_cs_threads,
539 },
540 .subPixelPrecisionBits = 4 /* FIXME */,
541 .subTexelPrecisionBits = 4 /* FIXME */,
542 .mipmapPrecisionBits = 4 /* FIXME */,
543 .maxDrawIndexedIndexValue = UINT32_MAX,
544 .maxDrawIndirectCount = UINT32_MAX,
545 .maxSamplerLodBias = 16,
546 .maxSamplerAnisotropy = 16,
547 .maxViewports = MAX_VIEWPORTS,
548 .maxViewportDimensions = { (1 << 14), (1 << 14) },
549 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
550 .viewportSubPixelBits = 13, /* We take a float? */
551 .minMemoryMapAlignment = 4096, /* A page */
552 .minTexelBufferOffsetAlignment = 1,
553 .minUniformBufferOffsetAlignment = 1,
554 .minStorageBufferOffsetAlignment = 1,
555 .minTexelOffset = -8,
556 .maxTexelOffset = 7,
557 .minTexelGatherOffset = -8,
558 .maxTexelGatherOffset = 7,
559 .minInterpolationOffset = -0.5,
560 .maxInterpolationOffset = 0.4375,
561 .subPixelInterpolationOffsetBits = 4,
562 .maxFramebufferWidth = (1 << 14),
563 .maxFramebufferHeight = (1 << 14),
564 .maxFramebufferLayers = (1 << 10),
565 .framebufferColorSampleCounts = sample_counts,
566 .framebufferDepthSampleCounts = sample_counts,
567 .framebufferStencilSampleCounts = sample_counts,
568 .framebufferNoAttachmentsSampleCounts = sample_counts,
569 .maxColorAttachments = MAX_RTS,
570 .sampledImageColorSampleCounts = sample_counts,
571 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
572 .sampledImageDepthSampleCounts = sample_counts,
573 .sampledImageStencilSampleCounts = sample_counts,
574 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
575 .maxSampleMaskWords = 1,
576 .timestampComputeAndGraphics = false,
577 .timestampPeriod = time_stamp_base,
578 .maxClipDistances = 0 /* FIXME */,
579 .maxCullDistances = 0 /* FIXME */,
580 .maxCombinedClipAndCullDistances = 0 /* FIXME */,
581 .discreteQueuePriorities = 1,
582 .pointSizeRange = { 0.125, 255.875 },
583 .lineWidthRange = { 0.0, 7.9921875 },
584 .pointSizeGranularity = (1.0 / 8.0),
585 .lineWidthGranularity = (1.0 / 128.0),
586 .strictLines = false, /* FINISHME */
587 .standardSampleLocations = true,
588 .optimalBufferCopyOffsetAlignment = 128,
589 .optimalBufferCopyRowPitchAlignment = 128,
590 .nonCoherentAtomSize = 64,
591 };
592
593 *pProperties = (VkPhysicalDeviceProperties) {
594 .apiVersion = VK_MAKE_VERSION(1, 0, 5),
595 .driverVersion = 1,
596 .vendorID = 0x8086,
597 .deviceID = pdevice->chipset_id,
598 .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
599 .limits = limits,
600 .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
601 };
602
603 strcpy(pProperties->deviceName, pdevice->name);
604 anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
605 }
606
607 void anv_GetPhysicalDeviceQueueFamilyProperties(
608 VkPhysicalDevice physicalDevice,
609 uint32_t* pCount,
610 VkQueueFamilyProperties* pQueueFamilyProperties)
611 {
612 if (pQueueFamilyProperties == NULL) {
613 *pCount = 1;
614 return;
615 }
616
617 assert(*pCount >= 1);
618
619 *pQueueFamilyProperties = (VkQueueFamilyProperties) {
620 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
621 VK_QUEUE_COMPUTE_BIT |
622 VK_QUEUE_TRANSFER_BIT,
623 .queueCount = 1,
624 .timestampValidBits = 36, /* XXX: Real value here */
625 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
626 };
627 }
628
629 void anv_GetPhysicalDeviceMemoryProperties(
630 VkPhysicalDevice physicalDevice,
631 VkPhysicalDeviceMemoryProperties* pMemoryProperties)
632 {
633 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
634 VkDeviceSize heap_size;
635
636 /* Reserve some wiggle room for the driver by exposing only 75% of the
637 * aperture to the heap.
638 */
639 heap_size = 3 * physical_device->aperture_size / 4;
640
641 if (physical_device->info.has_llc) {
642 /* Big core GPUs share LLC with the CPU and thus one memory type can be
643 * both cached and coherent at the same time.
644 */
645 pMemoryProperties->memoryTypeCount = 1;
646 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
647 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
648 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
649 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
650 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
651 .heapIndex = 0,
652 };
653 } else {
654 /* The spec requires that we expose a host-visible, coherent memory
655 * type, but Atom GPUs don't share LLC. Thus we offer two memory types
 656     * to give the application a choice between cached but not coherent,
 657     * and coherent but uncached (write-combined).
658 */
659 pMemoryProperties->memoryTypeCount = 2;
660 pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
661 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
662 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
663 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
664 .heapIndex = 0,
665 };
666 pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
667 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
668 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
669 VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
670 .heapIndex = 0,
671 };
672 }
673
674 pMemoryProperties->memoryHeapCount = 1;
675 pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
676 .size = heap_size,
677 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
678 };
679 }
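
/* Illustrative sketch (not driver code): given the table above, an
 * application choosing a memory type for some VkMemoryRequirements `reqs`
 * and desired property flags `wanted` (both hypothetical names) would
 * typically loop:
 *
 *    for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
 *       if ((reqs.memoryTypeBits & (1u << i)) &&
 *           (props.memoryTypes[i].propertyFlags & wanted) == wanted)
 *          return i;
 *    }
 *
 * On LLC parts the single type is both HOST_COHERENT and HOST_CACHED; on
 * non-LLC parts the application must choose one or the other.
 */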
680
681 PFN_vkVoidFunction anv_GetInstanceProcAddr(
682 VkInstance instance,
683 const char* pName)
684 {
685 return anv_lookup_entrypoint(NULL, pName);
686 }
687
688 /* With version 1+ of the loader interface the ICD should expose
689 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
690 */
691 PUBLIC
692 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
693 VkInstance instance,
694 const char* pName);
695
696 PUBLIC
697 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
698 VkInstance instance,
699 const char* pName)
700 {
701 return anv_GetInstanceProcAddr(instance, pName);
702 }
703
704 PFN_vkVoidFunction anv_GetDeviceProcAddr(
705 VkDevice _device,
706 const char* pName)
707 {
708 ANV_FROM_HANDLE(anv_device, device, _device);
709 return anv_lookup_entrypoint(&device->info, pName);
710 }
711
712 static VkResult
713 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
714 {
715 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
716 queue->device = device;
717 queue->pool = &device->surface_state_pool;
718
719 return VK_SUCCESS;
720 }
721
722 static void
723 anv_queue_finish(struct anv_queue *queue)
724 {
725 }
726
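/* Allocate state from the given pool and copy `p` into it.  On non-LLC
 * platforms the new state is clflushed so the GPU sees the freshly
 * written bytes.
 */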
727 static struct anv_state
728 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
729 {
730 struct anv_state state;
731
732 state = anv_state_pool_alloc(pool, size, align);
733 memcpy(state.map, p, size);
734
735 if (!pool->block_pool->device->info.has_llc)
736 anv_state_clflush(state);
737
738 return state;
739 }
740
741 struct gen8_border_color {
742 union {
743 float float32[4];
744 uint32_t uint32[4];
745 };
746 /* Pad out to 64 bytes */
747 uint32_t _pad[12];
748 };
749
750 static void
751 anv_device_init_border_colors(struct anv_device *device)
752 {
753 static const struct gen8_border_color border_colors[] = {
754 [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
755 [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
756 [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
757 [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
758 [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
759 [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
760 };
761
762 device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
763 sizeof(border_colors), 64,
764 border_colors);
765 }
766
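/* Copy the given batch into a freshly allocated BO, execute it, and block
 * until the GPU is done with it.  Used for one-off submissions such as
 * vkDeviceWaitIdle.
 */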
767 VkResult
768 anv_device_submit_simple_batch(struct anv_device *device,
769 struct anv_batch *batch)
770 {
771 struct drm_i915_gem_execbuffer2 execbuf;
772 struct drm_i915_gem_exec_object2 exec2_objects[1];
773 struct anv_bo bo, *exec_bos[1];
774 VkResult result = VK_SUCCESS;
775 uint32_t size;
776 int64_t timeout;
777 int ret;
778
779 /* Kernel driver requires 8 byte aligned batch length */
780 size = align_u32(batch->next - batch->start, 8);
781 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
782 if (result != VK_SUCCESS)
783 return result;
784
785 memcpy(bo.map, batch->start, size);
786 if (!device->info.has_llc)
787 anv_clflush_range(bo.map, size);
788
789 exec_bos[0] = &bo;
790 exec2_objects[0].handle = bo.gem_handle;
791 exec2_objects[0].relocation_count = 0;
792 exec2_objects[0].relocs_ptr = 0;
793 exec2_objects[0].alignment = 0;
794 exec2_objects[0].offset = bo.offset;
795 exec2_objects[0].flags = 0;
796 exec2_objects[0].rsvd1 = 0;
797 exec2_objects[0].rsvd2 = 0;
798
799 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
800 execbuf.buffer_count = 1;
801 execbuf.batch_start_offset = 0;
802 execbuf.batch_len = size;
803 execbuf.cliprects_ptr = 0;
804 execbuf.num_cliprects = 0;
805 execbuf.DR1 = 0;
806 execbuf.DR4 = 0;
807
808 execbuf.flags =
809 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
810 execbuf.rsvd1 = device->context_id;
811 execbuf.rsvd2 = 0;
812
813 result = anv_device_execbuf(device, &execbuf, exec_bos);
814 if (result != VK_SUCCESS)
815 goto fail;
816
817 timeout = INT64_MAX;
818 ret = anv_gem_wait(device, bo.gem_handle, &timeout);
819 if (ret != 0) {
820 /* We don't know the real error. */
821 result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
822 goto fail;
823 }
824
825 fail:
826 anv_bo_pool_free(&device->batch_bo_pool, &bo);
827
828 return result;
829 }
830
831 VkResult anv_CreateDevice(
832 VkPhysicalDevice physicalDevice,
833 const VkDeviceCreateInfo* pCreateInfo,
834 const VkAllocationCallbacks* pAllocator,
835 VkDevice* pDevice)
836 {
837 ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
838 VkResult result;
839 struct anv_device *device;
840
841 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
842
843 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
844 bool found = false;
845 for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
846 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
847 device_extensions[j].extensionName) == 0) {
848 found = true;
849 break;
850 }
851 }
852 if (!found)
853 return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
854 }
855
856 device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
857 sizeof(*device), 8,
858 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
859 if (!device)
860 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
861
862 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
863 device->instance = physical_device->instance;
864 device->chipset_id = physical_device->chipset_id;
865
866 if (pAllocator)
867 device->alloc = *pAllocator;
868 else
869 device->alloc = physical_device->instance->alloc;
870
871 /* XXX(chadv): Can we dup() physicalDevice->fd here? */
872 device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
873 if (device->fd == -1) {
874 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
875 goto fail_device;
876 }
877
878 device->context_id = anv_gem_create_context(device);
879 if (device->context_id == -1) {
880 result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
881 goto fail_fd;
882 }
883
884 device->info = physical_device->info;
885 device->isl_dev = physical_device->isl_dev;
886
887 /* On Broadwell and later, we can use batch chaining to more efficiently
888 * implement growing command buffers. Prior to Haswell, the kernel
889 * command parser gets in the way and we have to fall back to growing
890 * the batch.
891 */
892 device->can_chain_batches = device->info.gen >= 8;
893
894 device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
895 pCreateInfo->pEnabledFeatures->robustBufferAccess;
896
897 pthread_mutex_init(&device->mutex, NULL);
898
899 pthread_condattr_t condattr;
900 pthread_condattr_init(&condattr);
901 pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC);
 902    pthread_cond_init(&device->queue_submit, &condattr);
903 pthread_condattr_destroy(&condattr);
904
905 anv_bo_pool_init(&device->batch_bo_pool, device);
906
907 anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
908
909 anv_state_pool_init(&device->dynamic_state_pool,
910 &device->dynamic_state_block_pool);
911
912 anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
913 anv_state_pool_init(&device->instruction_state_pool,
914 &device->instruction_block_pool);
915
916 anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
917
918 anv_state_pool_init(&device->surface_state_pool,
919 &device->surface_state_block_pool);
920
921 anv_bo_init_new(&device->workaround_bo, device, 1024);
922
923 anv_scratch_pool_init(device, &device->scratch_pool);
924
925 anv_queue_init(device, &device->queue);
926
927 switch (device->info.gen) {
928 case 7:
929 if (!device->info.is_haswell)
930 result = gen7_init_device_state(device);
931 else
932 result = gen75_init_device_state(device);
933 break;
934 case 8:
935 result = gen8_init_device_state(device);
936 break;
937 case 9:
938 result = gen9_init_device_state(device);
939 break;
940 default:
941 /* Shouldn't get here as we don't create physical devices for any other
942 * gens. */
943 unreachable("unhandled gen");
944 }
945 if (result != VK_SUCCESS)
946 goto fail_fd;
947
948 anv_device_init_blorp(device);
949
950 anv_device_init_border_colors(device);
951
952 *pDevice = anv_device_to_handle(device);
953
954 return VK_SUCCESS;
955
956 fail_fd:
957 close(device->fd);
958 fail_device:
959 vk_free(&device->alloc, device);
960
961 return result;
962 }
963
964 void anv_DestroyDevice(
965 VkDevice _device,
966 const VkAllocationCallbacks* pAllocator)
967 {
968 ANV_FROM_HANDLE(anv_device, device, _device);
969
970 anv_queue_finish(&device->queue);
971
972 anv_device_finish_blorp(device);
973
974 #ifdef HAVE_VALGRIND
975 /* We only need to free these to prevent valgrind errors. The backing
976 * BO will go away in a couple of lines so we don't actually leak.
977 */
978 anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
979 #endif
980
981 anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
982 anv_gem_close(device, device->workaround_bo.gem_handle);
983
984 anv_bo_pool_finish(&device->batch_bo_pool);
985 anv_state_pool_finish(&device->dynamic_state_pool);
986 anv_block_pool_finish(&device->dynamic_state_block_pool);
987 anv_state_pool_finish(&device->instruction_state_pool);
988 anv_block_pool_finish(&device->instruction_block_pool);
989 anv_state_pool_finish(&device->surface_state_pool);
990 anv_block_pool_finish(&device->surface_state_block_pool);
991 anv_scratch_pool_finish(device, &device->scratch_pool);
992
993 close(device->fd);
994
995 pthread_mutex_destroy(&device->mutex);
996
997 vk_free(&device->alloc, device);
998 }
999
1000 VkResult anv_EnumerateInstanceExtensionProperties(
1001 const char* pLayerName,
1002 uint32_t* pPropertyCount,
1003 VkExtensionProperties* pProperties)
1004 {
1005 if (pProperties == NULL) {
1006 *pPropertyCount = ARRAY_SIZE(global_extensions);
1007 return VK_SUCCESS;
1008 }
1009
1010 assert(*pPropertyCount >= ARRAY_SIZE(global_extensions));
1011
1012 *pPropertyCount = ARRAY_SIZE(global_extensions);
1013 memcpy(pProperties, global_extensions, sizeof(global_extensions));
1014
1015 return VK_SUCCESS;
1016 }
1017
1018 VkResult anv_EnumerateDeviceExtensionProperties(
1019 VkPhysicalDevice physicalDevice,
1020 const char* pLayerName,
1021 uint32_t* pPropertyCount,
1022 VkExtensionProperties* pProperties)
1023 {
1024 if (pProperties == NULL) {
1025 *pPropertyCount = ARRAY_SIZE(device_extensions);
1026 return VK_SUCCESS;
1027 }
1028
1029 assert(*pPropertyCount >= ARRAY_SIZE(device_extensions));
1030
1031 *pPropertyCount = ARRAY_SIZE(device_extensions);
1032 memcpy(pProperties, device_extensions, sizeof(device_extensions));
1033
1034 return VK_SUCCESS;
1035 }
1036
1037 VkResult anv_EnumerateInstanceLayerProperties(
1038 uint32_t* pPropertyCount,
1039 VkLayerProperties* pProperties)
1040 {
1041 if (pProperties == NULL) {
1042 *pPropertyCount = 0;
1043 return VK_SUCCESS;
1044 }
1045
1046 /* None supported at this time */
1047 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1048 }
1049
1050 VkResult anv_EnumerateDeviceLayerProperties(
1051 VkPhysicalDevice physicalDevice,
1052 uint32_t* pPropertyCount,
1053 VkLayerProperties* pProperties)
1054 {
1055 if (pProperties == NULL) {
1056 *pPropertyCount = 0;
1057 return VK_SUCCESS;
1058 }
1059
1060 /* None supported at this time */
1061 return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1062 }
1063
1064 void anv_GetDeviceQueue(
1065 VkDevice _device,
1066 uint32_t queueNodeIndex,
1067 uint32_t queueIndex,
1068 VkQueue* pQueue)
1069 {
1070 ANV_FROM_HANDLE(anv_device, device, _device);
1071
1072 assert(queueIndex == 0);
1073
1074 *pQueue = anv_queue_to_handle(&device->queue);
1075 }
1076
1077 VkResult
1078 anv_device_execbuf(struct anv_device *device,
1079 struct drm_i915_gem_execbuffer2 *execbuf,
1080 struct anv_bo **execbuf_bos)
1081 {
1082 int ret = anv_gem_execbuffer(device, execbuf);
1083 if (ret != 0) {
1084 /* We don't know the real error. */
1085 return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
1086 }
1087
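   /* Record where the kernel actually placed each BO so that later
    * submissions start from up-to-date presumed offsets.
    */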
1088 struct drm_i915_gem_exec_object2 *objects = (void *)execbuf->buffers_ptr;
1089 for (uint32_t k = 0; k < execbuf->buffer_count; k++)
1090 execbuf_bos[k]->offset = objects[k].offset;
1091
1092 return VK_SUCCESS;
1093 }
1094
1095 VkResult anv_QueueSubmit(
1096 VkQueue _queue,
1097 uint32_t submitCount,
1098 const VkSubmitInfo* pSubmits,
1099 VkFence _fence)
1100 {
1101 ANV_FROM_HANDLE(anv_queue, queue, _queue);
1102 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1103 struct anv_device *device = queue->device;
1104 VkResult result = VK_SUCCESS;
1105
1106 /* We lock around QueueSubmit for three main reasons:
1107 *
1108 * 1) When a block pool is resized, we create a new gem handle with a
1109 * different size and, in the case of surface states, possibly a
1110 * different center offset but we re-use the same anv_bo struct when
1111 * we do so. If this happens in the middle of setting up an execbuf,
1112 * we could end up with our list of BOs out of sync with our list of
1113 * gem handles.
1114 *
1115 * 2) The algorithm we use for building the list of unique buffers isn't
 1116  *    thread-safe.  While the client is supposed to synchronize around
1117 * QueueSubmit, this would be extremely difficult to debug if it ever
1118 * came up in the wild due to a broken app. It's better to play it
1119 * safe and just lock around QueueSubmit.
1120 *
1121 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
1122 * userspace. Due to the fact that the surface state buffer is shared
1123 * between batches, we can't afford to have that happen from multiple
1124 * threads at the same time. Even though the user is supposed to
1125 * ensure this doesn't happen, we play it safe as in (2) above.
1126 *
 1127  * Since the only other things that ever take the device lock, such as
 1128  * block pool resize, happen only rarely, this lock will almost never be
 1129  * contended, so taking it isn't really an expensive operation in this case.
1130 */
1131 pthread_mutex_lock(&device->mutex);
1132
1133 for (uint32_t i = 0; i < submitCount; i++) {
1134 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1135 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
1136 pSubmits[i].pCommandBuffers[j]);
1137 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1138
1139 result = anv_cmd_buffer_execbuf(device, cmd_buffer);
1140 if (result != VK_SUCCESS)
1141 goto out;
1142 }
1143 }
1144
1145 if (fence) {
1146 struct anv_bo *fence_bo = &fence->bo;
1147 result = anv_device_execbuf(device, &fence->execbuf, &fence_bo);
1148 if (result != VK_SUCCESS)
1149 goto out;
1150
1151 /* Update the fence and wake up any waiters */
1152 assert(fence->state == ANV_FENCE_STATE_RESET);
1153 fence->state = ANV_FENCE_STATE_SUBMITTED;
1154 pthread_cond_broadcast(&device->queue_submit);
1155 }
1156
1157 out:
1158 pthread_mutex_unlock(&device->mutex);
1159
1160 return result;
1161 }
1162
1163 VkResult anv_QueueWaitIdle(
1164 VkQueue _queue)
1165 {
1166 ANV_FROM_HANDLE(anv_queue, queue, _queue);
1167
1168 return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
1169 }
1170
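/* Waiting for the device to go idle is implemented by submitting a trivial
 * batch (just MI_BATCH_BUFFER_END) and waiting on its BO.  Since the DRM
 * execbuffer ioctl executes in order (see the semaphore note below), the
 * wait only returns once all previously submitted work has completed.
 */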
1171 VkResult anv_DeviceWaitIdle(
1172 VkDevice _device)
1173 {
1174 ANV_FROM_HANDLE(anv_device, device, _device);
1175 struct anv_batch batch;
1176
1177 uint32_t cmds[8];
1178 batch.start = batch.next = cmds;
1179 batch.end = (void *) cmds + sizeof(cmds);
1180
1181 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
1182 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
1183
1184 return anv_device_submit_simple_batch(device, &batch);
1185 }
1186
1187 VkResult
1188 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
1189 {
1190 uint32_t gem_handle = anv_gem_create(device, size);
1191 if (!gem_handle)
1192 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1193
1194 anv_bo_init(bo, gem_handle, size);
1195
1196 return VK_SUCCESS;
1197 }
1198
1199 VkResult anv_AllocateMemory(
1200 VkDevice _device,
1201 const VkMemoryAllocateInfo* pAllocateInfo,
1202 const VkAllocationCallbacks* pAllocator,
1203 VkDeviceMemory* pMem)
1204 {
1205 ANV_FROM_HANDLE(anv_device, device, _device);
1206 struct anv_device_memory *mem;
1207 VkResult result;
1208
1209 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1210
1211 if (pAllocateInfo->allocationSize == 0) {
1212 /* Apparently, this is allowed */
1213 *pMem = VK_NULL_HANDLE;
1214 return VK_SUCCESS;
1215 }
1216
1217 /* We support exactly one memory heap. */
1218 assert(pAllocateInfo->memoryTypeIndex == 0 ||
1219 (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
1220
1221 /* FINISHME: Fail if allocation request exceeds heap size. */
1222
1223 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1224 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1225 if (mem == NULL)
1226 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1227
1228 /* The kernel is going to give us whole pages anyway */
1229 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
1230
1231 result = anv_bo_init_new(&mem->bo, device, alloc_size);
1232 if (result != VK_SUCCESS)
1233 goto fail;
1234
1235 mem->type_index = pAllocateInfo->memoryTypeIndex;
1236
1237 *pMem = anv_device_memory_to_handle(mem);
1238
1239 return VK_SUCCESS;
1240
1241 fail:
1242 vk_free2(&device->alloc, pAllocator, mem);
1243
1244 return result;
1245 }
1246
1247 void anv_FreeMemory(
1248 VkDevice _device,
1249 VkDeviceMemory _mem,
1250 const VkAllocationCallbacks* pAllocator)
1251 {
1252 ANV_FROM_HANDLE(anv_device, device, _device);
1253 ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1254
1255 if (mem == NULL)
1256 return;
1257
1258 if (mem->bo.map)
1259 anv_gem_munmap(mem->bo.map, mem->bo.size);
1260
1261 if (mem->bo.gem_handle != 0)
1262 anv_gem_close(device, mem->bo.gem_handle);
1263
1264 vk_free2(&device->alloc, pAllocator, mem);
1265 }
1266
1267 VkResult anv_MapMemory(
1268 VkDevice _device,
1269 VkDeviceMemory _memory,
1270 VkDeviceSize offset,
1271 VkDeviceSize size,
1272 VkMemoryMapFlags flags,
1273 void** ppData)
1274 {
1275 ANV_FROM_HANDLE(anv_device, device, _device);
1276 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1277
1278 if (mem == NULL) {
1279 *ppData = NULL;
1280 return VK_SUCCESS;
1281 }
1282
1283 if (size == VK_WHOLE_SIZE)
1284 size = mem->bo.size - offset;
1285
1286 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
1287 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
1288 * at a time is valid. We could just mmap up front and return an offset
1289 * pointer here, but that may exhaust virtual memory on 32 bit
1290 * userspace. */
1291
1292 uint32_t gem_flags = 0;
1293 if (!device->info.has_llc && mem->type_index == 0)
1294 gem_flags |= I915_MMAP_WC;
1295
1296 /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
1297 uint64_t map_offset = offset & ~4095ull;
1298 assert(offset >= map_offset);
1299 uint64_t map_size = (offset + size) - map_offset;
1300
1301 /* Let's map whole pages */
1302 map_size = align_u64(map_size, 4096);
1303
1304 mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
1305 map_offset, map_size, gem_flags);
1306 mem->map_size = map_size;
1307
1308 *ppData = mem->map + (offset - map_offset);
1309
1310 return VK_SUCCESS;
1311 }
1312
1313 void anv_UnmapMemory(
1314 VkDevice _device,
1315 VkDeviceMemory _memory)
1316 {
1317 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1318
1319 if (mem == NULL)
1320 return;
1321
1322 anv_gem_munmap(mem->map, mem->map_size);
1323 }
1324
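/* Clflush every cache line covered by the given mapped ranges, clamping to
 * the end of each mapping.  Callers are responsible for issuing the
 * appropriate memory fence before or after the flush.
 */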
1325 static void
1326 clflush_mapped_ranges(struct anv_device *device,
1327 uint32_t count,
1328 const VkMappedMemoryRange *ranges)
1329 {
1330 for (uint32_t i = 0; i < count; i++) {
1331 ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
1332 void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
1333 void *end;
1334
1335 if (ranges[i].offset + ranges[i].size > mem->map_size)
1336 end = mem->map + mem->map_size;
1337 else
1338 end = mem->map + ranges[i].offset + ranges[i].size;
1339
1340 while (p < end) {
1341 __builtin_ia32_clflush(p);
1342 p += CACHELINE_SIZE;
1343 }
1344 }
1345 }
1346
1347 VkResult anv_FlushMappedMemoryRanges(
1348 VkDevice _device,
1349 uint32_t memoryRangeCount,
1350 const VkMappedMemoryRange* pMemoryRanges)
1351 {
1352 ANV_FROM_HANDLE(anv_device, device, _device);
1353
1354 if (device->info.has_llc)
1355 return VK_SUCCESS;
1356
1357 /* Make sure the writes we're flushing have landed. */
1358 __builtin_ia32_mfence();
1359
1360 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
1361
1362 return VK_SUCCESS;
1363 }
1364
1365 VkResult anv_InvalidateMappedMemoryRanges(
1366 VkDevice _device,
1367 uint32_t memoryRangeCount,
1368 const VkMappedMemoryRange* pMemoryRanges)
1369 {
1370 ANV_FROM_HANDLE(anv_device, device, _device);
1371
1372 if (device->info.has_llc)
1373 return VK_SUCCESS;
1374
1375 clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
1376
1377 /* Make sure no reads get moved up above the invalidate. */
1378 __builtin_ia32_mfence();
1379
1380 return VK_SUCCESS;
1381 }
1382
1383 void anv_GetBufferMemoryRequirements(
1384 VkDevice device,
1385 VkBuffer _buffer,
1386 VkMemoryRequirements* pMemoryRequirements)
1387 {
1388 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1389
1390 /* The Vulkan spec (git aaed022) says:
1391 *
1392 * memoryTypeBits is a bitfield and contains one bit set for every
1393 * supported memory type for the resource. The bit `1<<i` is set if and
1394 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1395 * structure for the physical device is supported.
1396 *
1397 * We support exactly one memory type.
1398 */
1399 pMemoryRequirements->memoryTypeBits = 1;
1400
1401 pMemoryRequirements->size = buffer->size;
1402 pMemoryRequirements->alignment = 16;
1403 }
1404
1405 void anv_GetImageMemoryRequirements(
1406 VkDevice device,
1407 VkImage _image,
1408 VkMemoryRequirements* pMemoryRequirements)
1409 {
1410 ANV_FROM_HANDLE(anv_image, image, _image);
1411
1412 /* The Vulkan spec (git aaed022) says:
1413 *
1414 * memoryTypeBits is a bitfield and contains one bit set for every
1415 * supported memory type for the resource. The bit `1<<i` is set if and
1416 * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1417 * structure for the physical device is supported.
1418 *
1419 * We support exactly one memory type.
1420 */
1421 pMemoryRequirements->memoryTypeBits = 1;
1422
1423 pMemoryRequirements->size = image->size;
1424 pMemoryRequirements->alignment = image->alignment;
1425 }
1426
1427 void anv_GetImageSparseMemoryRequirements(
1428 VkDevice device,
1429 VkImage image,
1430 uint32_t* pSparseMemoryRequirementCount,
1431 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
1432 {
1433 stub();
1434 }
1435
1436 void anv_GetDeviceMemoryCommitment(
1437 VkDevice device,
1438 VkDeviceMemory memory,
1439 VkDeviceSize* pCommittedMemoryInBytes)
1440 {
1441 *pCommittedMemoryInBytes = 0;
1442 }
1443
1444 VkResult anv_BindBufferMemory(
1445 VkDevice device,
1446 VkBuffer _buffer,
1447 VkDeviceMemory _memory,
1448 VkDeviceSize memoryOffset)
1449 {
1450 ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1451 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1452
1453 if (mem) {
1454 buffer->bo = &mem->bo;
1455 buffer->offset = memoryOffset;
1456 } else {
1457 buffer->bo = NULL;
1458 buffer->offset = 0;
1459 }
1460
1461 return VK_SUCCESS;
1462 }
1463
1464 VkResult anv_QueueBindSparse(
1465 VkQueue queue,
1466 uint32_t bindInfoCount,
1467 const VkBindSparseInfo* pBindInfo,
1468 VkFence fence)
1469 {
1470 stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
1471 }
1472
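/* Fence implementation overview:
 *
 * Each fence owns a small BO from the batch BO pool.  The anv_fence CPU
 * struct lives at the start of the BO's map and is followed, on its own
 * cache line, by a trivial batch (MI_BATCH_BUFFER_END + MI_NOOP).  The
 * execbuf for that batch is built once here; vkQueueSubmit executes it
 * after the client's command buffers and moves the fence from RESET to
 * SUBMITTED.  GetFenceStatus and WaitForFences then simply gem_wait on the
 * fence BO, flipping the state to SIGNALED when the wait succeeds.
 */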
1473 VkResult anv_CreateFence(
1474 VkDevice _device,
1475 const VkFenceCreateInfo* pCreateInfo,
1476 const VkAllocationCallbacks* pAllocator,
1477 VkFence* pFence)
1478 {
1479 ANV_FROM_HANDLE(anv_device, device, _device);
1480 struct anv_bo fence_bo;
1481 struct anv_fence *fence;
1482 struct anv_batch batch;
1483 VkResult result;
1484
1485 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1486
1487 result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
1488 if (result != VK_SUCCESS)
1489 return result;
1490
1491 /* Fences are small. Just store the CPU data structure in the BO. */
1492 fence = fence_bo.map;
1493 fence->bo = fence_bo;
1494
1495 /* Place the batch after the CPU data but on its own cache line. */
1496 const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
1497 batch.next = batch.start = fence->bo.map + batch_offset;
1498 batch.end = fence->bo.map + fence->bo.size;
1499 anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
1500 anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
1501
1502 if (!device->info.has_llc) {
1503 assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
1504 assert(batch.next - batch.start <= CACHELINE_SIZE);
1505 __builtin_ia32_mfence();
1506 __builtin_ia32_clflush(batch.start);
1507 }
1508
1509 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1510 fence->exec2_objects[0].relocation_count = 0;
1511 fence->exec2_objects[0].relocs_ptr = 0;
1512 fence->exec2_objects[0].alignment = 0;
1513 fence->exec2_objects[0].offset = fence->bo.offset;
1514 fence->exec2_objects[0].flags = 0;
1515 fence->exec2_objects[0].rsvd1 = 0;
1516 fence->exec2_objects[0].rsvd2 = 0;
1517
1518 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1519 fence->execbuf.buffer_count = 1;
1520 fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
1521 fence->execbuf.batch_len = batch.next - batch.start;
1522 fence->execbuf.cliprects_ptr = 0;
1523 fence->execbuf.num_cliprects = 0;
1524 fence->execbuf.DR1 = 0;
1525 fence->execbuf.DR4 = 0;
1526
1527 fence->execbuf.flags =
1528 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1529 fence->execbuf.rsvd1 = device->context_id;
1530 fence->execbuf.rsvd2 = 0;
1531
1532 fence->state = ANV_FENCE_STATE_RESET;
1533
1534 *pFence = anv_fence_to_handle(fence);
1535
1536 return VK_SUCCESS;
1537 }
1538
1539 void anv_DestroyFence(
1540 VkDevice _device,
1541 VkFence _fence,
1542 const VkAllocationCallbacks* pAllocator)
1543 {
1544 ANV_FROM_HANDLE(anv_device, device, _device);
1545 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1546
1547 assert(fence->bo.map == fence);
1548 anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
1549 }
1550
1551 VkResult anv_ResetFences(
1552 VkDevice _device,
1553 uint32_t fenceCount,
1554 const VkFence* pFences)
1555 {
1556 for (uint32_t i = 0; i < fenceCount; i++) {
1557 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1558 fence->state = ANV_FENCE_STATE_RESET;
1559 }
1560
1561 return VK_SUCCESS;
1562 }
1563
1564 VkResult anv_GetFenceStatus(
1565 VkDevice _device,
1566 VkFence _fence)
1567 {
1568 ANV_FROM_HANDLE(anv_device, device, _device);
1569 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1570 int64_t t = 0;
1571 int ret;
1572
1573 switch (fence->state) {
1574 case ANV_FENCE_STATE_RESET:
1575 /* If it hasn't even been sent off to the GPU yet, it's not ready */
1576 return VK_NOT_READY;
1577
1578 case ANV_FENCE_STATE_SIGNALED:
1579 /* It's been signaled, return success */
1580 return VK_SUCCESS;
1581
1582 case ANV_FENCE_STATE_SUBMITTED:
1583 /* It's been submitted to the GPU but we don't know if it's done yet. */
1584 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1585 if (ret == 0) {
1586 fence->state = ANV_FENCE_STATE_SIGNALED;
1587 return VK_SUCCESS;
1588 } else {
1589 return VK_NOT_READY;
1590 }
1591 default:
1592 unreachable("Invalid fence status");
1593 }
1594 }
1595
1596 #define NSEC_PER_SEC 1000000000
1597 #define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
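/* INT_TYPE_MAX(type) evaluates to the maximum value of a signed integer
 * type, e.g. INT_TYPE_MAX(int32_t) == 0x7fffffff.  It is used below to
 * clamp tv_sec and avoid overflow on systems with a 32-bit time_t.
 */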
1598
1599 VkResult anv_WaitForFences(
1600 VkDevice _device,
1601 uint32_t fenceCount,
1602 const VkFence* pFences,
1603 VkBool32 waitAll,
1604 uint64_t _timeout)
1605 {
1606 ANV_FROM_HANDLE(anv_device, device, _device);
1607 int ret;
1608
1609 /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
 1610  * to block indefinitely for timeouts <= 0.  Unfortunately, this was broken
1611 * for a couple of kernel releases. Since there's no way to know
1612 * whether or not the kernel we're using is one of the broken ones, the
1613 * best we can do is to clamp the timeout to INT64_MAX. This limits the
1614 * maximum timeout from 584 years to 292 years - likely not a big deal.
1615 */
1616 int64_t timeout = MIN2(_timeout, INT64_MAX);
1617
1618 uint32_t pending_fences = fenceCount;
1619 while (pending_fences) {
1620 pending_fences = 0;
1621 bool signaled_fences = false;
1622 for (uint32_t i = 0; i < fenceCount; i++) {
1623 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1624 switch (fence->state) {
1625 case ANV_FENCE_STATE_RESET:
1626 /* This fence hasn't been submitted yet, we'll catch it the next
1627 * time around. Yes, this may mean we dead-loop but, short of
1628 * lots of locking and a condition variable, there's not much that
1629 * we can do about that.
1630 */
1631 pending_fences++;
1632 continue;
1633
1634 case ANV_FENCE_STATE_SIGNALED:
1635 /* This fence is not pending. If waitAll isn't set, we can return
1636 * early. Otherwise, we have to keep going.
1637 */
1638 if (!waitAll)
1639 return VK_SUCCESS;
1640 continue;
1641
1642 case ANV_FENCE_STATE_SUBMITTED:
1643 /* These are the fences we really care about. Go ahead and wait
1644 * on it until we hit a timeout.
1645 */
1646 ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
1647 if (ret == -1 && errno == ETIME) {
1648 return VK_TIMEOUT;
1649 } else if (ret == -1) {
1650 /* We don't know the real error. */
1651 return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
1652 } else {
1653 fence->state = ANV_FENCE_STATE_SIGNALED;
1654 signaled_fences = true;
1655 if (!waitAll)
1656 return VK_SUCCESS;
1657 continue;
1658 }
1659 }
1660 }
1661
1662 if (pending_fences && !signaled_fences) {
1663 /* If we've hit this then someone decided to vkWaitForFences before
1664 * they've actually submitted any of them to a queue. This is a
1665 * fairly pessimal case, so it's ok to lock here and use a standard
1666 * pthreads condition variable.
1667 */
1668 pthread_mutex_lock(&device->mutex);
1669
1670 /* It's possible that some of the fences have changed state since the
1671 * last time we checked. Now that we have the lock, check for
1672 * pending fences again and don't wait if it's changed.
1673 */
1674 uint32_t now_pending_fences = 0;
1675 for (uint32_t i = 0; i < fenceCount; i++) {
1676 ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1677 if (fence->state == ANV_FENCE_STATE_RESET)
1678 now_pending_fences++;
1679 }
1680 assert(now_pending_fences <= pending_fences);
1681
1682 if (now_pending_fences == pending_fences) {
1683 struct timespec before;
1684 clock_gettime(CLOCK_MONOTONIC, &before);
1685
1686 uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
1687 uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
1688 (timeout / NSEC_PER_SEC);
1689 abs_nsec %= NSEC_PER_SEC;
1690
1691 /* Avoid roll-over in tv_sec on 32-bit systems if the user
1692 * provided timeout is UINT64_MAX
1693 */
1694 struct timespec abstime;
1695 abstime.tv_nsec = abs_nsec;
1696 abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));
1697
1698 ret = pthread_cond_timedwait(&device->queue_submit,
1699 &device->mutex, &abstime);
1700 assert(ret != EINVAL);
1701
1702 struct timespec after;
1703 clock_gettime(CLOCK_MONOTONIC, &after);
1704 uint64_t time_elapsed =
1705 ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
1706 ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);
1707
1708 if (time_elapsed >= timeout) {
1709 pthread_mutex_unlock(&device->mutex);
1710 return VK_TIMEOUT;
1711 }
1712
1713 timeout -= time_elapsed;
1714 }
1715
1716 pthread_mutex_unlock(&device->mutex);
1717 }
1718 }
1719
1720 return VK_SUCCESS;
1721 }
1722
1723 // Queue semaphore functions
1724
1725 VkResult anv_CreateSemaphore(
1726 VkDevice device,
1727 const VkSemaphoreCreateInfo* pCreateInfo,
1728 const VkAllocationCallbacks* pAllocator,
1729 VkSemaphore* pSemaphore)
1730 {
 1731    /* The DRM execbuffer ioctl always executes in order, even between different
1732 * rings. As such, there's nothing to do for the user space semaphore.
1733 */
1734
1735 *pSemaphore = (VkSemaphore)1;
1736
1737 return VK_SUCCESS;
1738 }
1739
1740 void anv_DestroySemaphore(
1741 VkDevice device,
1742 VkSemaphore semaphore,
1743 const VkAllocationCallbacks* pAllocator)
1744 {
1745 }
1746
1747 // Event functions
1748
1749 VkResult anv_CreateEvent(
1750 VkDevice _device,
1751 const VkEventCreateInfo* pCreateInfo,
1752 const VkAllocationCallbacks* pAllocator,
1753 VkEvent* pEvent)
1754 {
1755 ANV_FROM_HANDLE(anv_device, device, _device);
1756 struct anv_state state;
1757 struct anv_event *event;
1758
1759 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
1760
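   /* The event is allocated from the dynamic state pool so it has a
    * GPU-visible address that command buffers can write and poll; the
    * `semaphore` field holds the VkResult-style status (VK_EVENT_SET or
    * VK_EVENT_RESET).
    */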
1761 state = anv_state_pool_alloc(&device->dynamic_state_pool,
1762 sizeof(*event), 8);
1763 event = state.map;
1764 event->state = state;
1765 event->semaphore = VK_EVENT_RESET;
1766
1767 if (!device->info.has_llc) {
1768 /* Make sure the writes we're flushing have landed. */
1769 __builtin_ia32_mfence();
1770 __builtin_ia32_clflush(event);
1771 }
1772
1773 *pEvent = anv_event_to_handle(event);
1774
1775 return VK_SUCCESS;
1776 }
1777
1778 void anv_DestroyEvent(
1779 VkDevice _device,
1780 VkEvent _event,
1781 const VkAllocationCallbacks* pAllocator)
1782 {
1783 ANV_FROM_HANDLE(anv_device, device, _device);
1784 ANV_FROM_HANDLE(anv_event, event, _event);
1785
1786 anv_state_pool_free(&device->dynamic_state_pool, event->state);
1787 }
1788
1789 VkResult anv_GetEventStatus(
1790 VkDevice _device,
1791 VkEvent _event)
1792 {
1793 ANV_FROM_HANDLE(anv_device, device, _device);
1794 ANV_FROM_HANDLE(anv_event, event, _event);
1795
1796 if (!device->info.has_llc) {
1797 /* Invalidate read cache before reading event written by GPU. */
1798 __builtin_ia32_clflush(event);
1799 __builtin_ia32_mfence();
1800
1801 }
1802
1803 return event->semaphore;
1804 }
1805
1806 VkResult anv_SetEvent(
1807 VkDevice _device,
1808 VkEvent _event)
1809 {
1810 ANV_FROM_HANDLE(anv_device, device, _device);
1811 ANV_FROM_HANDLE(anv_event, event, _event);
1812
1813 event->semaphore = VK_EVENT_SET;
1814
1815 if (!device->info.has_llc) {
1816 /* Make sure the writes we're flushing have landed. */
1817 __builtin_ia32_mfence();
1818 __builtin_ia32_clflush(event);
1819 }
1820
1821 return VK_SUCCESS;
1822 }
1823
1824 VkResult anv_ResetEvent(
1825 VkDevice _device,
1826 VkEvent _event)
1827 {
1828 ANV_FROM_HANDLE(anv_device, device, _device);
1829 ANV_FROM_HANDLE(anv_event, event, _event);
1830
1831 event->semaphore = VK_EVENT_RESET;
1832
1833 if (!device->info.has_llc) {
1834 /* Make sure the writes we're flushing have landed. */
1835 __builtin_ia32_mfence();
1836 __builtin_ia32_clflush(event);
1837 }
1838
1839 return VK_SUCCESS;
1840 }
1841
1842 // Buffer functions
1843
1844 VkResult anv_CreateBuffer(
1845 VkDevice _device,
1846 const VkBufferCreateInfo* pCreateInfo,
1847 const VkAllocationCallbacks* pAllocator,
1848 VkBuffer* pBuffer)
1849 {
1850 ANV_FROM_HANDLE(anv_device, device, _device);
1851 struct anv_buffer *buffer;
1852
1853 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1854
1855 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1856 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1857 if (buffer == NULL)
1858 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1859
1860 buffer->size = pCreateInfo->size;
1861 buffer->usage = pCreateInfo->usage;
1862 buffer->bo = NULL;
1863 buffer->offset = 0;
1864
1865 *pBuffer = anv_buffer_to_handle(buffer);
1866
1867 return VK_SUCCESS;
1868 }
1869
1870 void anv_DestroyBuffer(
1871 VkDevice _device,
1872 VkBuffer _buffer,
1873 const VkAllocationCallbacks* pAllocator)
1874 {
1875 ANV_FROM_HANDLE(anv_device, device, _device);
1876 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1877
1878 vk_free2(&device->alloc, pAllocator, buffer);
1879 }
1880
1881 void
1882 anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
1883 enum isl_format format,
1884 uint32_t offset, uint32_t range, uint32_t stride)
1885 {
1886 isl_buffer_fill_state(&device->isl_dev, state.map,
1887 .address = offset,
1888 .mocs = device->default_mocs,
1889 .size = range,
1890 .format = format,
1891 .stride = stride);
1892
1893 if (!device->info.has_llc)
1894 anv_state_clflush(state);
1895 }
1896
1897 void anv_DestroySampler(
1898 VkDevice _device,
1899 VkSampler _sampler,
1900 const VkAllocationCallbacks* pAllocator)
1901 {
1902 ANV_FROM_HANDLE(anv_device, device, _device);
1903 ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1904
1905 vk_free2(&device->alloc, pAllocator, sampler);
1906 }
1907
1908 VkResult anv_CreateFramebuffer(
1909 VkDevice _device,
1910 const VkFramebufferCreateInfo* pCreateInfo,
1911 const VkAllocationCallbacks* pAllocator,
1912 VkFramebuffer* pFramebuffer)
1913 {
1914 ANV_FROM_HANDLE(anv_device, device, _device);
1915 struct anv_framebuffer *framebuffer;
1916
1917 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1918
1919 size_t size = sizeof(*framebuffer) +
1920 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
1921 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1922 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1923 if (framebuffer == NULL)
1924 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1925
1926 framebuffer->attachment_count = pCreateInfo->attachmentCount;
1927 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1928 VkImageView _iview = pCreateInfo->pAttachments[i];
1929 framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
1930 }
1931
1932 framebuffer->width = pCreateInfo->width;
1933 framebuffer->height = pCreateInfo->height;
1934 framebuffer->layers = pCreateInfo->layers;
1935
1936 *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
1937
1938 return VK_SUCCESS;
1939 }
1940
1941 void anv_DestroyFramebuffer(
1942 VkDevice _device,
1943 VkFramebuffer _fb,
1944 const VkAllocationCallbacks* pAllocator)
1945 {
1946 ANV_FROM_HANDLE(anv_device, device, _device);
1947 ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
1948
1949 vk_free2(&device->alloc, pAllocator, fb);
1950 }