anv: remove dummy VK_DEBUG_MARKER_EXT entry points
[mesa.git] / src / intel / vulkan / anv_device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "anv_timestamp.h"
#include "util/strtod.h"
#include "util/debug.h"

#include "genxml/gen7_pack.h"

struct anv_dispatch_table dtable;

static void
compiler_debug_log(void *data, const char *fmt, ...)
{ }

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);

   va_end(args);
}
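
/* DEBUG_PERF is enabled at runtime through the INTEL_DEBUG environment
 * variable (INTEL_DEBUG=perf), which brw_process_intel_debug_variable()
 * parses during physical device initialization below.
 */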

static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                       "failed to open %s: %m", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id);
   if (!device->info) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get device info");
      goto fail;
   }

   if (device->info->is_haswell) {
      fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
   } else if (device->info->gen == 7 && !device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
   } else if (device->info->gen == 7 && device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
   } else if (device->info->gen >= 8) {
      /* Broadwell, Cherryview, Skylake, Broxton, and Kaby Lake are as fully
       * supported as anything else.
       */
   } else {
      result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   device->cmd_parser_version = -1;
   if (device->info->gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                            "failed to get command parser version");
         goto fail;
      }
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!device->info->has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing wc mmap");
      goto fail;
   }

   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   brw_process_intel_debug_variable();

   device->compiler = brw_compiler_create(NULL, device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS) {
      ralloc_free(device->compiler);
      goto fail;
   }

   isl_device_init(&device->isl_dev, device->info, swizzled);

   /* Every path through this function must close the fd exactly once. */
   close(fd);

   return VK_SUCCESS;

 fail:
   close(fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   ralloc_free(device->compiler);
}

static const VkExtensionProperties global_extensions[] = {
   {
      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
      .specVersion = 25,
   },
#ifdef VK_USE_PLATFORM_XCB_KHR
   {
      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
      .specVersion = 5,
   },
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
   {
      .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
      .specVersion = 5,
   },
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   {
      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
      .specVersion = 4,
   },
#endif
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
      .specVersion = 67,
   },
};

static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
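
/* Note that these defaults ignore the align parameter and rely on malloc()'s
 * natural alignment, which covers the 8-byte alignment this file requests
 * through anv_alloc2().
 */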

VkResult anv_CreateInstance(
    const VkInstanceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkInstance* pInstance)
{
   struct anv_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_MAKE_VERSION(1, 0, 0);
   }

   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
       client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
      return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                       "Client requested version %d.%d.%d",
                       VK_VERSION_MAJOR(client_version),
                       VK_VERSION_MINOR(client_version),
                       VK_VERSION_PATCH(client_version));
   }

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extensionName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   instance = anv_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   instance->physicalDeviceCount = -1;

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}

void anv_DestroyInstance(
    VkInstance _instance,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   anv_free(&instance->alloc, instance);
}

VkResult anv_EnumeratePhysicalDevices(
    VkInstance _instance,
    uint32_t* pPhysicalDeviceCount,
    VkPhysicalDevice* pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      char path[20];
      for (unsigned i = 0; i < 8; i++) {
         snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
         result = anv_physical_device_init(&instance->physicalDevice,
                                           instance, path);
         if (result == VK_SUCCESS)
            break;
      }

      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is a pointer
    *    to an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] =
         anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
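
/*
 * A sketch (illustrative only, hence #if 0) of how an application consumes
 * the two-call idiom implemented above, assuming only core Vulkan entry
 * points and an already-created VkInstance.
 */
#if 0
static VkResult
example_pick_physical_device(VkInstance instance, VkPhysicalDevice *out)
{
   /* First call: NULL handle array, so only the count is written back. */
   uint32_t count = 0;
   VkResult result = vkEnumeratePhysicalDevices(instance, &count, NULL);
   if (result != VK_SUCCESS || count == 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   /* Second call: count is now in/out; at most 'count' handles are written
    * and count is overwritten with the number actually written.
    */
   count = 1;
   result = vkEnumeratePhysicalDevices(instance, &count, out);
   if (result != VK_SUCCESS && result != VK_INCOMPLETE)
      return result;

   return VK_SUCCESS;
}
#endif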

void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceFeatures* pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = false,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = true,
      .dualSrcBlend = true,
      .logicOp = true,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = true,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .wideLines = true,
      .largePoints = true,
      .alphaToOne = true,
      .multiViewport = true,
      .samplerAnisotropy = false, /* FINISHME */
      .textureCompressionETC2 = pdevice->info->gen >= 8 ||
                                pdevice->info->is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info->gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
}

void
anv_device_get_cache_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "anv-%s", ANV_TIMESTAMP);
}

void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceProperties* pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct brw_device_info *devinfo = pdevice->info;

   const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = (1ul << 27),
      .maxStorageBufferRange = max_raw_buffer_sz,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxPerStageDescriptorInputAttachments = 64,
      .maxPerStageResources = 128,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxDescriptorSetInputAttachments = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 0,
      .maxTessellationPatchSize = 0,
      .maxTessellationControlPerVertexInputComponents = 0,
      .maxTessellationControlPerVertexOutputComponents = 0,
      .maxTessellationControlPerPatchOutputComponents = 0,
      .maxTessellationControlTotalOutputComponents = 0,
      .maxTessellationEvaluationInputComponents = 0,
      .maxTessellationEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = -8,
      .maxTexelOffset = 7,
      .minTexelGatherOffset = -8,
      .maxTexelGatherOffset = 7,
      .minInterpolationOffset = -0.5,
      .maxInterpolationOffset = 0.4375,
      .subPixelInterpolationOffsetBits = 4,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = false,
      .timestampPeriod = time_stamp_base, /* nanoseconds per timestamp tick */
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(1, 0, 5),
      .driverVersion = 1,
      .vendorID = 0x8086,
      .deviceID = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   strcpy(pProperties->deviceName, pdevice->name);
   anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}

void anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pCount,
    VkQueueFamilyProperties* pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_TRANSFER_BIT,
      .queueCount = 1,
      .timestampValidBits = 36, /* XXX: Real value here */
      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
   };
}

void anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;
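   /* For example, a 4 GiB aperture yields a 3 GiB heap. */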

   if (physical_device->info->has_llc) {
      /* Big core GPUs share LLC with the CPU and thus one memory type can be
       * both cached and coherent at the same time.
       */
      pMemoryProperties->memoryTypeCount = 1;
      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   } else {
      /* The spec requires that we expose a host-visible, coherent memory
       * type, but Atom GPUs don't share LLC. Thus we offer two memory types
       * to give the application a choice between cached but not coherent,
       * and coherent but uncached (write-combined).
       */
      pMemoryProperties->memoryTypeCount = 2;
      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
      pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   }

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };
}
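
/*
 * A sketch (illustrative only, hence #if 0) of the application-side consumer
 * of the table above: pick the first memory type that is both allowed by a
 * resource's VkMemoryRequirements and carries the requested property flags.
 */
#if 0
static int32_t
example_find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
                         uint32_t allowed_types, /* memoryTypeBits */
                         VkMemoryPropertyFlags required)
{
   for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
      const bool allowed = (allowed_types & (1u << i)) != 0;
      const bool has_flags =
         (props->memoryTypes[i].propertyFlags & required) == required;
      if (allowed && has_flags)
         return (int32_t)i;
   }
   return -1;
}
#endif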

PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance instance,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}

/* With version 1+ of the loader interface the ICD should expose
 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in
 * apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance instance,
    const char* pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance instance,
    const char* pName)
{
   return anv_GetInstanceProcAddr(instance, pName);
}
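
/*
 * For illustration (an assumption about loader behavior, not loader source):
 * a version-1 loader resolves this symbol directly from the ICD shared
 * object, bypassing any LD_PRELOAD wrapper around the global
 * vkGetInstanceProcAddr symbol. Roughly:
 */
#if 0
#include <dlfcn.h>

static PFN_vkVoidFunction
example_load_from_icd(const char *icd_path, VkInstance instance,
                      const char *name)
{
   void *handle = dlopen(icd_path, RTLD_NOW | RTLD_LOCAL);
   if (handle == NULL)
      return NULL;

   PFN_vkGetInstanceProcAddr gipa =
      (PFN_vkGetInstanceProcAddr)dlsym(handle, "vk_icdGetInstanceProcAddr");
   if (gipa == NULL)
      return NULL;

   return gipa(instance, name);
}
#endif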

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice device,
    const char* pName)
{
   return anv_lookup_entrypoint(pName);
}

static VkResult
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;

   return VK_SUCCESS;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}

static struct anv_state
anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size,
                         size_t align, const void *p)
{
   struct anv_state state;

   state = anv_state_pool_alloc(pool, size, align);
   memcpy(state.map, p, size);

   if (!pool->block_pool->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

struct gen8_border_color {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   /* Pad out to 64 bytes */
   uint32_t _pad[12];
};
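
/*
 * A compile-time check of the padding comment above; illustrative only
 * (hence #if 0) and assumes a C11 compiler for _Static_assert.
 */
#if 0
_Static_assert(sizeof(struct gen8_border_color) == 64,
               "each border color entry must be 64 bytes");
#endif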

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const struct gen8_border_color border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
                                                    sizeof(border_colors), 64,
                                                    border_colors);
}

VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo;
   VkResult result = VK_SUCCESS;
   uint32_t size;
   int64_t timeout;
   int ret;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      anv_clflush_range(bo.map, size);

   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo.gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "gem wait failed: %m");
      goto fail;
   }

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

VkResult anv_CreateDevice(
    VkPhysicalDevice physicalDevice,
    const VkDeviceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDevice* pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extensionName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   anv_set_dispatch_devinfo(physical_device->info);

   device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->chipset_id = physical_device->chipset_id;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_device;
   }

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   device->info = *physical_device->info;
   device->isl_dev = physical_device->isl_dev;

   /* On Broadwell and later, we can use batch chaining to more efficiently
    * implement growing command buffers. On earlier hardware, the kernel
    * command parser gets in the way and we have to fall back to growing
    * the batch.
    */
   device->can_chain_batches = device->info.gen >= 8;

   device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
      pCreateInfo->pEnabledFeatures->robustBufferAccess;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
   anv_pipeline_cache_init(&device->default_pipeline_cache, device);

   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_bo_init_new(&device->workaround_bo, device, 1024);

   anv_scratch_pool_init(device, &device->scratch_pool);

   anv_queue_init(device, &device->queue);

   switch (device->info.gen) {
   case 7:
      if (!device->info.is_haswell)
         result = gen7_init_device_state(device);
      else
         result = gen75_init_device_state(device);
      break;
   case 8:
      result = gen8_init_device_state(device);
      break;
   case 9:
      result = gen9_init_device_state(device);
      break;
   default:
      /* Shouldn't get here as we don't create physical devices for any other
       * gens. */
      unreachable("unhandled gen");
   }
   if (result != VK_SUCCESS)
      goto fail_fd;

   result = anv_device_init_meta(device);
   if (result != VK_SUCCESS)
      goto fail_fd;

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_free(&device->alloc, device);

   return result;
}

void anv_DestroyDevice(
    VkDevice _device,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
   anv_gem_close(device, device->workaround_bo.gem_handle);

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_scratch_pool_finish(device, &device->scratch_pool);

   close(device->fd);

   pthread_mutex_destroy(&device->mutex);

   anv_free(&device->alloc, device);
}

VkResult anv_EnumerateInstanceExtensionProperties(
    const char* pLayerName,
    uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pPropertyCount >= ARRAY_SIZE(global_extensions));

   *pPropertyCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice physicalDevice,
    const char* pLayerName,
    uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pPropertyCount >= ARRAY_SIZE(device_extensions));

   *pPropertyCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t* pPropertyCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_EnumerateDeviceLayerProperties(
    VkPhysicalDevice physicalDevice,
    uint32_t* pPropertyCount,
    VkLayerProperties* pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

void anv_GetDeviceQueue(
    VkDevice _device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue* pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);
}

VkResult anv_QueueSubmit(
    VkQueue _queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < submitCount; i++) {
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
         if (ret != 0) {
            /* We don't know the real error. */
            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "execbuf2 failed: %m");
         }

         for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
            cmd_buffer->execbuf2.bos[k]->offset =
               cmd_buffer->execbuf2.objects[k].offset;
      }
   }

   if (fence) {
      ret = anv_gem_execbuffer(device, &fence->execbuf);
      if (ret != 0) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "execbuf2 failed: %m");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_QueueWaitIdle(
    VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}

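/* There is no dedicated kernel wait-idle call. The implementation below
 * builds a trivial batch (MI_BATCH_BUFFER_END plus an MI_NOOP to satisfy the
 * 8-byte batch length alignment) and hands it to
 * anv_device_submit_simple_batch(), which submits and then blocks in
 * anv_gem_wait() until the batch retires. Because execbuf work executes in
 * order (see anv_CreateSemaphore below), this also implies all previously
 * submitted batches have completed.
 */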
VkResult anv_DeviceWaitIdle(
    VkDevice _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_batch batch;

   uint32_t cmds[8];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);

   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   return anv_device_submit_simple_batch(device, &batch);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;
   bo->is_winsys_bo = false;

   return VK_SUCCESS;
}

VkResult anv_AllocateMemory(
    VkDevice _device,
    const VkMemoryAllocateInfo* pAllocateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDeviceMemory* pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   /* We support exactly one memory heap. */
   assert(pAllocateInfo->memoryTypeIndex == 0 ||
          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* The kernel is going to give us whole pages anyway */
   uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);

   result = anv_bo_init_new(&mem->bo, device, alloc_size);
   if (result != VK_SUCCESS)
      goto fail;

   mem->type_index = pAllocateInfo->memoryTypeIndex;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

 fail:
   anv_free2(&device->alloc, pAllocator, mem);

   return result;
}

void anv_FreeMemory(
    VkDevice _device,
    VkDeviceMemory _mem,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_free2(&device->alloc, pAllocator, mem);
}

VkResult anv_MapMemory(
    VkDevice _device,
    VkDeviceMemory _memory,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkMemoryMapFlags flags,
    void** ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (size == VK_WHOLE_SIZE)
      size = mem->bo.size - offset;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   uint32_t gem_flags = 0;
   if (!device->info.has_llc && mem->type_index == 0)
      gem_flags |= I915_MMAP_WC;

   /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
   uint64_t map_offset = offset & ~4095ull;
   assert(offset >= map_offset);
   uint64_t map_size = (offset + size) - map_offset;

   /* Let's map whole pages */
   map_size = align_u64(map_size, 4096);
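   /* Worked example: offset = 5000, size = 100 gives map_offset = 4096 and
    * map_size = (5000 + 100) - 4096 = 1004, rounded up to one whole
    * 4096-byte page.
    */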

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
                           map_offset, map_size, gem_flags);
   mem->map_size = map_size;

   *ppData = mem->map + (offset - map_offset);

   return VK_SUCCESS;
}

void anv_UnmapMemory(
    VkDevice _device,
    VkDeviceMemory _memory)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL)
      return;

   anv_gem_munmap(mem->map, mem->map_size);
}

static void
clflush_mapped_ranges(struct anv_device *device,
                      uint32_t count,
                      const VkMappedMemoryRange *ranges)
{
   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
      void *end;

      if (ranges[i].offset + ranges[i].size > mem->map_size)
         end = mem->map + mem->map_size;
      else
         end = mem->map + ranges[i].offset + ranges[i].size;

      while (p < end) {
         __builtin_ia32_clflush(p);
         p += CACHELINE_SIZE;
      }
   }
}

VkResult anv_FlushMappedMemoryRanges(
    VkDevice _device,
    uint32_t memoryRangeCount,
    const VkMappedMemoryRange* pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   /* Make sure the writes we're flushing have landed. */
   __builtin_ia32_mfence();

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   return VK_SUCCESS;
}
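
/*
 * Usage sketch for the non-LLC path above (illustrative only, hence #if 0,
 * with hypothetical handles): writes through a host-visible but non-coherent
 * mapping must be flushed before the GPU reads them.
 */
#if 0
static void
example_write_then_flush(VkDevice dev, VkDeviceMemory memory, void *map,
                         const void *data, VkDeviceSize size)
{
   memcpy(map, data, size);

   const VkMappedMemoryRange range = {
      .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
      .memory = memory,
      .offset = 0,
      .size = size,
   };
   vkFlushMappedMemoryRanges(dev, 1, &range);
}
#endif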

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice _device,
    uint32_t memoryRangeCount,
    const VkMappedMemoryRange* pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   /* Make sure no reads get moved up above the invalidate. */
   __builtin_ia32_mfence();

   return VK_SUCCESS;
}

void anv_GetBufferMemoryRequirements(
    VkDevice device,
    VkBuffer _buffer,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;
}

void anv_GetImageMemoryRequirements(
    VkDevice device,
    VkImage _image,
    VkMemoryRequirements* pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}

void anv_GetImageSparseMemoryRequirements(
    VkDevice device,
    VkImage image,
    uint32_t* pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
   stub();
}

void anv_GetDeviceMemoryCommitment(
    VkDevice device,
    VkDeviceMemory memory,
    VkDeviceSize* pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult anv_BindBufferMemory(
    VkDevice device,
    VkBuffer _buffer,
    VkDeviceMemory _memory,
    VkDeviceSize memoryOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   if (mem) {
      buffer->bo = &mem->bo;
      buffer->offset = memoryOffset;
   } else {
      buffer->bo = NULL;
      buffer->offset = 0;
   }

   return VK_SUCCESS;
}

VkResult anv_BindImageMemory(
    VkDevice device,
    VkImage _image,
    VkDeviceMemory _memory,
    VkDeviceSize memoryOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
   ANV_FROM_HANDLE(anv_image, image, _image);

   if (mem) {
      image->bo = &mem->bo;
      image->offset = memoryOffset;
   } else {
      image->bo = NULL;
      image->offset = 0;
   }

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparse(
    VkQueue queue,
    uint32_t bindInfoCount,
    const VkBindSparseInfo* pBindInfo,
    VkFence fence)
{
   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
}

VkResult anv_CreateFence(
    VkDevice _device,
    const VkFenceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkFence* pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_bo fence_bo;
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Fences are small. Just store the CPU data structure in the BO. */
   fence = fence_bo.map;
   fence->bo = fence_bo;

   /* Place the batch after the CPU data but on its own cache line. */
   const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
   batch.next = batch.start = fence->bo.map + batch_offset;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   if (!device->info.has_llc) {
      assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
      assert(batch.next - batch.start <= CACHELINE_SIZE);
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(batch.start);
   }

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
   fence->execbuf.batch_len = batch.next - batch.start;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   fence->ready = false;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

void anv_DestroyFence(
    VkDevice _device,
    VkFence _fence,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   assert(fence->bo.map == fence);
   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}

VkResult anv_ResetFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice _device,
    VkFence _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}

VkResult anv_WaitForFences(
    VkDevice _device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
    * for a couple of kernel releases. Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX. This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   int64_t t = timeout;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME) {
         return VK_TIMEOUT;
      } else if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem wait failed: %m");
      }
   }

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice device,
    const VkSemaphoreCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSemaphore* pSemaphore)
{
   /* The DRM execbuffer ioctl always executes in order, even between
    * different rings. As such, there's nothing to do for the user-space
    * semaphore.
    */

   *pSemaphore = (VkSemaphore)1;

   return VK_SUCCESS;
}

void anv_DestroySemaphore(
    VkDevice device,
    VkSemaphore semaphore,
    const VkAllocationCallbacks* pAllocator)
{
}

// Event functions

VkResult anv_CreateEvent(
    VkDevice _device,
    const VkEventCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkEvent* pEvent)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_event *event;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);

   state = anv_state_pool_alloc(&device->dynamic_state_pool,
                                sizeof(*event), 8);
   event = state.map;
   event->state = state;
   event->semaphore = VK_EVENT_RESET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   *pEvent = anv_event_to_handle(event);

   return VK_SUCCESS;
}

void anv_DestroyEvent(
    VkDevice _device,
    VkEvent _event,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_state_pool_free(&device->dynamic_state_pool, event->state);
}

VkResult anv_GetEventStatus(
    VkDevice _device,
    VkEvent _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   if (!device->info.has_llc) {
      /* Invalidate read cache before reading event written by GPU. */
      __builtin_ia32_clflush(event);
      __builtin_ia32_mfence();
   }

   return event->semaphore;
}

VkResult anv_SetEvent(
    VkDevice _device,
    VkEvent _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   event->semaphore = VK_EVENT_SET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   return VK_SUCCESS;
}

VkResult anv_ResetEvent(
    VkDevice _device,
    VkEvent _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   event->semaphore = VK_EVENT_RESET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   return VK_SUCCESS;
}

// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice _device,
    const VkBufferCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkBuffer* pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void anv_DestroyBuffer(
    VkDevice _device,
    VkBuffer _buffer,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_free2(&device->alloc, pAllocator, buffer);
}

void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
                              enum isl_format format,
                              uint32_t offset, uint32_t range, uint32_t stride)
{
   isl_buffer_fill_state(&device->isl_dev, state.map,
                         .address = offset,
                         .mocs = device->default_mocs,
                         .size = range,
                         .format = format,
                         .stride = stride);

   if (!device->info.has_llc)
      anv_state_clflush(state);
}

void anv_DestroySampler(
    VkDevice _device,
    VkSampler _sampler,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_free2(&device->alloc, pAllocator, sampler);
}

VkResult anv_CreateFramebuffer(
    VkDevice _device,
    const VkFramebufferCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkFramebuffer* pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_alloc2(&device->alloc, pAllocator, size, 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void anv_DestroyFramebuffer(
    VkDevice _device,
    VkFramebuffer _fb,
    const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_free2(&device->alloc, pAllocator, fb);
}