vk/device: Make reloc lists growable
[mesa.git] / src / vulkan / device.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
  28 #include <fcntl.h>
     #include <stdlib.h>   /* getenv, strtol, malloc, free */
     #include <errno.h>    /* errno, ETIME */
29
30 #include "private.h"
31
32 static int
33 anv_env_get_int(const char *name)
34 {
35 const char *val = getenv(name);
36
37 if (!val)
38 return 0;
39
40 return strtol(val, NULL, 0);
41 }
42
43 static VkResult
44 fill_physical_device(struct anv_physical_device *device,
45 struct anv_instance *instance,
46 const char *path)
47 {
48 int fd;
49
50 fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
51 if (fd < 0)
52 return vk_error(VK_ERROR_UNAVAILABLE);
53
54 device->instance = instance;
55 device->path = path;
56
57 device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
58 device->no_hw = false;
59 if (device->chipset_id) {
60 /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
61 device->no_hw = true;
62 } else {
63 device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
64 }
65 if (!device->chipset_id)
66 goto fail;
67
68 device->name = brw_get_device_name(device->chipset_id);
69 device->info = brw_get_device_info(device->chipset_id, -1);
70 if (!device->info)
71 goto fail;
72
73 if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
74 goto fail;
75
76 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
77 goto fail;
78
79 if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
80 goto fail;
81
82 if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
83 goto fail;
84
85 close(fd);
86
87 return VK_SUCCESS;
88
89 fail:
90 close(fd);
91
92 return vk_error(VK_ERROR_UNAVAILABLE);
93 }
94
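/* Default allocator, used when the application does not pass its own
 * VkAllocCallbacks to vkCreateInstance().  The alignment argument is
 * ignored; malloc's natural alignment is enough for the 8-byte alignments
 * requested in this file.
 */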
95 static void *default_alloc(
96 void* pUserData,
97 size_t size,
98 size_t alignment,
99 VkSystemAllocType allocType)
100 {
101 return malloc(size);
102 }
103
104 static void default_free(
105 void* pUserData,
106 void* pMem)
107 {
108 free(pMem);
109 }
110
111 static const VkAllocCallbacks default_alloc_callbacks = {
112 .pUserData = NULL,
113 .pfnAlloc = default_alloc,
114 .pfnFree = default_free
115 };
116
117 VkResult anv_CreateInstance(
118 const VkInstanceCreateInfo* pCreateInfo,
119 VkInstance* pInstance)
120 {
121 struct anv_instance *instance;
122 const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
123 void *user_data = NULL;
124 VkResult result;
125
126 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
127
128 if (pCreateInfo->pAllocCb) {
129 alloc_callbacks = pCreateInfo->pAllocCb;
130 user_data = pCreateInfo->pAllocCb->pUserData;
131 }
132 instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
133 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
134 if (!instance)
135 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
136
137 instance->pAllocUserData = alloc_callbacks->pUserData;
138 instance->pfnAlloc = alloc_callbacks->pfnAlloc;
139 instance->pfnFree = alloc_callbacks->pfnFree;
140 instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;
141
142 instance->physicalDeviceCount = 0;
143 result = fill_physical_device(&instance->physicalDevice,
144 instance, "/dev/dri/renderD128");
145
 146    if (result != VK_SUCCESS) {
           alloc_callbacks->pfnFree(user_data, instance);
 147       return result;
        }
148
149 instance->physicalDeviceCount++;
150 *pInstance = (VkInstance) instance;
151
152 return VK_SUCCESS;
153 }
154
155 VkResult anv_DestroyInstance(
156 VkInstance _instance)
157 {
158 struct anv_instance *instance = (struct anv_instance *) _instance;
159
160 instance->pfnFree(instance->pAllocUserData, instance);
161
162 return VK_SUCCESS;
163 }
164
165 VkResult anv_EnumeratePhysicalDevices(
166 VkInstance _instance,
167 uint32_t* pPhysicalDeviceCount,
168 VkPhysicalDevice* pPhysicalDevices)
169 {
170 struct anv_instance *instance = (struct anv_instance *) _instance;
171
172 if (*pPhysicalDeviceCount >= 1)
173 pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
174 *pPhysicalDeviceCount = instance->physicalDeviceCount;
175
176 return VK_SUCCESS;
177 }
178
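/*
 * The Get*Info entrypoints below follow a two-call convention: *pDataSize is
 * always written, and a NULL pData means "just report the size".  A typical
 * caller (illustrative only; "dev" is a hypothetical VkPhysicalDevice) looks
 * like:
 *
 *    size_t size;
 *    anv_GetPhysicalDeviceInfo(dev, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
 *                              &size, NULL);
 *    VkPhysicalDeviceProperties props;
 *    anv_GetPhysicalDeviceInfo(dev, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
 *                              &size, &props);
 */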
179 VkResult anv_GetPhysicalDeviceInfo(
180 VkPhysicalDevice physicalDevice,
181 VkPhysicalDeviceInfoType infoType,
182 size_t* pDataSize,
183 void* pData)
184 {
185 struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
186 VkPhysicalDeviceProperties *properties;
187 VkPhysicalDevicePerformance *performance;
188 VkPhysicalDeviceQueueProperties *queue_properties;
189 VkPhysicalDeviceMemoryProperties *memory_properties;
190 VkDisplayPropertiesWSI *display_properties;
191 uint64_t ns_per_tick = 80;
192
193 switch ((uint32_t) infoType) {
194 case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
195 properties = pData;
196
197 *pDataSize = sizeof(*properties);
198 if (pData == NULL)
199 return VK_SUCCESS;
200
201 properties->apiVersion = 1;
202 properties->driverVersion = 1;
203 properties->vendorId = 0x8086;
204 properties->deviceId = device->chipset_id;
205 properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
206 strcpy(properties->deviceName, device->name);
207 properties->maxInlineMemoryUpdateSize = 0;
208 properties->maxBoundDescriptorSets = MAX_SETS;
209 properties->maxThreadGroupSize = 512;
210 properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
211 properties->multiColorAttachmentClears = true;
212 properties->maxDescriptorSets = 8;
213 properties->maxViewports = 16;
214 properties->maxColorAttachments = 8;
215 return VK_SUCCESS;
216
217 case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
218 performance = pData;
219
220 *pDataSize = sizeof(*performance);
221 if (pData == NULL)
222 return VK_SUCCESS;
223
224 performance->maxDeviceClock = 1.0;
225 performance->aluPerClock = 1.0;
226 performance->texPerClock = 1.0;
227 performance->primsPerClock = 1.0;
228 performance->pixelsPerClock = 1.0;
229 return VK_SUCCESS;
230
231 case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
232 queue_properties = pData;
233
234 *pDataSize = sizeof(*queue_properties);
235 if (pData == NULL)
236 return VK_SUCCESS;
237
238 queue_properties->queueFlags = 0;
239 queue_properties->queueCount = 1;
240 queue_properties->maxAtomicCounters = 0;
241 queue_properties->supportsTimestamps = true;
242 queue_properties->maxMemReferences = 256;
243 return VK_SUCCESS;
244
245 case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
246 memory_properties = pData;
247
248 *pDataSize = sizeof(*memory_properties);
249 if (pData == NULL)
250 return VK_SUCCESS;
251
252 memory_properties->supportsMigration = false;
253 memory_properties->supportsPinning = false;
254 return VK_SUCCESS;
255
256 case VK_PHYSICAL_DEVICE_INFO_TYPE_DISPLAY_PROPERTIES_WSI:
257 anv_finishme("VK_PHYSICAL_DEVICE_INFO_TYPE_DISPLAY_PROPERTIES_WSI");
258
259 *pDataSize = sizeof(*display_properties);
260 if (pData == NULL)
261 return VK_SUCCESS;
262
263 display_properties = pData;
264 display_properties->display = 0;
265 display_properties->physicalResolution = (VkExtent2D) { 0, 0 };
266 return VK_SUCCESS;
267
268 case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PRESENT_PROPERTIES_WSI:
269 anv_finishme("VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PRESENT_PROPERTIES_WSI");
270 return VK_SUCCESS;
271
272
273 default:
274 return VK_UNSUPPORTED;
275 }
276
277 }
278
279 void * vkGetProcAddr(
280 VkPhysicalDevice physicalDevice,
281 const char* pName)
282 {
283 return anv_lookup_entrypoint(pName);
284 }
285
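/* Parse the comma-separated INTEL_DEBUG environment variable.  Only two
 * flags are recognized here: "aub" (dump an AUB trace of every submitted
 * command buffer) and "no_hw" (skip the actual execbuf ioctls), e.g.
 * INTEL_DEBUG=aub,no_hw.
 */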
286 static void
287 parse_debug_flags(struct anv_device *device)
288 {
289 const char *debug, *p, *end;
290
291 debug = getenv("INTEL_DEBUG");
292 device->dump_aub = false;
293 if (debug) {
294 for (p = debug; *p; p = end + 1) {
295 end = strchrnul(p, ',');
296 if (end - p == 3 && memcmp(p, "aub", 3) == 0)
297 device->dump_aub = true;
298 if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
299 device->no_hw = true;
300 if (*end == '\0')
301 break;
302 }
303 }
304 }
305
306 static const uint32_t BATCH_SIZE = 1 << 15;
307
308 VkResult anv_CreateDevice(
309 VkPhysicalDevice _physicalDevice,
310 const VkDeviceCreateInfo* pCreateInfo,
311 VkDevice* pDevice)
312 {
313 struct anv_physical_device *physicalDevice =
314 (struct anv_physical_device *) _physicalDevice;
315 struct anv_instance *instance = physicalDevice->instance;
316 struct anv_device *device;
317
318 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
319
320 device = instance->pfnAlloc(instance->pAllocUserData,
321 sizeof(*device), 8,
322 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
323 if (!device)
324 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
325
326 device->no_hw = physicalDevice->no_hw;
327 parse_debug_flags(device);
328
329 device->instance = physicalDevice->instance;
330 device->fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
331 if (device->fd == -1)
332 goto fail_device;
333
334 device->context_id = anv_gem_create_context(device);
335 if (device->context_id == -1)
336 goto fail_fd;
337
338 anv_bo_pool_init(&device->batch_bo_pool, device, BATCH_SIZE);
339
340 anv_block_pool_init(&device->dynamic_state_block_pool, device, 2048);
341
342 anv_state_pool_init(&device->dynamic_state_pool,
343 &device->dynamic_state_block_pool);
344
345 anv_block_pool_init(&device->instruction_block_pool, device, 2048);
346 anv_block_pool_init(&device->surface_state_block_pool, device, 2048);
347
348
349 /* Binding table pointers are only 16 bits so we have to make sure that
350 * they get allocated at the beginning of the surface state BO. To
351 * handle this, we create a separate block pool that works out of the
352 * first 64 KB of the surface state BO.
353 */
354 anv_block_pool_init_slave(&device->binding_table_block_pool,
355 &device->surface_state_block_pool, 32);
356
357 anv_state_pool_init(&device->surface_state_pool,
358 &device->surface_state_block_pool);
359
360 device->compiler = anv_compiler_create(device->fd);
361 device->aub_writer = NULL;
362
363 device->info = *physicalDevice->info;
364
365 pthread_mutex_init(&device->mutex, NULL);
366
367 anv_device_init_meta(device);
368
369 *pDevice = (VkDevice) device;
370
371 return VK_SUCCESS;
372
373 fail_fd:
374 close(device->fd);
375 fail_device:
376 anv_device_free(device, device);
377
378 return vk_error(VK_ERROR_UNAVAILABLE);
379 }
380
381 VkResult anv_DestroyDevice(
382 VkDevice _device)
383 {
384 struct anv_device *device = (struct anv_device *) _device;
385
386 anv_compiler_destroy(device->compiler);
387
388
389 anv_bo_pool_finish(&device->batch_bo_pool);
390 anv_block_pool_finish(&device->dynamic_state_block_pool);
391 anv_block_pool_finish(&device->instruction_block_pool);
392 anv_block_pool_finish(&device->surface_state_block_pool);
393
394 close(device->fd);
395
396 if (device->aub_writer)
397 anv_aub_writer_destroy(device->aub_writer);
398
399 anv_device_free(device, device);
400
401 return VK_SUCCESS;
402 }
403
404 VkResult anv_GetGlobalExtensionInfo(
405 VkExtensionInfoType infoType,
406 uint32_t extensionIndex,
407 size_t* pDataSize,
408 void* pData)
409 {
410 static const VkExtensionProperties extensions[] = {
411 {
412 .extName = "VK_WSI_LunarG",
413 .version = 3
414 }
415 };
416 uint32_t count = ARRAY_SIZE(extensions);
417
418 switch (infoType) {
 419    case VK_EXTENSION_INFO_TYPE_COUNT:
           *pDataSize = sizeof(count);
           if (pData == NULL)
              return VK_SUCCESS;

 420       memcpy(pData, &count, sizeof(count));
 422       return VK_SUCCESS;
 423
 424    case VK_EXTENSION_INFO_TYPE_PROPERTIES:
 425       if (extensionIndex >= count)
 426          return vk_error(VK_ERROR_INVALID_EXTENSION);
 427
           *pDataSize = sizeof(extensions[0]);
           if (pData == NULL)
              return VK_SUCCESS;

 428       memcpy(pData, &extensions[extensionIndex], sizeof(extensions[0]));
 430       return VK_SUCCESS;
431
432 default:
433 return VK_UNSUPPORTED;
434 }
435 }
436
437 VkResult anv_GetPhysicalDeviceExtensionInfo(
438 VkPhysicalDevice physicalDevice,
439 VkExtensionInfoType infoType,
440 uint32_t extensionIndex,
441 size_t* pDataSize,
442 void* pData)
443 {
444 uint32_t *count;
445
446 switch (infoType) {
447 case VK_EXTENSION_INFO_TYPE_COUNT:
448 *pDataSize = 4;
449 if (pData == NULL)
450 return VK_SUCCESS;
451
452 count = pData;
453 *count = 0;
454 return VK_SUCCESS;
455
456 case VK_EXTENSION_INFO_TYPE_PROPERTIES:
457 return vk_error(VK_ERROR_INVALID_EXTENSION);
458
459 default:
460 return VK_UNSUPPORTED;
461 }
462 }
463
464 VkResult anv_EnumerateLayers(
465 VkPhysicalDevice physicalDevice,
466 size_t maxStringSize,
467 size_t* pLayerCount,
468 char* const* pOutLayers,
469 void* pReserved)
470 {
471 *pLayerCount = 0;
472
473 return VK_SUCCESS;
474 }
475
476 VkResult anv_GetDeviceQueue(
477 VkDevice _device,
478 uint32_t queueNodeIndex,
479 uint32_t queueIndex,
480 VkQueue* pQueue)
481 {
482 struct anv_device *device = (struct anv_device *) _device;
483 struct anv_queue *queue;
484
485 /* FIXME: Should allocate these at device create time. */
486
487 queue = anv_device_alloc(device, sizeof(*queue), 8,
488 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
489 if (queue == NULL)
490 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
491
492 queue->device = device;
493 queue->pool = &device->surface_state_pool;
494
495 queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
496 *(uint32_t *)queue->completed_serial.map = 0;
497 queue->next_serial = 1;
498
499 *pQueue = (VkQueue) queue;
500
501 return VK_SUCCESS;
502 }
503
504 static VkResult
505 anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
506 {
507 list->num_relocs = 0;
508 list->array_length = 256;
509 list->relocs =
510 anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
511 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
512
513 if (list->relocs == NULL)
514 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
515
516 list->reloc_bos =
517 anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
518 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
519
 520    if (list->reloc_bos == NULL) {
521 anv_device_free(device, list->relocs);
522 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
523 }
524
525 return VK_SUCCESS;
526 }
527
528 static void
529 anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
530 {
531 anv_device_free(device, list->relocs);
532 anv_device_free(device, list->reloc_bos);
533 }
534
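/* Grow the relocation arrays so that at least num_additional_relocs more
 * entries fit.  The capacity doubles, starting from the 256 entries set up
 * in anv_reloc_list_init(), until it is large enough; e.g. a full 256-entry
 * list that needs one more reloc is reallocated to 512 entries and the
 * existing relocs and reloc BOs are copied over.
 */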
535 static VkResult
536 anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
537 size_t num_additional_relocs)
538 {
539 if (list->num_relocs + num_additional_relocs <= list->array_length)
540 return VK_SUCCESS;
541
542 size_t new_length = list->array_length * 2;
543 while (new_length < list->num_relocs + num_additional_relocs)
544 new_length *= 2;
545
546 struct drm_i915_gem_relocation_entry *new_relocs =
547 anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
548 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
549 if (new_relocs == NULL)
550 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
551
552 struct anv_bo **new_reloc_bos =
553 anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
554 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
 555    if (new_reloc_bos == NULL) {
556 anv_device_free(device, new_relocs);
557 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
558 }
559
560 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
561 memcpy(new_reloc_bos, list->reloc_bos,
562 list->num_relocs * sizeof(*list->reloc_bos));
563
564 anv_device_free(device, list->relocs);
565 anv_device_free(device, list->reloc_bos);
566
567 list->relocs = new_relocs;
568 list->reloc_bos = new_reloc_bos;
569
570 return VK_SUCCESS;
571 }
572
573 VkResult
574 anv_batch_init(struct anv_batch *batch, struct anv_device *device)
575 {
576 VkResult result;
577
578 result = anv_bo_pool_alloc(&device->batch_bo_pool, &batch->bo);
579 if (result != VK_SUCCESS)
580 return result;
581
582 result = anv_reloc_list_init(&batch->cmd_relocs, device);
583 if (result != VK_SUCCESS) {
584 anv_bo_pool_free(&device->batch_bo_pool, &batch->bo);
585 return result;
586 }
587
588 batch->device = device;
589 batch->next = batch->bo.map;
590
591 return VK_SUCCESS;
592 }
593
594 void
595 anv_batch_finish(struct anv_batch *batch)
596 {
597 anv_bo_pool_free(&batch->device->batch_bo_pool, &batch->bo);
598 anv_reloc_list_finish(&batch->cmd_relocs, batch->device);
599 }
600
601 void
602 anv_batch_reset(struct anv_batch *batch)
603 {
604 batch->next = batch->bo.map;
605 batch->cmd_relocs.num_relocs = 0;
606 }
607
608 void *
609 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
610 {
611 void *p = batch->next;
612
613 batch->next += num_dwords * 4;
614
615 return p;
616 }
617
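/* Append all relocations from "other" (typically a sub-batch that was just
 * copied into this batch) to "list", biasing each relocation's offset by
 * "offset", the position at which the other batch's contents now live.
 * Growth failure is not yet handled here (see the TODOs below).
 */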
618 static void
619 anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
620 struct anv_reloc_list *other, uint32_t offset)
621 {
622 anv_reloc_list_grow(list, device, other->num_relocs);
623 /* TODO: Handle failure */
624
625 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
626 other->num_relocs * sizeof(other->relocs[0]));
627 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
628 other->num_relocs * sizeof(other->reloc_bos[0]));
629
630 for (uint32_t i = 0; i < other->num_relocs; i++)
631 list->relocs[i + list->num_relocs].offset += offset;
632
633 list->num_relocs += other->num_relocs;
634 }
635
636 static uint64_t
637 anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
638 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
639 {
640 struct drm_i915_gem_relocation_entry *entry;
641 int index;
642
643 anv_reloc_list_grow(list, device, 1);
644 /* TODO: Handle failure */
645
646 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
647 index = list->num_relocs++;
648 list->reloc_bos[index] = target_bo;
649 entry = &list->relocs[index];
650 entry->target_handle = target_bo->gem_handle;
651 entry->delta = delta;
652 entry->offset = offset;
653 entry->presumed_offset = target_bo->offset;
654 entry->read_domains = 0;
655 entry->write_domain = 0;
656
657 return target_bo->offset + delta;
658 }
659
660 void
661 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
662 {
663 uint32_t size, offset;
664
665 size = other->next - other->bo.map;
666 memcpy(batch->next, other->bo.map, size);
667
668 offset = batch->next - batch->bo.map;
669 anv_reloc_list_append(&batch->cmd_relocs, batch->device,
670 &other->cmd_relocs, offset);
671
672 batch->next += size;
673 }
674
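/* Record a relocation for a batch location that will hold a GPU address and
 * return the presumed address (target_bo->offset + delta) so the caller can
 * write it into the batch right away; the kernel re-patches the location at
 * execbuf time if the target BO has moved.
 */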
675 uint64_t
676 anv_batch_emit_reloc(struct anv_batch *batch,
677 void *location, struct anv_bo *bo, uint32_t delta)
678 {
679 return anv_reloc_list_add(&batch->cmd_relocs, batch->device,
680 location - batch->bo.map, bo, delta);
681 }
682
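/* Submission runs each command buffer's pre-built execbuf, then the fence's
 * execbuf (if one was passed), and finally copies the kernel's chosen BO
 * offsets back into our anv_bo structs so later relocations start from the
 * last known placement.  In no_hw mode (INTEL_DEBUG=no_hw or
 * INTEL_DEVID_OVERRIDE) the command buffer's serial is simply marked
 * complete without touching the GPU.
 */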
683 VkResult anv_QueueSubmit(
684 VkQueue _queue,
685 uint32_t cmdBufferCount,
686 const VkCmdBuffer* pCmdBuffers,
687 VkFence _fence)
688 {
689 struct anv_queue *queue = (struct anv_queue *) _queue;
690 struct anv_device *device = queue->device;
691 struct anv_fence *fence = (struct anv_fence *) _fence;
692 int ret;
693
694 for (uint32_t i = 0; i < cmdBufferCount; i++) {
695 struct anv_cmd_buffer *cmd_buffer =
696 (struct anv_cmd_buffer *) pCmdBuffers[i];
697
698 if (device->dump_aub)
699 anv_cmd_buffer_dump(cmd_buffer);
700
701 if (!device->no_hw) {
702 ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
703 if (ret != 0)
704 return vk_error(VK_ERROR_UNKNOWN);
705
706 if (fence) {
707 ret = anv_gem_execbuffer(device, &fence->execbuf);
708 if (ret != 0)
709 return vk_error(VK_ERROR_UNKNOWN);
710 }
711
712 for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
713 cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
714 } else {
715 *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
716 }
717 }
718
719 return VK_SUCCESS;
720 }
721
722 VkResult anv_QueueAddMemReferences(
723 VkQueue queue,
724 uint32_t count,
725 const VkDeviceMemory* pMems)
726 {
727 return VK_SUCCESS;
728 }
729
730 VkResult anv_QueueRemoveMemReferences(
731 VkQueue queue,
732 uint32_t count,
733 const VkDeviceMemory* pMems)
734 {
735 return VK_SUCCESS;
736 }
737
738 VkResult anv_QueueWaitIdle(
739 VkQueue _queue)
740 {
741 struct anv_queue *queue = (struct anv_queue *) _queue;
742
743 return vkDeviceWaitIdle((VkDevice) queue->device);
744 }
745
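/* There is no dedicated "wait for idle" kernel interface used here; instead
 * this submits a trivial batch (MI_BATCH_BUFFER_END + MI_NOOP) from the
 * dynamic state pool on the device's context and then waits on that BO with
 * an effectively infinite timeout.
 */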
746 VkResult anv_DeviceWaitIdle(
747 VkDevice _device)
748 {
749 struct anv_device *device = (struct anv_device *) _device;
750 struct anv_state state;
751 struct anv_batch batch;
752 struct drm_i915_gem_execbuffer2 execbuf;
753 struct drm_i915_gem_exec_object2 exec2_objects[1];
754 struct anv_bo *bo = NULL;
755 VkResult result;
756 int64_t timeout;
757 int ret;
758
759 state = anv_state_pool_alloc(&device->dynamic_state_pool, 32, 32);
760 bo = &device->dynamic_state_pool.block_pool->bo;
761 batch.next = state.map;
762 anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
763 anv_batch_emit(&batch, GEN8_MI_NOOP);
764
765 exec2_objects[0].handle = bo->gem_handle;
766 exec2_objects[0].relocation_count = 0;
767 exec2_objects[0].relocs_ptr = 0;
768 exec2_objects[0].alignment = 0;
769 exec2_objects[0].offset = bo->offset;
770 exec2_objects[0].flags = 0;
771 exec2_objects[0].rsvd1 = 0;
772 exec2_objects[0].rsvd2 = 0;
773
774 execbuf.buffers_ptr = (uintptr_t) exec2_objects;
775 execbuf.buffer_count = 1;
776 execbuf.batch_start_offset = state.offset;
777 execbuf.batch_len = batch.next - state.map;
778 execbuf.cliprects_ptr = 0;
779 execbuf.num_cliprects = 0;
780 execbuf.DR1 = 0;
781 execbuf.DR4 = 0;
782
783 execbuf.flags =
784 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
785 execbuf.rsvd1 = device->context_id;
786 execbuf.rsvd2 = 0;
787
788 if (!device->no_hw) {
789 ret = anv_gem_execbuffer(device, &execbuf);
790 if (ret != 0) {
791 result = vk_error(VK_ERROR_UNKNOWN);
792 goto fail;
793 }
794
795 timeout = INT64_MAX;
796 ret = anv_gem_wait(device, bo->gem_handle, &timeout);
797 if (ret != 0) {
798 result = vk_error(VK_ERROR_UNKNOWN);
799 goto fail;
800 }
801 }
802
803 anv_state_pool_free(&device->dynamic_state_pool, state);
804
805 return VK_SUCCESS;
806
807 fail:
808 anv_state_pool_free(&device->dynamic_state_pool, state);
809
810 return result;
811 }
812
813 void *
814 anv_device_alloc(struct anv_device * device,
815 size_t size,
816 size_t alignment,
817 VkSystemAllocType allocType)
818 {
819 return device->instance->pfnAlloc(device->instance->pAllocUserData,
820 size,
821 alignment,
822 allocType);
823 }
824
825 void
826 anv_device_free(struct anv_device * device,
827 void * mem)
828 {
829 return device->instance->pfnFree(device->instance->pAllocUserData,
830 mem);
831 }
832
833 VkResult
834 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
835 {
836 bo->gem_handle = anv_gem_create(device, size);
837 if (!bo->gem_handle)
838 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
839
840 bo->map = NULL;
841 bo->index = 0;
842 bo->offset = 0;
843 bo->size = size;
844
845 return VK_SUCCESS;
846 }
847
848 VkResult anv_AllocMemory(
849 VkDevice _device,
850 const VkMemoryAllocInfo* pAllocInfo,
851 VkDeviceMemory* pMem)
852 {
853 struct anv_device *device = (struct anv_device *) _device;
854 struct anv_device_memory *mem;
855 VkResult result;
856
857 assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);
858
859 mem = anv_device_alloc(device, sizeof(*mem), 8,
860 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
861 if (mem == NULL)
862 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
863
864 result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
865 if (result != VK_SUCCESS)
866 goto fail;
867
868 *pMem = (VkDeviceMemory) mem;
869
870 return VK_SUCCESS;
871
872 fail:
873 anv_device_free(device, mem);
874
875 return result;
876 }
877
878 VkResult anv_FreeMemory(
879 VkDevice _device,
880 VkDeviceMemory _mem)
881 {
882 struct anv_device *device = (struct anv_device *) _device;
883 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
884
885 if (mem->bo.map)
886 anv_gem_munmap(mem->bo.map, mem->bo.size);
887
888 if (mem->bo.gem_handle != 0)
889 anv_gem_close(device, mem->bo.gem_handle);
890
891 anv_device_free(device, mem);
892
893 return VK_SUCCESS;
894 }
895
896 VkResult anv_SetMemoryPriority(
897 VkDevice device,
898 VkDeviceMemory mem,
899 VkMemoryPriority priority)
900 {
901 return VK_SUCCESS;
902 }
903
904 VkResult anv_MapMemory(
905 VkDevice _device,
906 VkDeviceMemory _mem,
907 VkDeviceSize offset,
908 VkDeviceSize size,
909 VkMemoryMapFlags flags,
910 void** ppData)
911 {
912 struct anv_device *device = (struct anv_device *) _device;
913 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
914
915 /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
916 * takes a VkDeviceMemory pointer, it seems like only one map of the memory
917 * at a time is valid. We could just mmap up front and return an offset
918 * pointer here, but that may exhaust virtual memory on 32 bit
919 * userspace. */
920
921 mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
922 mem->map_size = size;
923
924 *ppData = mem->map;
925
926 return VK_SUCCESS;
927 }
928
929 VkResult anv_UnmapMemory(
930 VkDevice _device,
931 VkDeviceMemory _mem)
932 {
933 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
934
935 anv_gem_munmap(mem->map, mem->map_size);
936
937 return VK_SUCCESS;
938 }
939
940 VkResult anv_FlushMappedMemory(
941 VkDevice device,
942 VkDeviceMemory mem,
943 VkDeviceSize offset,
944 VkDeviceSize size)
945 {
946 /* clflush here for !llc platforms */
947
948 return VK_SUCCESS;
949 }
950
951 VkResult anv_PinSystemMemory(
952 VkDevice device,
953 const void* pSysMem,
954 size_t memSize,
955 VkDeviceMemory* pMem)
956 {
957 return VK_SUCCESS;
958 }
959
960 VkResult anv_GetMultiDeviceCompatibility(
961 VkPhysicalDevice physicalDevice0,
962 VkPhysicalDevice physicalDevice1,
963 VkPhysicalDeviceCompatibilityInfo* pInfo)
964 {
965 return VK_UNSUPPORTED;
966 }
967
968 VkResult anv_OpenSharedMemory(
969 VkDevice device,
970 const VkMemoryOpenInfo* pOpenInfo,
971 VkDeviceMemory* pMem)
972 {
973 return VK_UNSUPPORTED;
974 }
975
976 VkResult anv_OpenSharedSemaphore(
977 VkDevice device,
978 const VkSemaphoreOpenInfo* pOpenInfo,
979 VkSemaphore* pSemaphore)
980 {
981 return VK_UNSUPPORTED;
982 }
983
984 VkResult anv_OpenPeerMemory(
985 VkDevice device,
986 const VkPeerMemoryOpenInfo* pOpenInfo,
987 VkDeviceMemory* pMem)
988 {
989 return VK_UNSUPPORTED;
990 }
991
992 VkResult anv_OpenPeerImage(
993 VkDevice device,
994 const VkPeerImageOpenInfo* pOpenInfo,
995 VkImage* pImage,
996 VkDeviceMemory* pMem)
997 {
998 return VK_UNSUPPORTED;
999 }
1000
1001 VkResult anv_DestroyObject(
1002 VkDevice _device,
1003 VkObjectType objType,
1004 VkObject _object)
1005 {
1006 struct anv_device *device = (struct anv_device *) _device;
1007 struct anv_object *object = (struct anv_object *) _object;
1008
1009 switch (objType) {
1010 case VK_OBJECT_TYPE_INSTANCE:
1011 return anv_DestroyInstance((VkInstance) _object);
1012
1013 case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
1014 /* We don't want to actually destroy physical devices */
1015 return VK_SUCCESS;
1016
1017 case VK_OBJECT_TYPE_DEVICE:
1018 assert(_device == (VkDevice) _object);
1019 return anv_DestroyDevice((VkDevice) _object);
1020
1021 case VK_OBJECT_TYPE_QUEUE:
1022 /* TODO */
1023 return VK_SUCCESS;
1024
1025 case VK_OBJECT_TYPE_DEVICE_MEMORY:
1026 return anv_FreeMemory(_device, (VkDeviceMemory) _object);
1027
1028 case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
1029       /* These are just dummies anyway, so we don't need to destroy them */
1030 return VK_SUCCESS;
1031
1032 case VK_OBJECT_TYPE_BUFFER:
1033 case VK_OBJECT_TYPE_BUFFER_VIEW:
1034 case VK_OBJECT_TYPE_IMAGE:
1035 case VK_OBJECT_TYPE_IMAGE_VIEW:
1036 case VK_OBJECT_TYPE_COLOR_ATTACHMENT_VIEW:
1037 case VK_OBJECT_TYPE_DEPTH_STENCIL_VIEW:
1038 case VK_OBJECT_TYPE_SHADER:
1039 case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
1040 case VK_OBJECT_TYPE_SAMPLER:
1041 case VK_OBJECT_TYPE_DESCRIPTOR_SET:
1042 case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
1043 case VK_OBJECT_TYPE_DYNAMIC_RS_STATE:
1044 case VK_OBJECT_TYPE_DYNAMIC_CB_STATE:
1045 case VK_OBJECT_TYPE_DYNAMIC_DS_STATE:
1046 case VK_OBJECT_TYPE_RENDER_PASS:
1047 /* These are trivially destroyable */
1048 anv_device_free(device, (void *) _object);
1049 return VK_SUCCESS;
1050
1051 case VK_OBJECT_TYPE_COMMAND_BUFFER:
1052 case VK_OBJECT_TYPE_PIPELINE:
1053 case VK_OBJECT_TYPE_DYNAMIC_VP_STATE:
1054 case VK_OBJECT_TYPE_FENCE:
1055 case VK_OBJECT_TYPE_QUERY_POOL:
1056 case VK_OBJECT_TYPE_FRAMEBUFFER:
1057 (object->destructor)(device, object, objType);
1058 return VK_SUCCESS;
1059
1060 case VK_OBJECT_TYPE_SEMAPHORE:
1061 case VK_OBJECT_TYPE_EVENT:
1062 stub_return(VK_UNSUPPORTED);
1063
1064 default:
1065 unreachable("Invalid object type");
1066 }
1067 }
1068
1069 static void
1070 fill_memory_requirements(
1071 VkObjectType objType,
1072 VkObject object,
1073 VkMemoryRequirements * memory_requirements)
1074 {
1075 struct anv_buffer *buffer;
1076 struct anv_image *image;
1077
1078 memory_requirements->memPropsAllowed =
1079 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
1080 VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
1081 /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
1082 VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
1083 VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
1084 VK_MEMORY_PROPERTY_SHAREABLE_BIT;
1085
1086 memory_requirements->memPropsRequired = 0;
1087
1088 switch (objType) {
1089 case VK_OBJECT_TYPE_BUFFER:
1090 buffer = (struct anv_buffer *) object;
1091 memory_requirements->size = buffer->size;
1092 memory_requirements->alignment = 16;
1093 break;
1094 case VK_OBJECT_TYPE_IMAGE:
1095 image = (struct anv_image *) object;
1096 memory_requirements->size = image->size;
1097 memory_requirements->alignment = image->alignment;
1098 break;
1099 default:
1100 memory_requirements->size = 0;
1101 break;
1102 }
1103 }
1104
1105 static uint32_t
1106 get_allocation_count(VkObjectType objType)
1107 {
1108 switch (objType) {
1109 case VK_OBJECT_TYPE_BUFFER:
1110 case VK_OBJECT_TYPE_IMAGE:
1111 return 1;
1112 default:
1113 return 0;
1114 }
1115 }
1116
1117 VkResult anv_GetObjectInfo(
1118 VkDevice _device,
1119 VkObjectType objType,
1120 VkObject object,
1121 VkObjectInfoType infoType,
1122 size_t* pDataSize,
1123 void* pData)
1124 {
1125 VkMemoryRequirements memory_requirements;
1126 uint32_t *count;
1127
1128 switch (infoType) {
1129 case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
1130 *pDataSize = sizeof(memory_requirements);
1131 if (pData == NULL)
1132 return VK_SUCCESS;
1133
1134 fill_memory_requirements(objType, object, pData);
1135 return VK_SUCCESS;
1136
1137 case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
1138       *pDataSize = sizeof(*count);
1139 if (pData == NULL)
1140 return VK_SUCCESS;
1141
1142 count = pData;
1143 *count = get_allocation_count(objType);
1144 return VK_SUCCESS;
1145
1146 default:
1147 return VK_UNSUPPORTED;
1148 }
1149
1150 }
1151
1152 VkResult anv_QueueBindObjectMemory(
1153 VkQueue queue,
1154 VkObjectType objType,
1155 VkObject object,
1156 uint32_t allocationIdx,
1157 VkDeviceMemory _mem,
1158 VkDeviceSize memOffset)
1159 {
1160 struct anv_buffer *buffer;
1161 struct anv_image *image;
1162 struct anv_device_memory *mem = (struct anv_device_memory *) _mem;
1163
1164 switch (objType) {
1165 case VK_OBJECT_TYPE_BUFFER:
1166 buffer = (struct anv_buffer *) object;
1167 buffer->bo = &mem->bo;
1168 buffer->offset = memOffset;
1169 break;
1170 case VK_OBJECT_TYPE_IMAGE:
1171 image = (struct anv_image *) object;
1172 image->bo = &mem->bo;
1173 image->offset = memOffset;
1174 break;
1175 default:
1176 break;
1177 }
1178
1179 return VK_SUCCESS;
1180 }
1181
1182 VkResult anv_QueueBindObjectMemoryRange(
1183 VkQueue queue,
1184 VkObjectType objType,
1185 VkObject object,
1186 uint32_t allocationIdx,
1187 VkDeviceSize rangeOffset,
1188 VkDeviceSize rangeSize,
1189 VkDeviceMemory mem,
1190 VkDeviceSize memOffset)
1191 {
1192 stub_return(VK_UNSUPPORTED);
1193 }
1194
1195 VkResult anv_QueueBindImageMemoryRange(
1196 VkQueue queue,
1197 VkImage image,
1198 uint32_t allocationIdx,
1199 const VkImageMemoryBindInfo* pBindInfo,
1200 VkDeviceMemory mem,
1201 VkDeviceSize memOffset)
1202 {
1203 stub_return(VK_UNSUPPORTED);
1204 }
1205
1206 static void
1207 anv_fence_destroy(struct anv_device *device,
1208 struct anv_object *object,
1209 VkObjectType obj_type)
1210 {
1211 struct anv_fence *fence = (struct anv_fence *) object;
1212
1213 assert(obj_type == VK_OBJECT_TYPE_FENCE);
1214
1215 anv_gem_munmap(fence->bo.map, fence->bo.size);
1216 anv_gem_close(device, fence->bo.gem_handle);
1217 anv_device_free(device, fence);
1218 }
1219
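/* A fence is implemented as its own tiny batch (MI_BATCH_BUFFER_END +
 * MI_NOOP) with a pre-built execbuf.  anv_QueueSubmit() executes it after
 * the real command buffers, so the fence status and wait queries below
 * reduce to anv_gem_wait() on the fence BO.
 */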
1220 VkResult anv_CreateFence(
1221 VkDevice _device,
1222 const VkFenceCreateInfo* pCreateInfo,
1223 VkFence* pFence)
1224 {
1225 struct anv_device *device = (struct anv_device *) _device;
1226 struct anv_fence *fence;
1227 struct anv_batch batch;
1228 VkResult result;
1229
1230 const uint32_t fence_size = 128;
1231
1232 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1233
1234 fence = anv_device_alloc(device, sizeof(*fence), 8,
1235 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1236 if (fence == NULL)
1237 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1238
1239 result = anv_bo_init_new(&fence->bo, device, fence_size);
1240 if (result != VK_SUCCESS)
1241 goto fail;
1242
1243 fence->base.destructor = anv_fence_destroy;
1244
1245 fence->bo.map =
1246 anv_gem_mmap(device, fence->bo.gem_handle, 0, fence->bo.size);
1247 batch.next = fence->bo.map;
1248 anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
1249 anv_batch_emit(&batch, GEN8_MI_NOOP);
1250
1251 fence->exec2_objects[0].handle = fence->bo.gem_handle;
1252 fence->exec2_objects[0].relocation_count = 0;
1253 fence->exec2_objects[0].relocs_ptr = 0;
1254 fence->exec2_objects[0].alignment = 0;
1255 fence->exec2_objects[0].offset = fence->bo.offset;
1256 fence->exec2_objects[0].flags = 0;
1257 fence->exec2_objects[0].rsvd1 = 0;
1258 fence->exec2_objects[0].rsvd2 = 0;
1259
1260 fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1261 fence->execbuf.buffer_count = 1;
1262 fence->execbuf.batch_start_offset = 0;
1263 fence->execbuf.batch_len = batch.next - fence->bo.map;
1264 fence->execbuf.cliprects_ptr = 0;
1265 fence->execbuf.num_cliprects = 0;
1266 fence->execbuf.DR1 = 0;
1267 fence->execbuf.DR4 = 0;
1268
1269 fence->execbuf.flags =
1270 I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1271 fence->execbuf.rsvd1 = device->context_id;
1272 fence->execbuf.rsvd2 = 0;
1273
1274    *pFence = (VkFence) fence;
1275
1276 return VK_SUCCESS;
1277
1278 fail:
1279 anv_device_free(device, fence);
1280
1281 return result;
1282 }
1283
1284 VkResult anv_ResetFences(
1285 VkDevice _device,
1286 uint32_t fenceCount,
1287 VkFence* pFences)
1288 {
1289 struct anv_fence **fences = (struct anv_fence **) pFences;
1290
1291    for (uint32_t i = 0; i < fenceCount; i++)
1292 fences[i]->ready = false;
1293
1294 return VK_SUCCESS;
1295 }
1296
1297 VkResult anv_GetFenceStatus(
1298 VkDevice _device,
1299 VkFence _fence)
1300 {
1301 struct anv_device *device = (struct anv_device *) _device;
1302 struct anv_fence *fence = (struct anv_fence *) _fence;
1303 int64_t t = 0;
1304 int ret;
1305
1306 if (fence->ready)
1307 return VK_SUCCESS;
1308
1309 ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1310 if (ret == 0) {
1311 fence->ready = true;
1312 return VK_SUCCESS;
1313 }
1314
1315 return VK_NOT_READY;
1316 }
1317
1318 VkResult anv_WaitForFences(
1319 VkDevice _device,
1320 uint32_t fenceCount,
1321 const VkFence* pFences,
1322 bool32_t waitAll,
1323 uint64_t timeout)
1324 {
1325 struct anv_device *device = (struct anv_device *) _device;
1326 struct anv_fence **fences = (struct anv_fence **) pFences;
1327 int64_t t = timeout;
1328 int ret;
1329
1330 /* FIXME: handle !waitAll */
1331
1332 for (uint32_t i = 0; i < fenceCount; i++) {
1333 ret = anv_gem_wait(device, fences[i]->bo.gem_handle, &t);
1334 if (ret == -1 && errno == ETIME)
1335 return VK_TIMEOUT;
1336 else if (ret == -1)
1337 return vk_error(VK_ERROR_UNKNOWN);
1338 }
1339
1340 return VK_SUCCESS;
1341 }
1342
1343 // Queue semaphore functions
1344
1345 VkResult anv_CreateSemaphore(
1346 VkDevice device,
1347 const VkSemaphoreCreateInfo* pCreateInfo,
1348 VkSemaphore* pSemaphore)
1349 {
1350 stub_return(VK_UNSUPPORTED);
1351 }
1352
1353 VkResult anv_QueueSignalSemaphore(
1354 VkQueue queue,
1355 VkSemaphore semaphore)
1356 {
1357 stub_return(VK_UNSUPPORTED);
1358 }
1359
1360 VkResult anv_QueueWaitSemaphore(
1361 VkQueue queue,
1362 VkSemaphore semaphore)
1363 {
1364 stub_return(VK_UNSUPPORTED);
1365 }
1366
1367 // Event functions
1368
1369 VkResult anv_CreateEvent(
1370 VkDevice device,
1371 const VkEventCreateInfo* pCreateInfo,
1372 VkEvent* pEvent)
1373 {
1374 stub_return(VK_UNSUPPORTED);
1375 }
1376
1377 VkResult anv_GetEventStatus(
1378 VkDevice device,
1379 VkEvent event)
1380 {
1381 stub_return(VK_UNSUPPORTED);
1382 }
1383
1384 VkResult anv_SetEvent(
1385 VkDevice device,
1386 VkEvent event)
1387 {
1388 stub_return(VK_UNSUPPORTED);
1389 }
1390
1391 VkResult anv_ResetEvent(
1392 VkDevice device,
1393 VkEvent event)
1394 {
1395 stub_return(VK_UNSUPPORTED);
1396 }
1397
1398 // Query functions
1399
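/* Occlusion query slots are { begin, end } counter pairs living in the pool
 * BO; they are written by the command-buffer query code elsewhere in the
 * driver, and anv_GetQueryPoolResults() reports end - begin, clamped to
 * UINT32_MAX for 32-bit results.
 */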
1400 static void
1401 anv_query_pool_destroy(struct anv_device *device,
1402 struct anv_object *object,
1403 VkObjectType obj_type)
1404 {
1405 struct anv_query_pool *pool = (struct anv_query_pool *) object;
1406
1407 assert(obj_type == VK_OBJECT_TYPE_QUERY_POOL);
1408
1409 anv_gem_munmap(pool->bo.map, pool->bo.size);
1410 anv_gem_close(device, pool->bo.gem_handle);
1411 anv_device_free(device, pool);
1412 }
1413
1414 VkResult anv_CreateQueryPool(
1415 VkDevice _device,
1416 const VkQueryPoolCreateInfo* pCreateInfo,
1417 VkQueryPool* pQueryPool)
1418 {
1419 struct anv_device *device = (struct anv_device *) _device;
1420 struct anv_query_pool *pool;
1421 VkResult result;
1422 size_t size;
1423
1424 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
1425
1426 switch (pCreateInfo->queryType) {
1427 case VK_QUERY_TYPE_OCCLUSION:
1428 break;
1429 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1430 return VK_UNSUPPORTED;
1431 default:
1432 unreachable("");
1433 }
1434
1435 pool = anv_device_alloc(device, sizeof(*pool), 8,
1436 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1437 if (pool == NULL)
1438 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1439
1440 pool->base.destructor = anv_query_pool_destroy;
1441
1442 pool->type = pCreateInfo->queryType;
1443 size = pCreateInfo->slots * sizeof(struct anv_query_pool_slot);
1444 result = anv_bo_init_new(&pool->bo, device, size);
1445 if (result != VK_SUCCESS)
1446 goto fail;
1447
1448 pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size);
1449
1450 *pQueryPool = (VkQueryPool) pool;
1451
1452 return VK_SUCCESS;
1453
1454 fail:
1455 anv_device_free(device, pool);
1456
1457 return result;
1458 }
1459
1460 VkResult anv_GetQueryPoolResults(
1461 VkDevice _device,
1462 VkQueryPool queryPool,
1463 uint32_t startQuery,
1464 uint32_t queryCount,
1465 size_t* pDataSize,
1466 void* pData,
1467 VkQueryResultFlags flags)
1468 {
1469 struct anv_device *device = (struct anv_device *) _device;
1470 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
1471 struct anv_query_pool_slot *slot = pool->bo.map;
1472 int64_t timeout = INT64_MAX;
1473 uint32_t *dst32 = pData;
1474 uint64_t *dst64 = pData;
1475 uint64_t result;
1476 int ret;
1477
1478 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1479       /* Where is the availability info supposed to go? */
1480 anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
1481 return VK_UNSUPPORTED;
1482 }
1483
1484 assert(pool->type == VK_QUERY_TYPE_OCCLUSION);
1485
1486 if (flags & VK_QUERY_RESULT_64_BIT)
1487 *pDataSize = queryCount * sizeof(uint64_t);
1488 else
1489 *pDataSize = queryCount * sizeof(uint32_t);
1490
1491 if (pData == NULL)
1492 return VK_SUCCESS;
1493
1494 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1495 ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
1496 if (ret == -1)
1497 return vk_error(VK_ERROR_UNKNOWN);
1498 }
1499
1500 for (uint32_t i = 0; i < queryCount; i++) {
1501 result = slot[startQuery + i].end - slot[startQuery + i].begin;
1502 if (flags & VK_QUERY_RESULT_64_BIT) {
1503 *dst64++ = result;
1504 } else {
1505 if (result > UINT32_MAX)
1506 result = UINT32_MAX;
1507 *dst32++ = result;
1508 }
1509 }
1510
1511 return VK_SUCCESS;
1512 }
1513
1514 // Buffer functions
1515
1516 VkResult anv_CreateBuffer(
1517 VkDevice _device,
1518 const VkBufferCreateInfo* pCreateInfo,
1519 VkBuffer* pBuffer)
1520 {
1521 struct anv_device *device = (struct anv_device *) _device;
1522 struct anv_buffer *buffer;
1523
1524 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1525
1526 buffer = anv_device_alloc(device, sizeof(*buffer), 8,
1527 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1528 if (buffer == NULL)
1529 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1530
1531 buffer->size = pCreateInfo->size;
1532 buffer->bo = NULL;
1533 buffer->offset = 0;
1534
1535 *pBuffer = (VkBuffer) buffer;
1536
1537 return VK_SUCCESS;
1538 }
1539
1540 // Buffer view functions
1541
1542 VkResult anv_CreateBufferView(
1543 VkDevice _device,
1544 const VkBufferViewCreateInfo* pCreateInfo,
1545 VkBufferView* pView)
1546 {
1547 struct anv_device *device = (struct anv_device *) _device;
1548 struct anv_buffer *buffer = (struct anv_buffer *) pCreateInfo->buffer;
1549 struct anv_surface_view *view;
1550 const struct anv_format *format;
1551
1552 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);
1553
1554 view = anv_device_alloc(device, sizeof(*view), 8,
1555 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1556 if (view == NULL)
1557 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1558
1559 view->bo = buffer->bo;
1560 view->offset = buffer->offset + pCreateInfo->offset;
1561 view->surface_state =
1562 anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
1563 view->format = pCreateInfo->format;
1564
1565 format = anv_format_for_vk_format(pCreateInfo->format);
1566 /* This assumes RGBA float format. */
1567 uint32_t stride = 4;
1568 uint32_t num_elements = pCreateInfo->range / stride;
1569 struct GEN8_RENDER_SURFACE_STATE surface_state = {
1570 .SurfaceType = SURFTYPE_BUFFER,
1571 .SurfaceArray = false,
1572 .SurfaceFormat = format->format,
1573 .SurfaceVerticalAlignment = VALIGN4,
1574 .SurfaceHorizontalAlignment = HALIGN4,
1575 .TileMode = LINEAR,
1576 .VerticalLineStride = 0,
1577 .VerticalLineStrideOffset = 0,
1578 .SamplerL2BypassModeDisable = true,
1579 .RenderCacheReadWriteMode = WriteOnlyCache,
1580 .MemoryObjectControlState = GEN8_MOCS,
1581 .BaseMipLevel = 0,
1582 .SurfaceQPitch = 0,
           /* For SURFTYPE_BUFFER these fields encode (element count - 1),
            * split across Width[6:0], Height[20:7] and Depth[30:21]. */
1583       .Height = ((num_elements - 1) >> 7) & 0x3fff,
1584       .Width = (num_elements - 1) & 0x7f,
1585       .Depth = ((num_elements - 1) >> 21) & 0x3f,
1586 .SurfacePitch = stride - 1,
1587 .MinimumArrayElement = 0,
1588 .NumberofMultisamples = MULTISAMPLECOUNT_1,
1589 .XOffset = 0,
1590 .YOffset = 0,
1591 .SurfaceMinLOD = 0,
1592 .MIPCountLOD = 0,
1593 .AuxiliarySurfaceMode = AUX_NONE,
1594 .RedClearColor = 0,
1595 .GreenClearColor = 0,
1596 .BlueClearColor = 0,
1597 .AlphaClearColor = 0,
1598 .ShaderChannelSelectRed = SCS_RED,
1599 .ShaderChannelSelectGreen = SCS_GREEN,
1600 .ShaderChannelSelectBlue = SCS_BLUE,
1601 .ShaderChannelSelectAlpha = SCS_ALPHA,
1602 .ResourceMinLOD = 0,
1603 /* FIXME: We assume that the image must be bound at this time. */
1604 .SurfaceBaseAddress = { NULL, view->offset },
1605 };
1606
1607 GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);
1608
1609    *pView = (VkBufferView) view;
1610
1611 return VK_SUCCESS;
1612 }
1613
1614 // Sampler functions
1615
1616 VkResult anv_CreateSampler(
1617 VkDevice _device,
1618 const VkSamplerCreateInfo* pCreateInfo,
1619 VkSampler* pSampler)
1620 {
1621 struct anv_device *device = (struct anv_device *) _device;
1622 struct anv_sampler *sampler;
1623
1624 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1625
1626 sampler = anv_device_alloc(device, sizeof(*sampler), 8,
1627 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1628 if (!sampler)
1629 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1630
1631 static const uint32_t vk_to_gen_tex_filter[] = {
1632 [VK_TEX_FILTER_NEAREST] = MAPFILTER_NEAREST,
1633 [VK_TEX_FILTER_LINEAR] = MAPFILTER_LINEAR
1634 };
1635
1636 static const uint32_t vk_to_gen_mipmap_mode[] = {
1637 [VK_TEX_MIPMAP_MODE_BASE] = MIPFILTER_NONE,
1638 [VK_TEX_MIPMAP_MODE_NEAREST] = MIPFILTER_NEAREST,
1639 [VK_TEX_MIPMAP_MODE_LINEAR] = MIPFILTER_LINEAR
1640 };
1641
1642 static const uint32_t vk_to_gen_tex_address[] = {
1643 [VK_TEX_ADDRESS_WRAP] = TCM_WRAP,
1644 [VK_TEX_ADDRESS_MIRROR] = TCM_MIRROR,
1645 [VK_TEX_ADDRESS_CLAMP] = TCM_CLAMP,
1646 [VK_TEX_ADDRESS_MIRROR_ONCE] = TCM_MIRROR_ONCE,
1647 [VK_TEX_ADDRESS_CLAMP_BORDER] = TCM_CLAMP_BORDER,
1648 };
1649
1650 static const uint32_t vk_to_gen_compare_op[] = {
1651 [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER,
1652 [VK_COMPARE_OP_LESS] = PREFILTEROPLESS,
1653 [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL,
1654 [VK_COMPARE_OP_LESS_EQUAL] = PREFILTEROPLEQUAL,
1655 [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER,
1656 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL,
1657 [VK_COMPARE_OP_GREATER_EQUAL] = PREFILTEROPGEQUAL,
1658 [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS,
1659 };
1660
1661 if (pCreateInfo->maxAnisotropy > 0)
1662 anv_finishme("missing support for anisotropic filtering");
1663
1664 struct GEN8_SAMPLER_STATE sampler_state = {
1665 .SamplerDisable = false,
1666 .TextureBorderColorMode = DX10OGL,
1667 .LODPreClampMode = 0,
1668 .BaseMipLevel = 0,
1669 .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode],
1670 .MagModeFilter = vk_to_gen_tex_filter[pCreateInfo->magFilter],
1671 .MinModeFilter = vk_to_gen_tex_filter[pCreateInfo->minFilter],
1672 .TextureLODBias = pCreateInfo->mipLodBias * 256,
1673 .AnisotropicAlgorithm = EWAApproximation,
1674 .MinLOD = pCreateInfo->minLod * 256,
1675 .MaxLOD = pCreateInfo->maxLod * 256,
1676 .ChromaKeyEnable = 0,
1677 .ChromaKeyIndex = 0,
1678 .ChromaKeyMode = 0,
1679 .ShadowFunction = vk_to_gen_compare_op[pCreateInfo->compareOp],
1680 .CubeSurfaceControlMode = 0,
1681 .IndirectStatePointer = 0,
1682 .LODClampMagnificationMode = MIPNONE,
1683 .MaximumAnisotropy = 0,
1684 .RAddressMinFilterRoundingEnable = 0,
1685 .RAddressMagFilterRoundingEnable = 0,
1686 .VAddressMinFilterRoundingEnable = 0,
1687 .VAddressMagFilterRoundingEnable = 0,
1688 .UAddressMinFilterRoundingEnable = 0,
1689 .UAddressMagFilterRoundingEnable = 0,
1690 .TrilinearFilterQuality = 0,
1691 .NonnormalizedCoordinateEnable = 0,
1692 .TCXAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressU],
1693 .TCYAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressV],
1694 .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressW],
1695 };
1696
1697 GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);
1698
1699 *pSampler = (VkSampler) sampler;
1700
1701 return VK_SUCCESS;
1702 }
1703
1704 // Descriptor set functions
1705
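/* A descriptor set layout is stored as one flat uint32_t array of
 * descriptor indices: for each shader stage, first the indices that need a
 * binding-table (surface state) entry, then the ones that need a sampler.
 * The counting loops below size that array and the final loop fills it in
 * binding order.
 */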
1706 VkResult anv_CreateDescriptorSetLayout(
1707 VkDevice _device,
1708 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
1709 VkDescriptorSetLayout* pSetLayout)
1710 {
1711 struct anv_device *device = (struct anv_device *) _device;
1712 struct anv_descriptor_set_layout *set_layout;
1713
1714 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
1715
1716 uint32_t sampler_count[VK_NUM_SHADER_STAGE] = { 0, };
1717 uint32_t surface_count[VK_NUM_SHADER_STAGE] = { 0, };
1718 uint32_t num_dynamic_buffers = 0;
1719 uint32_t count = 0;
1720 uint32_t s;
1721
1722 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1723 switch (pCreateInfo->pBinding[i].descriptorType) {
1724 case VK_DESCRIPTOR_TYPE_SAMPLER:
1725 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1726 sampler_count[s] += pCreateInfo->pBinding[i].count;
1727 break;
1728
1729 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1730 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1731 sampler_count[s] += pCreateInfo->pBinding[i].count;
1732
1733 /* fall through */
1734
1735 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1736 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1737 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1738 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1739 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1740 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1741 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1742 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1743 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1744 surface_count[s] += pCreateInfo->pBinding[i].count;
1745 break;
1746 default:
1747 break;
1748 }
1749
1750 count += pCreateInfo->pBinding[i].count;
1751 }
1752
1753 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1754 switch (pCreateInfo->pBinding[i].descriptorType) {
1755 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1756 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1757 num_dynamic_buffers++;
1758 break;
1759 default:
1760 break;
1761 }
1762 }
1763
1764 uint32_t sampler_total = 0;
1765 uint32_t surface_total = 0;
1766 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
1767 sampler_total += sampler_count[s];
1768 surface_total += surface_count[s];
1769 }
1770
1771 size_t size = sizeof(*set_layout) +
1772 (sampler_total + surface_total) * sizeof(uint32_t);
1773 set_layout = anv_device_alloc(device, size, 8,
1774 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1775 if (!set_layout)
1776 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1777
1778 set_layout->num_dynamic_buffers = num_dynamic_buffers;
1779 set_layout->count = count;
1780
1781 uint32_t *p = set_layout->entries;
1782 uint32_t *sampler[VK_NUM_SHADER_STAGE];
1783 uint32_t *surface[VK_NUM_SHADER_STAGE];
1784 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
1785 set_layout->stage[s].surface_count = surface_count[s];
1786 set_layout->stage[s].surface_start = surface[s] = p;
1787 p += surface_count[s];
1788 set_layout->stage[s].sampler_count = sampler_count[s];
1789 set_layout->stage[s].sampler_start = sampler[s] = p;
1790 p += sampler_count[s];
1791 }
1792
1793 uint32_t descriptor = 0;
1794 for (uint32_t i = 0; i < pCreateInfo->count; i++) {
1795 switch (pCreateInfo->pBinding[i].descriptorType) {
1796 case VK_DESCRIPTOR_TYPE_SAMPLER:
1797 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1798 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
1799 *(sampler[s])++ = descriptor + j;
1800 break;
1801
1802 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1803 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1804 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++)
1805 *(sampler[s])++ = descriptor + j;
1806
1807 /* fallthrough */
1808
1809 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1810 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1811 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1812 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1813 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1814 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1815 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1816 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1817 for_each_bit(s, pCreateInfo->pBinding[i].stageFlags)
1818 for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
1819 *(surface[s])++ = descriptor + j;
1820 }
1821 break;
1822 default:
1823 unreachable("");
1824 }
1825 descriptor += pCreateInfo->pBinding[i].count;
1826 }
1827
1828 *pSetLayout = (VkDescriptorSetLayout) set_layout;
1829
1830 return VK_SUCCESS;
1831 }
1832
1833 VkResult anv_BeginDescriptorPoolUpdate(
1834 VkDevice device,
1835 VkDescriptorUpdateMode updateMode)
1836 {
1837 return VK_SUCCESS;
1838 }
1839
1840 VkResult anv_EndDescriptorPoolUpdate(
1841 VkDevice device,
1842 VkCmdBuffer cmd)
1843 {
1844 return VK_SUCCESS;
1845 }
1846
1847 VkResult anv_CreateDescriptorPool(
1848 VkDevice device,
1849 VkDescriptorPoolUsage poolUsage,
1850 uint32_t maxSets,
1851 const VkDescriptorPoolCreateInfo* pCreateInfo,
1852 VkDescriptorPool* pDescriptorPool)
1853 {
1854 *pDescriptorPool = 1;
1855
1856 return VK_SUCCESS;
1857 }
1858
1859 VkResult anv_ResetDescriptorPool(
1860 VkDevice device,
1861 VkDescriptorPool descriptorPool)
1862 {
1863 return VK_SUCCESS;
1864 }
1865
1866 VkResult anv_AllocDescriptorSets(
1867 VkDevice _device,
1868 VkDescriptorPool descriptorPool,
1869 VkDescriptorSetUsage setUsage,
1870 uint32_t count,
1871 const VkDescriptorSetLayout* pSetLayouts,
1872 VkDescriptorSet* pDescriptorSets,
1873 uint32_t* pCount)
1874 {
1875 struct anv_device *device = (struct anv_device *) _device;
1876 const struct anv_descriptor_set_layout *layout;
1877 struct anv_descriptor_set *set;
1878 size_t size;
1879
1880 for (uint32_t i = 0; i < count; i++) {
1881 layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
1882 size = sizeof(*set) + layout->count * sizeof(set->descriptors[0]);
1883 set = anv_device_alloc(device, size, 8,
1884 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
1885 if (!set) {
1886 *pCount = i;
1887 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1888 }
1889
1890 /* Descriptor sets may not be 100% filled out so we need to memset to
1891 * ensure that we can properly detect and handle holes.
1892 */
1893 memset(set, 0, size);
1894
1895 pDescriptorSets[i] = (VkDescriptorSet) set;
1896 }
1897
1898 *pCount = count;
1899
1900 return VK_SUCCESS;
1901 }
1902
1903 void anv_ClearDescriptorSets(
1904 VkDevice device,
1905 VkDescriptorPool descriptorPool,
1906 uint32_t count,
1907 const VkDescriptorSet* pDescriptorSets)
1908 {
1909 }
1910
1911 void anv_UpdateDescriptors(
1912 VkDevice _device,
1913 VkDescriptorSet descriptorSet,
1914 uint32_t updateCount,
1915 const void** ppUpdateArray)
1916 {
1917 struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
1918 VkUpdateSamplers *update_samplers;
1919 VkUpdateSamplerTextures *update_sampler_textures;
1920 VkUpdateImages *update_images;
1921 VkUpdateBuffers *update_buffers;
1922 VkUpdateAsCopy *update_as_copy;
1923
1924 for (uint32_t i = 0; i < updateCount; i++) {
1925 const struct anv_common *common = ppUpdateArray[i];
1926
1927 switch (common->sType) {
1928 case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
1929 update_samplers = (VkUpdateSamplers *) common;
1930
1931 for (uint32_t j = 0; j < update_samplers->count; j++) {
1932 set->descriptors[update_samplers->binding + j].sampler =
1933 (struct anv_sampler *) update_samplers->pSamplers[j];
1934 }
1935 break;
1936
1937 case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
1938 /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
1939 update_sampler_textures = (VkUpdateSamplerTextures *) common;
1940
1941 for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
1942 set->descriptors[update_sampler_textures->binding + j].view =
1943 (struct anv_surface_view *)
1944 update_sampler_textures->pSamplerImageViews[j].pImageView->view;
1945 set->descriptors[update_sampler_textures->binding + j].sampler =
1946 (struct anv_sampler *)
1947 update_sampler_textures->pSamplerImageViews[j].sampler;
1948 }
1949 break;
1950
1951 case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
1952 update_images = (VkUpdateImages *) common;
1953
1954 for (uint32_t j = 0; j < update_images->count; j++) {
1955 set->descriptors[update_images->binding + j].view =
1956 (struct anv_surface_view *) update_images->pImageViews[j].view;
1957 }
1958 break;
1959
1960 case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
1961 update_buffers = (VkUpdateBuffers *) common;
1962
1963 for (uint32_t j = 0; j < update_buffers->count; j++) {
1964 set->descriptors[update_buffers->binding + j].view =
1965 (struct anv_surface_view *) update_buffers->pBufferViews[j].view;
1966 }
1967 /* FIXME: descriptor arrays? */
1968 break;
1969
1970 case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
1971 update_as_copy = (VkUpdateAsCopy *) common;
1972 (void) update_as_copy;
1973 break;
1974
1975 default:
1976 break;
1977 }
1978 }
1979 }
1980
1981 // State object functions
1982
1983 static inline int64_t
1984 clamp_int64(int64_t x, int64_t min, int64_t max)
1985 {
1986 if (x < min)
1987 return min;
1988 else if (x < max)
1989 return x;
1990 else
1991 return max;
1992 }
1993
1994 static void
1995 anv_dynamic_vp_state_destroy(struct anv_device *device,
1996 struct anv_object *object,
1997 VkObjectType obj_type)
1998 {
1999 struct anv_dynamic_vp_state *state = (void *)object;
2000
2001 assert(obj_type == VK_OBJECT_TYPE_DYNAMIC_VP_STATE);
2002
2003 anv_state_pool_free(&device->dynamic_state_pool, state->sf_clip_vp);
2004 anv_state_pool_free(&device->dynamic_state_pool, state->cc_vp);
2005 anv_state_pool_free(&device->dynamic_state_pool, state->scissor);
2006
2007 anv_device_free(device, state);
2008 }
2009
2010 VkResult anv_CreateDynamicViewportState(
2011 VkDevice _device,
2012 const VkDynamicVpStateCreateInfo* pCreateInfo,
2013 VkDynamicVpState* pState)
2014 {
2015 struct anv_device *device = (struct anv_device *) _device;
2016 struct anv_dynamic_vp_state *state;
2017
2018 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);
2019
2020 state = anv_device_alloc(device, sizeof(*state), 8,
2021 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2022 if (state == NULL)
2023 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2024
2025 state->base.destructor = anv_dynamic_vp_state_destroy;
2026
2027 unsigned count = pCreateInfo->viewportAndScissorCount;
2028 state->sf_clip_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
2029 count * 64, 64);
2030 state->cc_vp = anv_state_pool_alloc(&device->dynamic_state_pool,
2031 count * 8, 32);
2032 state->scissor = anv_state_pool_alloc(&device->dynamic_state_pool,
2033 count * 32, 32);
2034
2035 for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
2036 const VkViewport *vp = &pCreateInfo->pViewports[i];
2037 const VkRect *s = &pCreateInfo->pScissors[i];
2038
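         /* The viewport matrix maps NDC to window coordinates: m00/m11/m22 are
          * the x/y/z scales and m30/m31/m32 the translations, so for example
          * x_window = m00 * x_ndc + m30.
          */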
2039 struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
2040 .ViewportMatrixElementm00 = vp->width / 2,
2041 .ViewportMatrixElementm11 = vp->height / 2,
2042 .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
2043 .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
2044 .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
2045 .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
2046 .XMinClipGuardband = -1.0f,
2047 .XMaxClipGuardband = 1.0f,
2048 .YMinClipGuardband = -1.0f,
2049 .YMaxClipGuardband = 1.0f,
2050 .XMinViewPort = vp->originX,
2051 .XMaxViewPort = vp->originX + vp->width - 1,
2052 .YMinViewPort = vp->originY,
2053 .YMaxViewPort = vp->originY + vp->height - 1,
2054 };
2055
2056 struct GEN8_CC_VIEWPORT cc_viewport = {
2057 .MinimumDepth = vp->minDepth,
2058 .MaximumDepth = vp->maxDepth
2059 };
2060
2061          /* Since xmax and ymax are inclusive, an empty clip requires xmax < xmin
2062           * or ymax < ymin. If the clip x, y, width and height are all 0, the
2063           * clamps below produce 0 for xmin, ymin, xmax and ymax, which isn't what
2064           * we want. Just special-case empty clips and produce a canonical empty
2065           * clip. */
2066 static const struct GEN8_SCISSOR_RECT empty_scissor = {
2067 .ScissorRectangleYMin = 1,
2068 .ScissorRectangleXMin = 1,
2069 .ScissorRectangleYMax = 0,
2070 .ScissorRectangleXMax = 0
2071 };
2072
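         /* The hardware scissor rectangle fields are 16 bits wide, hence the
          * clamp to 0xffff below.
          */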
2073 const int max = 0xffff;
2074 struct GEN8_SCISSOR_RECT scissor = {
2075 /* Do this math using int64_t so overflow gets clamped correctly. */
2076 .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
2077 .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
2078 .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
2079 .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
2080 };
2081
2082 GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
2083 GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 32, &cc_viewport);
2084
2085 if (s->extent.width <= 0 || s->extent.height <= 0) {
2086 GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
2087 } else {
2088 GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
2089 }
2090 }
2091
2092 *pState = (VkDynamicVpState) state;
2093
2094 return VK_SUCCESS;
2095 }
2096
2097 VkResult anv_CreateDynamicRasterState(
2098 VkDevice _device,
2099 const VkDynamicRsStateCreateInfo* pCreateInfo,
2100 VkDynamicRsState* pState)
2101 {
2102 struct anv_device *device = (struct anv_device *) _device;
2103 struct anv_dynamic_rs_state *state;
2104
2105 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);
2106
2107 state = anv_device_alloc(device, sizeof(*state), 8,
2108 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2109 if (state == NULL)
2110 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2111
2112 /* Missing these:
2113 * float pointFadeThreshold;
2114 * // optional (GL45) - Size of point fade threshold
2115 */
2116
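   /* Only the dynamic fields are packed here; anv_cmd_buffer_flush_state later
    * ORs these dwords with the pipeline's partially packed 3DSTATE_SF and
    * 3DSTATE_RASTER using anv_batch_emit_merge.
    */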
2117 struct GEN8_3DSTATE_SF sf = {
2118 GEN8_3DSTATE_SF_header,
2119 .LineWidth = pCreateInfo->lineWidth,
2120 .PointWidth = pCreateInfo->pointSize,
2121 };
2122
2123 GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);
2124
2125 bool enable_bias = pCreateInfo->depthBias != 0.0f ||
2126 pCreateInfo->slopeScaledDepthBias != 0.0f;
2127 struct GEN8_3DSTATE_RASTER raster = {
2128 .GlobalDepthOffsetEnableSolid = enable_bias,
2129 .GlobalDepthOffsetEnableWireframe = enable_bias,
2130 .GlobalDepthOffsetEnablePoint = enable_bias,
2131 .GlobalDepthOffsetConstant = pCreateInfo->depthBias,
2132 .GlobalDepthOffsetScale = pCreateInfo->slopeScaledDepthBias,
2133 .GlobalDepthOffsetClamp = pCreateInfo->depthBiasClamp
2134 };
2135
2136 GEN8_3DSTATE_RASTER_pack(NULL, state->state_raster, &raster);
2137
2138 *pState = (VkDynamicRsState) state;
2139
2140 return VK_SUCCESS;
2141 }
2142
2143 VkResult anv_CreateDynamicColorBlendState(
2144 VkDevice _device,
2145 const VkDynamicCbStateCreateInfo* pCreateInfo,
2146 VkDynamicCbState* pState)
2147 {
2148 struct anv_device *device = (struct anv_device *) _device;
2149 struct anv_dynamic_cb_state *state;
2150
2151 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);
2152
2153 state = anv_device_alloc(device, sizeof(*state), 8,
2154 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2155 if (state == NULL)
2156 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2157
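   /* The blend constants share COLOR_CALC_STATE with the depth-stencil
    * reference values; anv_cmd_buffer_flush_state merges the two copies when
    * both dynamic states are bound.
    */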
2158 struct GEN8_COLOR_CALC_STATE color_calc_state = {
2159 .BlendConstantColorRed = pCreateInfo->blendConst[0],
2160 .BlendConstantColorGreen = pCreateInfo->blendConst[1],
2161 .BlendConstantColorBlue = pCreateInfo->blendConst[2],
2162 .BlendConstantColorAlpha = pCreateInfo->blendConst[3]
2163 };
2164
2165 GEN8_COLOR_CALC_STATE_pack(NULL, state->state_color_calc, &color_calc_state);
2166
2167 *pState = (VkDynamicCbState) state;
2168
2169 return VK_SUCCESS;
2170 }
2171
2172 VkResult anv_CreateDynamicDepthStencilState(
2173 VkDevice _device,
2174 const VkDynamicDsStateCreateInfo* pCreateInfo,
2175 VkDynamicDsState* pState)
2176 {
2177 struct anv_device *device = (struct anv_device *) _device;
2178 struct anv_dynamic_ds_state *state;
2179
2180 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO);
2181
2182 state = anv_device_alloc(device, sizeof(*state), 8,
2183 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2184 if (state == NULL)
2185 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2186
2187 struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
2188 GEN8_3DSTATE_WM_DEPTH_STENCIL_header,
2189
2190 /* Is this what we need to do? */
2191 .StencilBufferWriteEnable = pCreateInfo->stencilWriteMask != 0,
2192
2193 .StencilTestMask = pCreateInfo->stencilReadMask,
2194 .StencilWriteMask = pCreateInfo->stencilWriteMask,
2195
2196 .BackfaceStencilTestMask = pCreateInfo->stencilReadMask,
2197 .BackfaceStencilWriteMask = pCreateInfo->stencilWriteMask,
2198 };
2199
2200 GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, state->state_wm_depth_stencil,
2201 &wm_depth_stencil);
2202
2203 struct GEN8_COLOR_CALC_STATE color_calc_state = {
2204 .StencilReferenceValue = pCreateInfo->stencilFrontRef,
2205 .BackFaceStencilReferenceValue = pCreateInfo->stencilBackRef
2206 };
2207
2208 GEN8_COLOR_CALC_STATE_pack(NULL, state->state_color_calc, &color_calc_state);
2209
2210 *pState = (VkDynamicDsState) state;
2211
2212 return VK_SUCCESS;
2213 }
2214
2215 // Command buffer functions
2216
2217 static void
2218 anv_cmd_buffer_destroy(struct anv_device *device,
2219 struct anv_object *object,
2220 VkObjectType obj_type)
2221 {
2222 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;
2223
2224 assert(obj_type == VK_OBJECT_TYPE_COMMAND_BUFFER);
2225
2226 anv_gem_munmap(cmd_buffer->surface_bo.map, BATCH_SIZE);
2227 anv_gem_close(device, cmd_buffer->surface_bo.gem_handle);
2228 anv_reloc_list_finish(&cmd_buffer->surface_relocs, device);
2229 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
2230 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
2231 anv_state_stream_finish(&cmd_buffer->binding_table_state_stream);
2232 anv_batch_finish(&cmd_buffer->batch);
2233 anv_device_free(device, cmd_buffer->exec2_objects);
2234 anv_device_free(device, cmd_buffer->exec2_bos);
2235 anv_device_free(device, cmd_buffer);
2236 }
2237
2238 VkResult anv_CreateCommandBuffer(
2239 VkDevice _device,
2240 const VkCmdBufferCreateInfo* pCreateInfo,
2241 VkCmdBuffer* pCmdBuffer)
2242 {
2243 struct anv_device *device = (struct anv_device *) _device;
2244 struct anv_cmd_buffer *cmd_buffer;
2245 VkResult result;
2246
2247 cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
2248 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2249 if (cmd_buffer == NULL)
2250 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2251
2252 cmd_buffer->base.destructor = anv_cmd_buffer_destroy;
2253
2254 cmd_buffer->device = device;
2255 cmd_buffer->rs_state = NULL;
2256 cmd_buffer->vp_state = NULL;
2257 memset(&cmd_buffer->default_bindings, 0, sizeof(cmd_buffer->default_bindings));
2258 cmd_buffer->bindings = &cmd_buffer->default_bindings;
2259
2260 result = anv_batch_init(&cmd_buffer->batch, device);
2261 if (result != VK_SUCCESS)
2262 goto fail;
2263
2264 result = anv_bo_init_new(&cmd_buffer->surface_bo, device, BATCH_SIZE);
2265 if (result != VK_SUCCESS)
2266 goto fail_batch;
2267
2268 cmd_buffer->surface_bo.map =
2269 anv_gem_mmap(device, cmd_buffer->surface_bo.gem_handle, 0, BATCH_SIZE);
2270 if (cmd_buffer->surface_bo.map == NULL) {
2271 result = vk_error(VK_ERROR_MEMORY_MAP_FAILED);
2272 goto fail_surface_bo;
2273 }
2274
2275 /* Start surface_next at 1 so surface offset 0 is invalid. */
2276 cmd_buffer->surface_next = 1;
2277 anv_reloc_list_init(&cmd_buffer->surface_relocs, device);
2278
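   /* Unlike the relocation lists, the execbuf object and bo arrays are
    * allocated once with a fixed capacity of 8192 entries and do not grow on
    * demand.
    */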
2279 cmd_buffer->exec2_objects =
2280 anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
2281 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2282 if (cmd_buffer->exec2_objects == NULL) {
2283 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2284 goto fail_surface_map;
2285 }
2286
2287 cmd_buffer->exec2_bos =
2288 anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
2289 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2290 if (cmd_buffer->exec2_bos == NULL) {
2291 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2292 goto fail_exec2_objects;
2293 }
2294
2295 anv_state_stream_init(&cmd_buffer->binding_table_state_stream,
2296 &device->binding_table_block_pool);
2297 anv_state_stream_init(&cmd_buffer->surface_state_stream,
2298 &device->surface_state_block_pool);
2299 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
2300 &device->dynamic_state_block_pool);
2301
2302 cmd_buffer->dirty = 0;
2303 cmd_buffer->vb_dirty = 0;
2304 cmd_buffer->pipeline = NULL;
2305
2306 *pCmdBuffer = (VkCmdBuffer) cmd_buffer;
2307
2308 return VK_SUCCESS;
2309
2310 fail_exec2_objects:
2311 anv_device_free(device, cmd_buffer->exec2_objects);
2312 fail_surface_map:
2313 anv_gem_munmap(cmd_buffer->surface_bo.map, BATCH_SIZE);
2314 fail_surface_bo:
2315 anv_gem_close(device, cmd_buffer->surface_bo.gem_handle);
2316 fail_batch:
2317 anv_batch_finish(&cmd_buffer->batch);
2318 fail:
2319 anv_device_free(device, cmd_buffer);
2320
2321 return result;
2322 }
2323
2324 VkResult anv_BeginCommandBuffer(
2325 VkCmdBuffer cmdBuffer,
2326 const VkCmdBufferBeginInfo* pBeginInfo)
2327 {
2328 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2329 struct anv_device *device = cmd_buffer->device;
2330
2331 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
2332 .PipelineSelection = _3D);
2333 anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);
2334
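   /* Surface state is allocated out of this command buffer's own surface bo,
    * while dynamic and instruction state come from device-wide block pools.
    */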
2335 anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
2336 .GeneralStateBaseAddress = { NULL, 0 },
2337 .GeneralStateMemoryObjectControlState = GEN8_MOCS,
2338 .GeneralStateBaseAddressModifyEnable = true,
2339 .GeneralStateBufferSize = 0xfffff,
2340 .GeneralStateBufferSizeModifyEnable = true,
2341
2342 .SurfaceStateBaseAddress = { &cmd_buffer->surface_bo, 0 },
2343 .SurfaceStateMemoryObjectControlState = GEN8_MOCS,
2344 .SurfaceStateBaseAddressModifyEnable = true,
2345
2346 .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
2347 .DynamicStateMemoryObjectControlState = GEN8_MOCS,
2348 .DynamicStateBaseAddressModifyEnable = true,
2349 .DynamicStateBufferSize = 0xfffff,
2350 .DynamicStateBufferSizeModifyEnable = true,
2351
2352 .IndirectObjectBaseAddress = { NULL, 0 },
2353 .IndirectObjectMemoryObjectControlState = GEN8_MOCS,
2354 .IndirectObjectBaseAddressModifyEnable = true,
2355 .IndirectObjectBufferSize = 0xfffff,
2356 .IndirectObjectBufferSizeModifyEnable = true,
2357
2358 .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
2359 .InstructionMemoryObjectControlState = GEN8_MOCS,
2360 .InstructionBaseAddressModifyEnable = true,
2361 .InstructionBufferSize = 0xfffff,
2362 .InstructionBuffersizeModifyEnable = true);
2363
2364 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
2365 .StatisticsEnable = true);
2366 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
2367 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
2368 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
2369 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);
2370
2371 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2372 .ConstantBufferOffset = 0,
2373 .ConstantBufferSize = 4);
2374 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2375 .ConstantBufferOffset = 4,
2376 .ConstantBufferSize = 4);
2377 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2378 .ConstantBufferOffset = 8,
2379 .ConstantBufferSize = 4);
2380
2381 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
2382 .ChromaKeyKillEnable = false);
2383 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
2384 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);
2385
2386 return VK_SUCCESS;
2387 }
2388
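/* Add a bo to the command buffer's execbuf object list. If a relocation list
 * is given, it is attached to the exec object so the kernel can process those
 * relocations at submit time.
 */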
2389 static void
2390 anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
2391 struct anv_bo *bo, struct anv_reloc_list *list)
2392 {
2393 struct drm_i915_gem_exec_object2 *obj;
2394
2395 bo->index = cmd_buffer->bo_count;
2396 obj = &cmd_buffer->exec2_objects[bo->index];
2397 cmd_buffer->exec2_bos[bo->index] = bo;
2398 cmd_buffer->bo_count++;
2399
2400 obj->handle = bo->gem_handle;
2401 obj->relocation_count = 0;
2402 obj->relocs_ptr = 0;
2403 obj->alignment = 0;
2404 obj->offset = bo->offset;
2405 obj->flags = 0;
2406 obj->rsvd1 = 0;
2407 obj->rsvd2 = 0;
2408
2409 if (list) {
2410 obj->relocation_count = list->num_relocs;
2411 obj->relocs_ptr = (uintptr_t) list->relocs;
2412 }
2413 }
2414
2415 static void
2416 anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
2417 struct anv_reloc_list *list)
2418 {
2419 struct anv_bo *bo, *batch_bo;
2420
2421 batch_bo = &cmd_buffer->batch.bo;
2422 for (size_t i = 0; i < list->num_relocs; i++) {
2423 bo = list->reloc_bos[i];
2424       /* Skip any relocations targeting the batch bo. It needs to be the last
2425        * bo in the execbuf list, so we add it manually later.
2426        */
2427 if (bo == batch_bo)
2428 continue;
2429 if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
2430 continue;
2431
2432 anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
2433 }
2434 }
2435
2436 static void
2437 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
2438 struct anv_reloc_list *list)
2439 {
2440 struct anv_bo *bo;
2441
2442    /* If the kernel supports I915_EXEC_NO_RELOC, it compares the offset in
2443     * struct drm_i915_gem_exec_object2 against each bo's current offset, and
2444     * if none of the bos have moved it skips relocation processing altogether.
2445     * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
2446     * offset value, so we can set it either way. For that to work we need to
2447     * make sure all relocs use the same presumed offset.
2448     */
2449
2450 for (size_t i = 0; i < list->num_relocs; i++) {
2451 bo = list->reloc_bos[i];
2452 if (bo->offset != list->relocs[i].presumed_offset)
2453 cmd_buffer->need_reloc = true;
2454
2455 list->relocs[i].target_handle = bo->index;
2456 }
2457 }
2458
2459 VkResult anv_EndCommandBuffer(
2460 VkCmdBuffer cmdBuffer)
2461 {
2462 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2463 struct anv_device *device = cmd_buffer->device;
2464 struct anv_batch *batch = &cmd_buffer->batch;
2465
2466 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);
2467
2468 /* Round batch up to an even number of dwords. */
2469 if ((batch->next - batch->bo.map) & 4)
2470 anv_batch_emit(batch, GEN8_MI_NOOP);
2471
2472 cmd_buffer->bo_count = 0;
2473 cmd_buffer->need_reloc = false;
2474
2475 /* Lock for access to bo->index. */
2476 pthread_mutex_lock(&device->mutex);
2477
2478    /* Add the surface bo first so it gets added along with its reloc list. */
2479 anv_cmd_buffer_add_bo(cmd_buffer, &cmd_buffer->surface_bo,
2480 &cmd_buffer->surface_relocs);
2481
2482 anv_cmd_buffer_add_validate_bos(cmd_buffer, &cmd_buffer->surface_relocs);
2483 anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
2484 anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
2485 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
2486 anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);
2487
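   /* The batch bo was added last above; execbuf2 treats the final object in
    * the list as the batch buffer to execute.
    */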
2488 cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
2489 cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
2490 cmd_buffer->execbuf.batch_start_offset = 0;
2491 cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
2492 cmd_buffer->execbuf.cliprects_ptr = 0;
2493 cmd_buffer->execbuf.num_cliprects = 0;
2494 cmd_buffer->execbuf.DR1 = 0;
2495 cmd_buffer->execbuf.DR4 = 0;
2496
2497 cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
2498 if (!cmd_buffer->need_reloc)
2499 cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
2500 cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
2501 cmd_buffer->execbuf.rsvd1 = device->context_id;
2502 cmd_buffer->execbuf.rsvd2 = 0;
2503
2504 pthread_mutex_unlock(&device->mutex);
2505
2506 return VK_SUCCESS;
2507 }
2508
2509 VkResult anv_ResetCommandBuffer(
2510 VkCmdBuffer cmdBuffer)
2511 {
2512 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2513
2514 anv_batch_reset(&cmd_buffer->batch);
2515    cmd_buffer->surface_next = 1; /* keep surface offset 0 invalid */
2516 cmd_buffer->surface_relocs.num_relocs = 0;
2517
2518 return VK_SUCCESS;
2519 }
2520
2521 // Command buffer building functions
2522
2523 void anv_CmdBindPipeline(
2524 VkCmdBuffer cmdBuffer,
2525 VkPipelineBindPoint pipelineBindPoint,
2526 VkPipeline _pipeline)
2527 {
2528 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2529 struct anv_pipeline *pipeline = (struct anv_pipeline *) _pipeline;
2530
2531 cmd_buffer->pipeline = pipeline;
2532 cmd_buffer->vb_dirty |= pipeline->vb_used;
2533 cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
2534 }
2535
2536 void anv_CmdBindDynamicStateObject(
2537 VkCmdBuffer cmdBuffer,
2538 VkStateBindPoint stateBindPoint,
2539 VkDynamicStateObject dynamicState)
2540 {
2541 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2542 struct anv_dynamic_vp_state *vp_state;
2543
2544 switch (stateBindPoint) {
2545 case VK_STATE_BIND_POINT_VIEWPORT:
2546 vp_state = (struct anv_dynamic_vp_state *) dynamicState;
2547 /* We emit state immediately, but set cmd_buffer->vp_state to indicate
2548 * that vp state has been set in this command buffer. */
2549 cmd_buffer->vp_state = vp_state;
2550 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
2551 .ScissorRectPointer = vp_state->scissor.offset);
2552 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2553 .CCViewportPointer = vp_state->cc_vp.offset);
2554 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2555 .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
2556 break;
2557 case VK_STATE_BIND_POINT_RASTER:
2558 cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
2559 cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
2560 break;
2561 case VK_STATE_BIND_POINT_COLOR_BLEND:
2562 cmd_buffer->cb_state = (struct anv_dynamic_cb_state *) dynamicState;
2563 cmd_buffer->dirty |= ANV_CMD_BUFFER_CB_DIRTY;
2564 break;
2565 case VK_STATE_BIND_POINT_DEPTH_STENCIL:
2566 cmd_buffer->ds_state = (struct anv_dynamic_ds_state *) dynamicState;
2567 cmd_buffer->dirty |= ANV_CMD_BUFFER_DS_DIRTY;
2568 break;
2569 default:
2570 break;
2571    }
2572 }
2573
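/* Bump-allocate surface state space out of the command buffer's surface bo.
 * Nothing is ever freed; the space is reclaimed when the command buffer is
 * reset.
 */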
2574 static struct anv_state
2575 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
2576 uint32_t size, uint32_t alignment)
2577 {
2578 struct anv_state state;
2579
2580 state.offset = ALIGN_U32(cmd_buffer->surface_next, alignment);
2581 state.map = cmd_buffer->surface_bo.map + state.offset;
2582 state.alloc_size = size;
2583 cmd_buffer->surface_next = state.offset + size;
2584
2585 assert(state.offset + size < cmd_buffer->surface_bo.size);
2586
2587 return state;
2588 }
2589
2590 void anv_CmdBindDescriptorSets(
2591 VkCmdBuffer cmdBuffer,
2592 VkPipelineBindPoint pipelineBindPoint,
2593 uint32_t firstSet,
2594 uint32_t setCount,
2595 const VkDescriptorSet* pDescriptorSets,
2596 uint32_t dynamicOffsetCount,
2597 const uint32_t* pDynamicOffsets)
2598 {
2599 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2600 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2601 struct anv_bindings *bindings = cmd_buffer->bindings;
2602
2603 uint32_t offset = 0;
2604 for (uint32_t i = 0; i < setCount; i++) {
2605 struct anv_descriptor_set *set =
2606 (struct anv_descriptor_set *) pDescriptorSets[i];
2607 struct anv_descriptor_set_layout *set_layout = layout->set[firstSet + i].layout;
2608
2609 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
2610 uint32_t *surface_to_desc = set_layout->stage[s].surface_start;
2611 uint32_t *sampler_to_desc = set_layout->stage[s].sampler_start;
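         /* Fragment binding tables reserve the first MAX_RTS slots for render
          * targets (see anv_cmd_buffer_fill_render_targets), so descriptor
          * surfaces are biased past them.
          */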
2612 uint32_t bias = s == VK_SHADER_STAGE_FRAGMENT ? MAX_RTS : 0;
2613 uint32_t start;
2614
2615 start = bias + layout->set[firstSet + i].surface_start[s];
2616 for (uint32_t b = 0; b < set_layout->stage[s].surface_count; b++) {
2617 struct anv_surface_view *view = set->descriptors[surface_to_desc[b]].view;
2618 if (!view)
2619 continue;
2620
2621 struct anv_state state =
2622 anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
2623 memcpy(state.map, view->surface_state.map, 64);
2624
2625 /* The address goes in dwords 8 and 9 of the SURFACE_STATE */
2626 *(uint64_t *)(state.map + 8 * 4) =
2627 anv_reloc_list_add(&cmd_buffer->surface_relocs,
2628 cmd_buffer->device,
2629 state.offset + 8 * 4,
2630 view->bo, view->offset);
2631
2632 bindings->descriptors[s].surfaces[start + b] = state.offset;
2633 }
2634
2635 start = layout->set[firstSet + i].sampler_start[s];
2636 for (uint32_t b = 0; b < set_layout->stage[s].sampler_count; b++) {
2637 struct anv_sampler *sampler = set->descriptors[sampler_to_desc[b]].sampler;
2638 if (!sampler)
2639 continue;
2640
2641 memcpy(&bindings->descriptors[s].samplers[start + b],
2642 sampler->state, sizeof(sampler->state));
2643 }
2644 }
2645
2646 offset += layout->set[firstSet + i].layout->num_dynamic_buffers;
2647 }
2648
2649 cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
2650 }
2651
2652 void anv_CmdBindIndexBuffer(
2653 VkCmdBuffer cmdBuffer,
2654 VkBuffer _buffer,
2655 VkDeviceSize offset,
2656 VkIndexType indexType)
2657 {
2658 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2659 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2660
2661 static const uint32_t vk_to_gen_index_type[] = {
2662 [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
2663 [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
2664 [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
2665 };
2666
2667 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
2668 .IndexFormat = vk_to_gen_index_type[indexType],
2669 .MemoryObjectControlState = GEN8_MOCS,
2670 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
2671 .BufferSize = buffer->size - offset);
2672 }
2673
2674 void anv_CmdBindVertexBuffers(
2675 VkCmdBuffer cmdBuffer,
2676 uint32_t startBinding,
2677 uint32_t bindingCount,
2678 const VkBuffer* pBuffers,
2679 const VkDeviceSize* pOffsets)
2680 {
2681 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2682 struct anv_bindings *bindings = cmd_buffer->bindings;
2683
2684    /* We have to defer setting up the vertex buffers until draw time, since
2685     * we need the buffer stride from the currently bound pipeline. */
2686
2687 for (uint32_t i = 0; i < bindingCount; i++) {
2688 bindings->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
2689 bindings->vb[startBinding + i].offset = pOffsets[i];
2690 cmd_buffer->vb_dirty |= 1 << (startBinding + i);
2691 }
2692 }
2693
2694 static void
2695 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
2696 {
2697 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2698 struct anv_bindings *bindings = cmd_buffer->bindings;
2699 uint32_t layers = cmd_buffer->framebuffer->layers;
2700
2701 for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
2702 uint32_t bias;
2703
2704 if (s == VK_SHADER_STAGE_FRAGMENT) {
2705 bias = MAX_RTS;
2706 layers = cmd_buffer->framebuffer->layers;
2707 } else {
2708 bias = 0;
2709 layers = 0;
2710 }
2711
2712 /* This is a little awkward: layout can be NULL but we still have to
2713 * allocate and set a binding table for the PS stage for render
2714 * targets. */
2715 uint32_t surface_count = layout ? layout->stage[s].surface_count : 0;
2716
2717 if (layers + surface_count > 0) {
2718 struct anv_state state;
2719 uint32_t size;
2720
2721 size = (bias + surface_count) * sizeof(uint32_t);
2722 state = anv_cmd_buffer_alloc_surface_state(cmd_buffer, size, 32);
2723 memcpy(state.map, bindings->descriptors[s].surfaces, size);
2724
2725 static const uint32_t binding_table_opcodes[] = {
2726 [VK_SHADER_STAGE_VERTEX] = 38,
2727 [VK_SHADER_STAGE_TESS_CONTROL] = 39,
2728 [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
2729 [VK_SHADER_STAGE_GEOMETRY] = 41,
2730 [VK_SHADER_STAGE_FRAGMENT] = 42,
2731 [VK_SHADER_STAGE_COMPUTE] = 0,
2732 };
2733
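         /* All 3DSTATE_BINDING_TABLE_POINTERS_* packets share one layout, so
          * emit the VS variant and patch the sub-opcode to target the right
          * stage.
          */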
2734 anv_batch_emit(&cmd_buffer->batch,
2735 GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
2736 ._3DCommandSubOpcode = binding_table_opcodes[s],
2737 .PointertoVSBindingTable = state.offset);
2738 }
2739
2740 if (layout && layout->stage[s].sampler_count > 0) {
2741 struct anv_state state;
2742 size_t size;
2743
2744 size = layout->stage[s].sampler_count * 16;
2745 state = anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream, size, 32);
2746 memcpy(state.map, bindings->descriptors[s].samplers, size);
2747
2748 static const uint32_t sampler_state_opcodes[] = {
2749 [VK_SHADER_STAGE_VERTEX] = 43,
2750 [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
2751 [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
2752 [VK_SHADER_STAGE_GEOMETRY] = 46,
2753 [VK_SHADER_STAGE_FRAGMENT] = 47,
2754 [VK_SHADER_STAGE_COMPUTE] = 0,
2755 };
2756
2757 anv_batch_emit(&cmd_buffer->batch,
2758 GEN8_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2759 ._3DCommandSubOpcode = sampler_state_opcodes[s],
2760 .PointertoVSSamplerState = state.offset);
2761 }
2762 }
2763 }
2764
2765 static struct anv_state
2766 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
2767 uint32_t *a, uint32_t dwords, uint32_t alignment)
2768 {
2769 struct anv_device *device = cmd_buffer->device;
2770 struct anv_state state;
2771
2772 state = anv_state_pool_alloc(&device->dynamic_state_pool, dwords * 4, alignment);
2773 memcpy(state.map, a, dwords * 4);
2774
2775 return state;
2776 }
2777
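/* Like anv_cmd_buffer_emit_dynamic, but ORs two partially packed dword arrays
 * together (e.g. the CB and DS copies of COLOR_CALC_STATE) before uploading
 * the result to the dynamic state pool.
 */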
2778 static struct anv_state
2779 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
2780 uint32_t *a, uint32_t *b, uint32_t dwords, uint32_t alignment)
2781 {
2782 struct anv_device *device = cmd_buffer->device;
2783 struct anv_state state;
2784 uint32_t *p;
2785
2786 state = anv_state_pool_alloc(&device->dynamic_state_pool, dwords * 4, alignment);
2787 p = state.map;
2788 for (uint32_t i = 0; i < dwords; i++)
2789 p[i] = a[i] | b[i];
2790
2791 return state;
2792 }
2793
2794 static void
2795 anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
2796 {
2797 struct anv_pipeline *pipeline = cmd_buffer->pipeline;
2798 struct anv_bindings *bindings = cmd_buffer->bindings;
2799 uint32_t *p;
2800
2801 uint32_t vb_emit = cmd_buffer->vb_dirty & pipeline->vb_used;
2802 const uint32_t num_buffers = __builtin_popcount(vb_emit);
2803 const uint32_t num_dwords = 1 + num_buffers * 4;
2804
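   /* Only re-emit vertex buffers that are both dirty and actually used by the
    * current pipeline, packed into a single 3DSTATE_VERTEX_BUFFERS command.
    */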
2805 if (vb_emit) {
2806 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
2807 GEN8_3DSTATE_VERTEX_BUFFERS);
2808 uint32_t vb, i = 0;
2809 for_each_bit(vb, vb_emit) {
2810 struct anv_buffer *buffer = bindings->vb[vb].buffer;
2811 uint32_t offset = bindings->vb[vb].offset;
2812
2813 struct GEN8_VERTEX_BUFFER_STATE state = {
2814 .VertexBufferIndex = vb,
2815 .MemoryObjectControlState = GEN8_MOCS,
2816 .AddressModifyEnable = true,
2817 .BufferPitch = pipeline->binding_stride[vb],
2818 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
2819 .BufferSize = buffer->size - offset
2820 };
2821
2822 GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
2823 i++;
2824 }
2825 }
2826
2827 if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
2828 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
2829
2830 if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
2831 flush_descriptor_sets(cmd_buffer);
2832
2833 if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY)) {
2834 anv_batch_emit_merge(&cmd_buffer->batch,
2835 cmd_buffer->rs_state->state_sf, pipeline->state_sf);
2836 anv_batch_emit_merge(&cmd_buffer->batch,
2837 cmd_buffer->rs_state->state_raster, pipeline->state_raster);
2838 }
2839
2840 if (cmd_buffer->ds_state &&
2841 (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_DS_DIRTY)))
2842 anv_batch_emit_merge(&cmd_buffer->batch,
2843 cmd_buffer->ds_state->state_wm_depth_stencil,
2844 pipeline->state_wm_depth_stencil);
2845
2846 if (cmd_buffer->dirty & (ANV_CMD_BUFFER_CB_DIRTY | ANV_CMD_BUFFER_DS_DIRTY)) {
2847 struct anv_state state;
2848 if (cmd_buffer->ds_state == NULL)
2849 state = anv_cmd_buffer_emit_dynamic(cmd_buffer,
2850 cmd_buffer->cb_state->state_color_calc,
2851 GEN8_COLOR_CALC_STATE_length, 32);
2852 else if (cmd_buffer->cb_state == NULL)
2853 state = anv_cmd_buffer_emit_dynamic(cmd_buffer,
2854 cmd_buffer->ds_state->state_color_calc,
2855 GEN8_COLOR_CALC_STATE_length, 32);
2856 else
2857 state = anv_cmd_buffer_merge_dynamic(cmd_buffer,
2858 cmd_buffer->ds_state->state_color_calc,
2859 cmd_buffer->cb_state->state_color_calc,
2860 GEN8_COLOR_CALC_STATE_length, 32);
2861
2862 anv_batch_emit(&cmd_buffer->batch,
2863 GEN8_3DSTATE_CC_STATE_POINTERS,
2864 .ColorCalcStatePointer = state.offset,
2865 .ColorCalcStatePointerValid = true);
2866 }
2867
2868 cmd_buffer->vb_dirty &= ~vb_emit;
2869 cmd_buffer->dirty = 0;
2870 }
2871
2872 void anv_CmdDraw(
2873 VkCmdBuffer cmdBuffer,
2874 uint32_t firstVertex,
2875 uint32_t vertexCount,
2876 uint32_t firstInstance,
2877 uint32_t instanceCount)
2878 {
2879 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2880
2881 anv_cmd_buffer_flush_state(cmd_buffer);
2882
2883 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2884 .VertexAccessType = SEQUENTIAL,
2885 .VertexCountPerInstance = vertexCount,
2886 .StartVertexLocation = firstVertex,
2887 .InstanceCount = instanceCount,
2888 .StartInstanceLocation = firstInstance,
2889 .BaseVertexLocation = 0);
2890 }
2891
2892 void anv_CmdDrawIndexed(
2893 VkCmdBuffer cmdBuffer,
2894 uint32_t firstIndex,
2895 uint32_t indexCount,
2896 int32_t vertexOffset,
2897 uint32_t firstInstance,
2898 uint32_t instanceCount)
2899 {
2900 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2901
2902 anv_cmd_buffer_flush_state(cmd_buffer);
2903
2904 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2905 .VertexAccessType = RANDOM,
2906 .VertexCountPerInstance = indexCount,
2907 .StartVertexLocation = firstIndex,
2908 .InstanceCount = instanceCount,
2909 .StartInstanceLocation = firstInstance,
2910 .BaseVertexLocation = 0);
2911 }
2912
2913 static void
2914 anv_batch_lrm(struct anv_batch *batch,
2915 uint32_t reg, struct anv_bo *bo, uint32_t offset)
2916 {
2917 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
2918 .RegisterAddress = reg,
2919 .MemoryAddress = { bo, offset });
2920 }
2921
2922 static void
2923 anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
2924 {
2925 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
2926 .RegisterOffset = reg,
2927 .DataDWord = imm);
2928 }
2929
2930 /* Auto-Draw / Indirect Registers */
2931 #define GEN7_3DPRIM_END_OFFSET 0x2420
2932 #define GEN7_3DPRIM_START_VERTEX 0x2430
2933 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
2934 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
2935 #define GEN7_3DPRIM_START_INSTANCE 0x243C
2936 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
2937
2938 void anv_CmdDrawIndirect(
2939 VkCmdBuffer cmdBuffer,
2940 VkBuffer _buffer,
2941 VkDeviceSize offset,
2942 uint32_t count,
2943 uint32_t stride)
2944 {
2945 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2946 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2947 struct anv_bo *bo = buffer->bo;
2948 uint32_t bo_offset = buffer->offset + offset;
2949
2950 anv_cmd_buffer_flush_state(cmd_buffer);
2951
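   /* Load the draw parameters straight from the indirect buffer into the
    * 3DPRIM registers: vertex count at +0, instance count at +4, start vertex
    * at +8 and start instance at +12.
    */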
2952 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
2953 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2954 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
2955 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
2956 anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
2957
2958 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2959 .IndirectParameterEnable = true,
2960 .VertexAccessType = SEQUENTIAL);
2961 }
2962
2963 void anv_CmdDrawIndexedIndirect(
2964 VkCmdBuffer cmdBuffer,
2965 VkBuffer _buffer,
2966 VkDeviceSize offset,
2967 uint32_t count,
2968 uint32_t stride)
2969 {
2970 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2971 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2972 struct anv_bo *bo = buffer->bo;
2973 uint32_t bo_offset = buffer->offset + offset;
2974
2975 anv_cmd_buffer_flush_state(cmd_buffer);
2976
2977 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
2978 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
2979 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
2980 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
2981 anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
2982
2983 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2984 .IndirectParameterEnable = true,
2985 .VertexAccessType = RANDOM);
2986 }
2987
2988 void anv_CmdDispatch(
2989 VkCmdBuffer cmdBuffer,
2990 uint32_t x,
2991 uint32_t y,
2992 uint32_t z)
2993 {
2994 stub();
2995 }
2996
2997 void anv_CmdDispatchIndirect(
2998 VkCmdBuffer cmdBuffer,
2999 VkBuffer buffer,
3000 VkDeviceSize offset)
3001 {
3002 stub();
3003 }
3004
3005 void anv_CmdSetEvent(
3006 VkCmdBuffer cmdBuffer,
3007 VkEvent event,
3008 VkPipeEvent pipeEvent)
3009 {
3010 stub();
3011 }
3012
3013 void anv_CmdResetEvent(
3014 VkCmdBuffer cmdBuffer,
3015 VkEvent event,
3016 VkPipeEvent pipeEvent)
3017 {
3018 stub();
3019 }
3020
3021 void anv_CmdWaitEvents(
3022 VkCmdBuffer cmdBuffer,
3023 VkWaitEvent waitEvent,
3024 uint32_t eventCount,
3025 const VkEvent* pEvents,
3026 uint32_t memBarrierCount,
3027 const void** ppMemBarriers)
3028 {
3029 stub();
3030 }
3031
3032 void anv_CmdPipelineBarrier(
3033 VkCmdBuffer cmdBuffer,
3034 VkWaitEvent waitEvent,
3035 uint32_t pipeEventCount,
3036 const VkPipeEvent* pPipeEvents,
3037 uint32_t memBarrierCount,
3038 const void** ppMemBarriers)
3039 {
3040 stub();
3041 }
3042
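/* Occlusion queries are implemented by snapshotting the pixel pipeline's depth
 * count into the query pool bo at CmdBeginQuery and again, 8 bytes later, at
 * CmdEndQuery; CmdCopyQueryPoolResults subtracts the two snapshots.
 */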
3043 static void
3044 anv_batch_emit_ps_depth_count(struct anv_batch *batch,
3045 struct anv_bo *bo, uint32_t offset)
3046 {
3047 anv_batch_emit(batch, GEN8_PIPE_CONTROL,
3048 .DestinationAddressType = DAT_PPGTT,
3049 .PostSyncOperation = WritePSDepthCount,
3050 .Address = { bo, offset }); /* FIXME: This is only lower 32 bits */
3051 }
3052
3053 void anv_CmdBeginQuery(
3054 VkCmdBuffer cmdBuffer,
3055 VkQueryPool queryPool,
3056 uint32_t slot,
3057 VkQueryControlFlags flags)
3058 {
3059 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3060 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
3061
3062 switch (pool->type) {
3063 case VK_QUERY_TYPE_OCCLUSION:
3064 anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
3065 slot * sizeof(struct anv_query_pool_slot));
3066 break;
3067
3068 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
3069 default:
3070 unreachable("");
3071 }
3072 }
3073
3074 void anv_CmdEndQuery(
3075 VkCmdBuffer cmdBuffer,
3076 VkQueryPool queryPool,
3077 uint32_t slot)
3078 {
3079 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3080 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
3081
3082 switch (pool->type) {
3083 case VK_QUERY_TYPE_OCCLUSION:
3084 anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
3085 slot * sizeof(struct anv_query_pool_slot) + 8);
3086 break;
3087
3088 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
3089 default:
3090 unreachable("");
3091 }
3092 }
3093
3094 void anv_CmdResetQueryPool(
3095 VkCmdBuffer cmdBuffer,
3096 VkQueryPool queryPool,
3097 uint32_t startQuery,
3098 uint32_t queryCount)
3099 {
3100 stub();
3101 }
3102
3103 #define TIMESTAMP 0x2358
3104
3105 void anv_CmdWriteTimestamp(
3106 VkCmdBuffer cmdBuffer,
3107 VkTimestampType timestampType,
3108 VkBuffer destBuffer,
3109 VkDeviceSize destOffset)
3110 {
3111 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3112 struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
3113 struct anv_bo *bo = buffer->bo;
3114
3115 switch (timestampType) {
3116 case VK_TIMESTAMP_TYPE_TOP:
3117 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
3118 .RegisterAddress = TIMESTAMP,
3119 .MemoryAddress = { bo, buffer->offset + destOffset });
3120 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
3121 .RegisterAddress = TIMESTAMP + 4,
3122 .MemoryAddress = { bo, buffer->offset + destOffset + 4 });
3123 break;
3124
3125 case VK_TIMESTAMP_TYPE_BOTTOM:
3126 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
3127 .DestinationAddressType = DAT_PPGTT,
3128 .PostSyncOperation = WriteTimestamp,
3129 .Address = /* FIXME: This is only lower 32 bits */
3130 { bo, buffer->offset + destOffset });
3131 break;
3132
3133 default:
3134 break;
3135 }
3136 }
3137
3138 #define alu_opcode(v) __gen_field((v), 20, 31)
3139 #define alu_operand1(v) __gen_field((v), 10, 19)
3140 #define alu_operand2(v) __gen_field((v), 0, 9)
3141 #define alu(opcode, operand1, operand2) \
3142 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
3143
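/* Each MI_MATH instruction dword packs an ALU opcode in bits 31:20 and two
 * operands in bits 19:10 and 9:0, e.g. alu(OPCODE_LOAD, OPERAND_SRCA,
 * OPERAND_R0) loads GPR0 into the ALU's SRCA input.
 */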
3144 #define OPCODE_NOOP 0x000
3145 #define OPCODE_LOAD 0x080
3146 #define OPCODE_LOADINV 0x480
3147 #define OPCODE_LOAD0 0x081
3148 #define OPCODE_LOAD1 0x481
3149 #define OPCODE_ADD 0x100
3150 #define OPCODE_SUB 0x101
3151 #define OPCODE_AND 0x102
3152 #define OPCODE_OR 0x103
3153 #define OPCODE_XOR 0x104
3154 #define OPCODE_STORE 0x180
3155 #define OPCODE_STOREINV 0x580
3156
3157 #define OPERAND_R0 0x00
3158 #define OPERAND_R1 0x01
3159 #define OPERAND_R2 0x02
3160 #define OPERAND_R3 0x03
3161 #define OPERAND_R4 0x04
3162 #define OPERAND_SRCA 0x20
3163 #define OPERAND_SRCB 0x21
3164 #define OPERAND_ACCU 0x31
3165 #define OPERAND_ZF 0x32
3166 #define OPERAND_CF 0x33
3167
3168 #define CS_GPR(n) (0x2600 + (n) * 8)
3169
3170 static void
3171 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
3172 struct anv_bo *bo, uint32_t offset)
3173 {
3174 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
3175 .RegisterAddress = reg,
3176 .MemoryAddress = { bo, offset });
3177 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
3178 .RegisterAddress = reg + 4,
3179 .MemoryAddress = { bo, offset + 4 });
3180 }
3181
3182 void anv_CmdCopyQueryPoolResults(
3183 VkCmdBuffer cmdBuffer,
3184 VkQueryPool queryPool,
3185 uint32_t startQuery,
3186 uint32_t queryCount,
3187 VkBuffer destBuffer,
3188 VkDeviceSize destOffset,
3189 VkDeviceSize destStride,
3190 VkQueryResultFlags flags)
3191 {
3192 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3193 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
3194 struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
3195 uint32_t slot_offset, dst_offset;
3196
3197 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
3198       /* Where is the availability info supposed to go? */
3199 anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
3200 return;
3201 }
3202
3203 assert(pool->type == VK_QUERY_TYPE_OCCLUSION);
3204
3205 /* FIXME: If we're not waiting, should we just do this on the CPU? */
3206 if (flags & VK_QUERY_RESULT_WAIT_BIT)
3207 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
3208 .CommandStreamerStallEnable = true);
3209
3210 dst_offset = buffer->offset + destOffset;
3211 for (uint32_t i = 0; i < queryCount; i++) {
3212
3213 slot_offset = (startQuery + i) * sizeof(struct anv_query_pool_slot);
3214
3215 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0), &pool->bo, slot_offset);
3216 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(1), &pool->bo, slot_offset + 8);
3217
3218       /* FIXME: We need to clamp the result when 32-bit results are requested. */
3219
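      /* MI_MATH computes GPR2 = GPR1 - GPR0, i.e. the end-of-query depth count
       * minus the begin-of-query depth count.
       */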
3220 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GEN8_MI_MATH);
3221 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
3222 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
3223 dw[3] = alu(OPCODE_SUB, 0, 0);
3224 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
3225
3226 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
3227 .RegisterAddress = CS_GPR(2),
3228 /* FIXME: This is only lower 32 bits */
3229 .MemoryAddress = { buffer->bo, dst_offset });
3230
3231 if (flags & VK_QUERY_RESULT_64_BIT)
3232 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
3233 .RegisterAddress = CS_GPR(2) + 4,
3234 /* FIXME: This is only lower 32 bits */
3235 .MemoryAddress = { buffer->bo, dst_offset + 4 });
3236
3237 dst_offset += destStride;
3238 }
3239 }
3240
3241 void anv_CmdInitAtomicCounters(
3242 VkCmdBuffer cmdBuffer,
3243 VkPipelineBindPoint pipelineBindPoint,
3244 uint32_t startCounter,
3245 uint32_t counterCount,
3246 const uint32_t* pData)
3247 {
3248 stub();
3249 }
3250
3251 void anv_CmdLoadAtomicCounters(
3252 VkCmdBuffer cmdBuffer,
3253 VkPipelineBindPoint pipelineBindPoint,
3254 uint32_t startCounter,
3255 uint32_t counterCount,
3256 VkBuffer srcBuffer,
3257 VkDeviceSize srcOffset)
3258 {
3259 stub();
3260 }
3261
3262 void anv_CmdSaveAtomicCounters(
3263 VkCmdBuffer cmdBuffer,
3264 VkPipelineBindPoint pipelineBindPoint,
3265 uint32_t startCounter,
3266 uint32_t counterCount,
3267 VkBuffer destBuffer,
3268 VkDeviceSize destOffset)
3269 {
3270 stub();
3271 }
3272
3273 static void
3274 anv_framebuffer_destroy(struct anv_device *device,
3275 struct anv_object *object,
3276 VkObjectType obj_type)
3277 {
3278 struct anv_framebuffer *fb = (struct anv_framebuffer *)object;
3279
3280 assert(obj_type == VK_OBJECT_TYPE_FRAMEBUFFER);
3281
3282 anv_DestroyObject((VkDevice) device,
3283 VK_OBJECT_TYPE_DYNAMIC_VP_STATE,
3284 fb->vp_state);
3285
3286 anv_device_free(device, fb);
3287 }
3288
3289 VkResult anv_CreateFramebuffer(
3290 VkDevice _device,
3291 const VkFramebufferCreateInfo* pCreateInfo,
3292 VkFramebuffer* pFramebuffer)
3293 {
3294 struct anv_device *device = (struct anv_device *) _device;
3295 struct anv_framebuffer *framebuffer;
3296
3297 static const struct anv_depth_stencil_view null_view =
3298 { .depth_format = D16_UNORM, .depth_stride = 0, .stencil_stride = 0 };
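   /* With zero strides, anv_cmd_buffer_emit_depth_stencil disables both depth
    * and stencil writes, so this acts as a "no depth/stencil" placeholder.
    */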
3299
3300 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
3301
3302 framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
3303 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
3304 if (framebuffer == NULL)
3305 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3306
3307 framebuffer->base.destructor = anv_framebuffer_destroy;
3308
3309 framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
3310 for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
3311 framebuffer->color_attachments[i] =
3312 (struct anv_surface_view *) pCreateInfo->pColorAttachments[i].view;
3313 }
3314
3315 if (pCreateInfo->pDepthStencilAttachment) {
3316 framebuffer->depth_stencil =
3317 (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
3318 } else {
3319 framebuffer->depth_stencil = &null_view;
3320 }
3321
3322 framebuffer->sample_count = pCreateInfo->sampleCount;
3323 framebuffer->width = pCreateInfo->width;
3324 framebuffer->height = pCreateInfo->height;
3325 framebuffer->layers = pCreateInfo->layers;
3326
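   /* Each framebuffer also owns a viewport/scissor state object covering its
    * full extent; it is freed again in anv_framebuffer_destroy.
    */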
3327 vkCreateDynamicViewportState((VkDevice) device,
3328 &(VkDynamicVpStateCreateInfo) {
3329 .sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO,
3330 .viewportAndScissorCount = 1,
3331 .pViewports = (VkViewport[]) {
3332 {
3333 .originX = 0,
3334 .originY = 0,
3335 .width = pCreateInfo->width,
3336 .height = pCreateInfo->height,
3337 .minDepth = 0,
3338 .maxDepth = 1
3339 },
3340 },
3341 .pScissors = (VkRect[]) {
3342 { { 0, 0 },
3343 { pCreateInfo->width, pCreateInfo->height } },
3344 }
3345 },
3346 &framebuffer->vp_state);
3347
3348 *pFramebuffer = (VkFramebuffer) framebuffer;
3349
3350 return VK_SUCCESS;
3351 }
3352
3353 VkResult anv_CreateRenderPass(
3354 VkDevice _device,
3355 const VkRenderPassCreateInfo* pCreateInfo,
3356 VkRenderPass* pRenderPass)
3357 {
3358 struct anv_device *device = (struct anv_device *) _device;
3359 struct anv_render_pass *pass;
3360 size_t size;
3361
3362 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
3363
3364 size = sizeof(*pass) +
3365 pCreateInfo->layers * sizeof(struct anv_render_pass_layer);
3366 pass = anv_device_alloc(device, size, 8,
3367 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
3368 if (pass == NULL)
3369 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
3370
3371 pass->render_area = pCreateInfo->renderArea;
3372
3373 pass->num_layers = pCreateInfo->layers;
3374
3375 pass->num_clear_layers = 0;
3376 for (uint32_t i = 0; i < pCreateInfo->layers; i++) {
3377 pass->layers[i].color_load_op = pCreateInfo->pColorLoadOps[i];
3378 pass->layers[i].clear_color = pCreateInfo->pColorLoadClearValues[i];
3379 if (pass->layers[i].color_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
3380 pass->num_clear_layers++;
3381 }
3382
3383 *pRenderPass = (VkRenderPass) pass;
3384
3385 return VK_SUCCESS;
3386 }
3387
3388 void
3389 anv_cmd_buffer_fill_render_targets(struct anv_cmd_buffer *cmd_buffer)
3390 {
3391 struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;
3392 struct anv_bindings *bindings = cmd_buffer->bindings;
3393
3394 for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
3395 const struct anv_surface_view *view = framebuffer->color_attachments[i];
3396
3397 struct anv_state state =
3398 anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
3399 memcpy(state.map, view->surface_state.map, 64);
3400
3401 /* The address goes in dwords 8 and 9 of the SURFACE_STATE */
3402 *(uint64_t *)(state.map + 8 * 4) =
3403 anv_reloc_list_add(&cmd_buffer->surface_relocs,
3404 cmd_buffer->device,
3405 state.offset + 8 * 4,
3406 view->bo, view->offset);
3407
3408 bindings->descriptors[VK_SHADER_STAGE_FRAGMENT].surfaces[i] = state.offset;
3409 }
3410 cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
3411 }
3412
3413 static void
3414 anv_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
3415 struct anv_render_pass *pass)
3416 {
3417 const struct anv_depth_stencil_view *view =
3418 cmd_buffer->framebuffer->depth_stencil;
3419
3420 /* FIXME: Implement the PMA stall W/A */
3421
3422 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
3423 .SurfaceType = SURFTYPE_2D,
3424 .DepthWriteEnable = view->depth_stride > 0,
3425 .StencilWriteEnable = view->stencil_stride > 0,
3426 .HierarchicalDepthBufferEnable = false,
3427 .SurfaceFormat = view->depth_format,
3428 .SurfacePitch = view->depth_stride > 0 ? view->depth_stride - 1 : 0,
3429 .SurfaceBaseAddress = { view->bo, view->depth_offset },
3430 .Height = pass->render_area.extent.height - 1,
3431 .Width = pass->render_area.extent.width - 1,
3432 .LOD = 0,
3433 .Depth = 1 - 1,
3434 .MinimumArrayElement = 0,
3435 .DepthBufferObjectControlState = GEN8_MOCS,
3436 .RenderTargetViewExtent = 1 - 1,
3437 .SurfaceQPitch = 0);
3438
3439    /* Disable hierarchical depth buffers. */
3440 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HIER_DEPTH_BUFFER);
3441
3442 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STENCIL_BUFFER,
3443 .StencilBufferEnable = view->stencil_stride > 0,
3444 .StencilBufferObjectControlState = GEN8_MOCS,
3445 .SurfacePitch = view->stencil_stride > 0 ? view->stencil_stride - 1 : 0,
3446 .SurfaceBaseAddress = { view->bo, view->stencil_offset },
3447 .SurfaceQPitch = 0);
3448
3449    /* Clear the clear params by emitting an empty 3DSTATE_CLEAR_PARAMS. */
3450 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_CLEAR_PARAMS);
3451 }
3452
3453 void anv_CmdBeginRenderPass(
3454 VkCmdBuffer cmdBuffer,
3455 const VkRenderPassBegin* pRenderPassBegin)
3456 {
3457 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
3458 struct anv_render_pass *pass = (struct anv_render_pass *) pRenderPassBegin->renderPass;
3459 struct anv_framebuffer *framebuffer =
3460 (struct anv_framebuffer *) pRenderPassBegin->framebuffer;
3461
3462 cmd_buffer->framebuffer = framebuffer;
3463
3464 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
3465 .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
3466 .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
3467 .ClippedDrawingRectangleYMax =
3468 pass->render_area.offset.y + pass->render_area.extent.height - 1,
3469 .ClippedDrawingRectangleXMax =
3470 pass->render_area.offset.x + pass->render_area.extent.width - 1,
3471 .DrawingRectangleOriginY = 0,
3472 .DrawingRectangleOriginX = 0);
3473
3474 anv_cmd_buffer_fill_render_targets(cmd_buffer);
3475
3476 anv_cmd_buffer_emit_depth_stencil(cmd_buffer, pass);
3477
3478 anv_cmd_buffer_clear(cmd_buffer, pass);
3479 }
3480
3481 void anv_CmdEndRenderPass(
3482 VkCmdBuffer cmdBuffer,
3483 VkRenderPass renderPass)
3484 {
3485 /* Emit a flushing pipe control at the end of a pass. This is kind of a
3486 * hack but it ensures that render targets always actually get written.
3487 * Eventually, we should do flushing based on image format transitions
3488 * or something of that nature.
3489 */
3490 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *)cmdBuffer;
3491 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
3492 .PostSyncOperation = NoWrite,
3493 .RenderTargetCacheFlushEnable = true,
3494 .InstructionCacheInvalidateEnable = true,
3495 .DepthCacheFlushEnable = true,
3496 .VFCacheInvalidationEnable = true,
3497 .TextureCacheInvalidationEnable = true,
3498 .CommandStreamerStallEnable = true);
3499 }
3500
3501 void vkCmdDbgMarkerBegin(
3502 VkCmdBuffer cmdBuffer,
3503 const char* pMarker)
3504 __attribute__ ((visibility ("default")));
3505
3506 void vkCmdDbgMarkerEnd(
3507 VkCmdBuffer cmdBuffer)
3508 __attribute__ ((visibility ("default")));
3509
3510 VkResult vkDbgSetObjectTag(
3511 VkDevice device,
3512 VkObject object,
3513 size_t tagSize,
3514 const void* pTag)
3515 __attribute__ ((visibility ("default")));
3516
3517
3518 void vkCmdDbgMarkerBegin(
3519 VkCmdBuffer cmdBuffer,
3520 const char* pMarker)
3521 {
3522 }
3523
3524 void vkCmdDbgMarkerEnd(
3525 VkCmdBuffer cmdBuffer)
3526 {
3527 }
3528
3529 VkResult vkDbgSetObjectTag(
3530 VkDevice device,
3531 VkObject object,
3532 size_t tagSize,
3533 const void* pTag)
3534 {
3535 return VK_SUCCESS;
3536 }