Add vulkan driver for BDW
[mesa.git] / src / vulkan / device.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"

static int
anv_env_get_int(const char *name)
{
   const char *val = getenv(name);

   if (!val)
      return 0;

   return strtol(val, NULL, 0);
}

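/* Probe the render node at `path` and check that the kernel exposes the
 * features this driver depends on: execbuf2, bo wait with timeout, LLC and
 * exec-constants. INTEL_DEVID_OVERRIDE substitutes a PCI ID for the real
 * hardware's, which implies no-hw mode (useful when running against a
 * simulator rather than real hardware).
 */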
static VkResult
fill_physical_device(struct anv_physical_device *device,
                     struct anv_instance *instance,
                     const char *path)
{
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_error(VK_ERROR_UNAVAILABLE);

   device->instance = instance;
   device->path = path;

   device->chipset_id = anv_env_get_int("INTEL_DEVID_OVERRIDE");
   device->no_hw = false;
   if (device->chipset_id) {
      /* INTEL_DEVID_OVERRIDE implies INTEL_NO_HW. */
      device->no_hw = true;
   } else {
      device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   }
   if (!device->chipset_id)
      goto fail;

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id, -1);
   if (!device->info)
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_LLC))
      goto fail;

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CONSTANTS))
      goto fail;

   close(fd);

   return VK_SUCCESS;

 fail:
   close(fd);

   return vk_error(VK_ERROR_UNAVAILABLE);
}

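/* Default allocator, used when the application passes no VkAllocCallbacks.
 * Note that plain malloc() only guarantees fundamental alignment; the
 * callers in this file never request more than 8-byte alignment.
 */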
static void *default_alloc(
    void*                                       pUserData,
    size_t                                      size,
    size_t                                      alignment,
    VkSystemAllocType                           allocType)
{
   return malloc(size);
}

static void default_free(
    void*                                       pUserData,
    void*                                       pMem)
{
   free(pMem);
}

static const VkAllocCallbacks default_alloc_callbacks = {
   .pUserData = NULL,
   .pfnAlloc = default_alloc,
   .pfnFree = default_free
};

VkResult VKAPI vkCreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;
   const VkAllocCallbacks *alloc_callbacks = &default_alloc_callbacks;
   void *user_data = NULL;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   if (pCreateInfo->pAllocCb) {
      alloc_callbacks = pCreateInfo->pAllocCb;
      user_data = pCreateInfo->pAllocCb->pUserData;
   }
   instance = alloc_callbacks->pfnAlloc(user_data, sizeof(*instance), 8,
                                        VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->pAllocUserData = alloc_callbacks->pUserData;
   instance->pfnAlloc = alloc_callbacks->pfnAlloc;
   instance->pfnFree = alloc_callbacks->pfnFree;
   instance->apiVersion = pCreateInfo->pAppInfo->apiVersion;

   instance->physicalDeviceCount = 0;
   result = fill_physical_device(&instance->physicalDevice,
                                 instance, "/dev/dri/renderD128");
   if (result == VK_SUCCESS)
      instance->physicalDeviceCount++;

   *pInstance = (VkInstance) instance;

   return VK_SUCCESS;
}

VkResult VKAPI vkDestroyInstance(
    VkInstance                                  _instance)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   instance->pfnFree(instance->pAllocUserData, instance);

   return VK_SUCCESS;
}

VkResult VKAPI vkEnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   struct anv_instance *instance = (struct anv_instance *) _instance;

   if (*pPhysicalDeviceCount >= 1)
      pPhysicalDevices[0] = (VkPhysicalDevice) &instance->physicalDevice;
   *pPhysicalDeviceCount = instance->physicalDeviceCount;

   return VK_SUCCESS;
}

VkResult VKAPI vkGetPhysicalDeviceInfo(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceInfoType                    infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   struct anv_physical_device *device = (struct anv_physical_device *) physicalDevice;
   VkPhysicalDeviceProperties *properties;
   VkPhysicalDevicePerformance *performance;
   VkPhysicalDeviceQueueProperties *queue_properties;
   VkPhysicalDeviceMemoryProperties *memory_properties;
   uint64_t ns_per_tick = 80;

   switch (infoType) {
   case VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES:
      properties = pData;
      assert(*pDataSize >= sizeof(*properties));
      *pDataSize = sizeof(*properties); /* Assuming we have to return the size of our struct. */

      properties->apiVersion = 1;
      properties->driverVersion = 1;
      properties->vendorId = 0x8086;
      properties->deviceId = device->chipset_id;
      properties->deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
      strcpy(properties->deviceName, device->name);
      properties->maxInlineMemoryUpdateSize = 0;
      properties->maxBoundDescriptorSets = 0;
      properties->maxThreadGroupSize = 0;
      properties->timestampFrequency = 1000 * 1000 * 1000 / ns_per_tick;
      properties->multiColorAttachmentClears = 0;
      properties->maxDescriptorSets = 2;
      properties->maxViewports = 16;
      properties->maxColorAttachments = 8;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE:
      performance = pData;
      assert(*pDataSize >= sizeof(*performance));
      *pDataSize = sizeof(*performance); /* Assuming we have to return the size of our struct. */

      performance->maxDeviceClock = 1.0;
      performance->aluPerClock = 1.0;
      performance->texPerClock = 1.0;
      performance->primsPerClock = 1.0;
      performance->pixelsPerClock = 1.0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES:
      queue_properties = pData;
      assert(*pDataSize >= sizeof(*queue_properties));
      *pDataSize = sizeof(*queue_properties);

      queue_properties->queueFlags = 0;
      queue_properties->queueCount = 1;
      queue_properties->maxAtomicCounters = 0;
      queue_properties->supportsTimestamps = 0;
      queue_properties->maxMemReferences = 0;
      return VK_SUCCESS;

   case VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES:
      memory_properties = pData;
      assert(*pDataSize >= sizeof(*memory_properties));
      *pDataSize = sizeof(*memory_properties);

      memory_properties->supportsMigration = false;
      memory_properties->supportsPinning = false;
      return VK_SUCCESS;

   default:
      return VK_UNSUPPORTED;
   }
}

void * VKAPI vkGetProcAddr(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pName)
{
   return NULL;
}

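/* Parse the comma-separated INTEL_DEBUG environment variable. Only two
 * flags are honored here: "aub" (dump submissions to an AUB trace) and
 * "no_hw" (skip actual execbuf submission).
 */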
static void
parse_debug_flags(struct anv_device *device)
{
   const char *debug, *p, *end;

   debug = getenv("INTEL_DEBUG");
   device->dump_aub = false;
   if (debug) {
      for (p = debug; *p; p = end + 1) {
         end = strchrnul(p, ',');
         if (end - p == 3 && memcmp(p, "aub", 3) == 0)
            device->dump_aub = true;
         if (end - p == 5 && memcmp(p, "no_hw", 5) == 0)
            device->no_hw = true;
         if (*end == '\0')
            break;
      }
   }
}

VkResult VKAPI vkCreateDevice(
    VkPhysicalDevice                            _physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    VkDevice*                                   pDevice)
{
   struct anv_physical_device *physicalDevice =
      (struct anv_physical_device *) _physicalDevice;
   struct anv_instance *instance = physicalDevice->instance;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   device = instance->pfnAlloc(instance->pAllocUserData,
                               sizeof(*device), 8,
                               VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->no_hw = physicalDevice->no_hw;
   parse_debug_flags(device);

   device->instance = physicalDevice->instance;
   device->fd = open(physicalDevice->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1)
      goto fail_device;

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1)
      goto fail_fd;

   anv_block_pool_init(&device->dyn_state_block_pool, device, 2048);

   anv_state_pool_init(&device->dyn_state_pool,
                       &device->dyn_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 2048);
   anv_block_pool_init(&device->surface_state_block_pool, device, 2048);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   device->compiler = anv_compiler_create(device->fd);
   device->aub_writer = NULL;

   device->info = *physicalDevice->info;

   pthread_mutex_init(&device->mutex, NULL);

   *pDevice = (VkDevice) device;

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_device_free(device, device);

   return vk_error(VK_ERROR_UNAVAILABLE);
}

VkResult VKAPI vkDestroyDevice(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;

   anv_compiler_destroy(device->compiler);

   anv_block_pool_finish(&device->dyn_state_block_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);

   close(device->fd);

   if (device->aub_writer)
      anv_aub_writer_destroy(device->aub_writer);

   anv_device_free(device, device);

   return VK_SUCCESS;
}

VkResult VKAPI vkGetGlobalExtensionInfo(
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == sizeof(*count));
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkGetPhysicalDeviceExtensionInfo(
    VkPhysicalDevice                            physicalDevice,
    VkExtensionInfoType                         infoType,
    uint32_t                                    extensionIndex,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   uint32_t *count;

   switch (infoType) {
   case VK_EXTENSION_INFO_TYPE_COUNT:
      count = pData;
      assert(*pDataSize == sizeof(*count));
      *count = 0;
      return VK_SUCCESS;

   case VK_EXTENSION_INFO_TYPE_PROPERTIES:
      return vk_error(VK_ERROR_INVALID_EXTENSION);

   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkEnumerateLayers(
    VkPhysicalDevice                            physicalDevice,
    size_t                                      maxStringSize,
    size_t*                                     pLayerCount,
    char* const*                                pOutLayers,
    void*                                       pReserved)
{
   *pLayerCount = 0;

   return VK_SUCCESS;
}

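/* Queues track progress with a monotonically increasing serial:
 * completed_serial lives in state-pool memory so finished work can be
 * recorded there (the no-hw path in vkQueueSubmit stores to it directly),
 * and next_serial is the value the next submission will use.
 */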
VkResult VKAPI vkGetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_queue *queue;

   /* FIXME: Should allocate these at device create time. */

   queue = anv_device_alloc(device, sizeof(*queue), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (queue == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   queue->device = device;
   queue->pool = &device->surface_state_pool;

   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
   *(uint32_t *)queue->completed_serial.map = 0;
   queue->next_serial = 1;

   *pQueue = (VkQueue) queue;

   return VK_SUCCESS;
}

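/* Command batches are fixed-size GEM buffers, mapped for the lifetime of
 * the batch. Relocations are accumulated in two lists: cmd_relocs for
 * addresses written into the batch itself and surf_relocs for addresses
 * written into surface state.
 */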
static const uint32_t BATCH_SIZE = 8192;

VkResult
anv_batch_init(struct anv_batch *batch, struct anv_device *device)
{
   VkResult result;

   result = anv_bo_init_new(&batch->bo, device, BATCH_SIZE);
   if (result != VK_SUCCESS)
      return result;

   batch->bo.map =
      anv_gem_mmap(device, batch->bo.gem_handle, 0, BATCH_SIZE);
   if (batch->bo.map == NULL) {
      anv_gem_close(device, batch->bo.gem_handle);
      return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
   }

   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
   batch->next = batch->bo.map;

   return VK_SUCCESS;
}

void
anv_batch_finish(struct anv_batch *batch, struct anv_device *device)
{
   anv_gem_munmap(batch->bo.map, BATCH_SIZE);
   anv_gem_close(device, batch->bo.gem_handle);
}

void
anv_batch_reset(struct anv_batch *batch)
{
   batch->next = batch->bo.map;
   batch->cmd_relocs.num_relocs = 0;
   batch->surf_relocs.num_relocs = 0;
}

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   void *p = batch->next;

   batch->next += num_dwords * 4;

   return p;
}

static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      struct anv_reloc_list *other, uint32_t offset)
{
   uint32_t i, count;

   count = list->num_relocs;
   memcpy(&list->relocs[count], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[count], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));
   for (i = 0; i < other->num_relocs; i++)
      list->relocs[i + count].offset += offset;

   list->num_relocs += other->num_relocs;
}

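/* Record a relocation: where in the buffer the address is written, which
 * BO it targets, and the offset we presumed when emitting. The kernel only
 * needs to patch the dword if the target BO moves away from its presumed
 * offset.
 */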
static uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   uint32_t offset,
                   struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   assert(list->num_relocs < ANV_BATCH_MAX_RELOCS);

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->bo.map;
   memcpy(batch->next, other->bo.map, size);

   offset = batch->next - batch->bo.map;
   anv_reloc_list_append(&batch->cmd_relocs, &other->cmd_relocs, offset);
   anv_reloc_list_append(&batch->surf_relocs, &other->surf_relocs, offset);

   batch->next += size;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(&batch->cmd_relocs,
                             location - batch->bo.map, bo, delta);
}

VkResult VKAPI vkQueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    cmdBufferCount,
    const VkCmdBuffer*                          pCmdBuffers,
    VkFence                                     fence)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;
   struct anv_device *device = queue->device;
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) pCmdBuffers[0];
   int ret;

   assert(cmdBufferCount == 1);

   if (device->dump_aub)
      anv_cmd_buffer_dump(cmd_buffer);

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf);
      if (ret != 0)
         return vk_error(VK_ERROR_UNKNOWN);

      for (uint32_t i = 0; i < cmd_buffer->bo_count; i++)
         cmd_buffer->exec2_bos[i]->offset = cmd_buffer->exec2_objects[i].offset;
   } else {
      *(uint32_t *)queue->completed_serial.map = cmd_buffer->serial;
   }

   return VK_SUCCESS;
}

VkResult VKAPI vkQueueAddMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkQueueRemoveMemReferences(
    VkQueue                                     queue,
    uint32_t                                    count,
    const VkDeviceMemory*                       pMems)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkQueueWaitIdle(
    VkQueue                                     _queue)
{
   struct anv_queue *queue = (struct anv_queue *) _queue;

   return vkDeviceWaitIdle((VkDevice) queue->device);
}

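/* Wait for the device to go idle by submitting an empty batch (just
 * MI_BATCH_BUFFER_END) and blocking on it with the bo wait ioctl; batches
 * on a context execute in order, so once this one retires all previously
 * submitted work has completed.
 */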
VkResult VKAPI vkDeviceWaitIdle(
    VkDevice                                    _device)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_state state;
   struct anv_batch batch;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo *bo = NULL;
   VkResult result;
   int64_t timeout;
   int ret;

   state = anv_state_pool_alloc(&device->dyn_state_pool, 32, 32);
   bo = &device->dyn_state_pool.block_pool->bo;
   batch.next = state.map;
   anv_batch_emit(&batch, GEN8_MI_BATCH_BUFFER_END);
   anv_batch_emit(&batch, GEN8_MI_NOOP);

   exec2_objects[0].handle = bo->gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo->offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = state.offset;
   execbuf.batch_len = batch.next - state.map;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (!device->no_hw) {
      ret = anv_gem_execbuffer(device, &execbuf);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }

      timeout = INT64_MAX;
      ret = anv_gem_wait(device, bo->gem_handle, &timeout);
      if (ret != 0) {
         result = vk_error(VK_ERROR_UNKNOWN);
         goto fail;
      }
   }

   anv_state_pool_free(&device->dyn_state_pool, state);

   return VK_SUCCESS;

 fail:
   anv_state_pool_free(&device->dyn_state_pool, state);

   return result;
}

void *
anv_device_alloc(struct anv_device *device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType)
{
   return device->instance->pfnAlloc(device->instance->pAllocUserData,
                                     size,
                                     alignment,
                                     allocType);
}

void
anv_device_free(struct anv_device *device,
                void *mem)
{
   device->instance->pfnFree(device->instance->pAllocUserData, mem);
}

VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;

   return VK_SUCCESS;
}

VkResult VKAPI vkAllocMemory(
    VkDevice                                    _device,
    const VkMemoryAllocInfo*                    pAllocInfo,
    VkDeviceMemory*                             pMem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO);

   mem = anv_device_alloc(device, sizeof(*mem), 8,
                          VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_init_new(&mem->bo, device, pAllocInfo->allocationSize);
   if (result != VK_SUCCESS)
      goto fail;

   *pMem = (VkDeviceMemory) mem;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, mem);

   return result;
}

VkResult VKAPI vkFreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_device_free(device, mem);

   return VK_SUCCESS;
}

VkResult VKAPI vkSetMemoryPriority(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkMemoryPriority                            priority)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkMapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle, offset, size);
   mem->map_size = size;

   *ppData = mem->map;

   return VK_SUCCESS;
}

VkResult VKAPI vkUnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem)
{
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   anv_gem_munmap(mem->map, mem->map_size);

   return VK_SUCCESS;
}

VkResult VKAPI vkFlushMappedMemory(
    VkDevice                                    device,
    VkDeviceMemory                              mem,
    VkDeviceSize                                offset,
    VkDeviceSize                                size)
{
   /* clflush here for !llc platforms */

   return VK_SUCCESS;
}

VkResult VKAPI vkPinSystemMemory(
    VkDevice                                    device,
    const void*                                 pSysMem,
    size_t                                      memSize,
    VkDeviceMemory*                             pMem)
{
   return VK_SUCCESS;
}

VkResult VKAPI vkGetMultiDeviceCompatibility(
    VkPhysicalDevice                            physicalDevice0,
    VkPhysicalDevice                            physicalDevice1,
    VkPhysicalDeviceCompatibilityInfo*          pInfo)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedMemory(
    VkDevice                                    device,
    const VkMemoryOpenInfo*                     pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenSharedSemaphore(
    VkDevice                                    device,
    const VkSemaphoreOpenInfo*                  pOpenInfo,
    VkSemaphore*                                pSemaphore)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerMemory(
    VkDevice                                    device,
    const VkPeerMemoryOpenInfo*                 pOpenInfo,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkOpenPeerImage(
    VkDevice                                    device,
    const VkPeerImageOpenInfo*                  pOpenInfo,
    VkImage*                                    pImage,
    VkDeviceMemory*                             pMem)
{
   return VK_UNSUPPORTED;
}

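/* Object destruction dispatches through a table indexed by VkObjectType.
 * Objects that own nothing beyond their own allocation share the generic
 * free destructor.
 */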
static VkResult
anv_instance_destructor(struct anv_device *device,
                        VkObject object)
{
   return vkDestroyInstance(object);
}

static VkResult
anv_noop_destructor(struct anv_device *device,
                    VkObject object)
{
   return VK_SUCCESS;
}

static VkResult
anv_device_destructor(struct anv_device *device,
                      VkObject object)
{
   return vkDestroyDevice(object);
}

static VkResult
anv_cmd_buffer_destructor(struct anv_device *device,
                          VkObject object)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) object;

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_batch_finish(&cmd_buffer->batch, device);
   anv_device_free(device, cmd_buffer->exec2_objects);
   anv_device_free(device, cmd_buffer->exec2_bos);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_destructor(struct anv_device *device,
                        VkObject object)
{
   struct anv_pipeline *pipeline = (struct anv_pipeline *) object;

   return anv_pipeline_destroy(pipeline);
}

static VkResult
anv_free_destructor(struct anv_device *device,
                    VkObject object)
{
   anv_device_free(device, (void *) object);

   return VK_SUCCESS;
}

static VkResult (*anv_object_destructors[])(struct anv_device *device,
                                            VkObject object) = {
   [VK_OBJECT_TYPE_INSTANCE] = anv_instance_destructor,
   [VK_OBJECT_TYPE_PHYSICAL_DEVICE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_DEVICE] = anv_device_destructor,
   [VK_OBJECT_TYPE_QUEUE] = anv_noop_destructor,
   [VK_OBJECT_TYPE_COMMAND_BUFFER] = anv_cmd_buffer_destructor,
   [VK_OBJECT_TYPE_PIPELINE] = anv_pipeline_destructor,
   [VK_OBJECT_TYPE_SHADER] = anv_free_destructor,
   [VK_OBJECT_TYPE_BUFFER] = anv_free_destructor,
   [VK_OBJECT_TYPE_IMAGE] = anv_free_destructor,
   [VK_OBJECT_TYPE_RENDER_PASS] = anv_free_destructor
};

VkResult VKAPI vkDestroyObject(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object)
{
   struct anv_device *device = (struct anv_device *) _device;

   assert(objType < ARRAY_SIZE(anv_object_destructors) &&
          anv_object_destructors[objType] != NULL);

   return anv_object_destructors[objType](device, object);
}

static void
fill_memory_requirements(
    VkObjectType objType,
    VkObject object,
    VkMemoryRequirements *memory_requirements)
{
   struct anv_buffer *buffer;
   struct anv_image *image;

   memory_requirements->memPropsAllowed =
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT |
      /* VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT | */
      VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT |
      VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL |
      VK_MEMORY_PROPERTY_SHAREABLE_BIT;

   memory_requirements->memPropsRequired = 0;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      memory_requirements->size = buffer->size;
      memory_requirements->alignment = 16;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      memory_requirements->size = image->size;
      memory_requirements->alignment = image->alignment;
      break;
   default:
      memory_requirements->size = 0;
      break;
   }
}

VkResult VKAPI vkGetObjectInfo(
    VkDevice                                    _device,
    VkObjectType                                objType,
    VkObject                                    object,
    VkObjectInfoType                            infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   VkMemoryRequirements memory_requirements;

   switch (infoType) {
   case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
      fill_memory_requirements(objType, object, &memory_requirements);
      memcpy(pData, &memory_requirements,
             MIN2(*pDataSize, sizeof(memory_requirements)));
      *pDataSize = sizeof(memory_requirements);
      return VK_SUCCESS;

   case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
   default:
      return VK_UNSUPPORTED;
   }
}

VkResult VKAPI vkQueueBindObjectMemory(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceMemory                              _mem,
    VkDeviceSize                                memOffset)
{
   struct anv_buffer *buffer;
   struct anv_image *image;
   struct anv_device_memory *mem = (struct anv_device_memory *) _mem;

   switch (objType) {
   case VK_OBJECT_TYPE_BUFFER:
      buffer = (struct anv_buffer *) object;
      buffer->mem = mem;
      buffer->offset = memOffset;
      break;
   case VK_OBJECT_TYPE_IMAGE:
      image = (struct anv_image *) object;
      image->mem = mem;
      image->offset = memOffset;
      break;
   default:
      break;
   }

   return VK_SUCCESS;
}

VkResult VKAPI vkQueueBindObjectMemoryRange(
    VkQueue                                     queue,
    VkObjectType                                objType,
    VkObject                                    object,
    uint32_t                                    allocationIdx,
    VkDeviceSize                                rangeOffset,
    VkDeviceSize                                rangeSize,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkQueueBindImageMemoryRange(
    VkQueue                                     queue,
    VkImage                                     image,
    uint32_t                                    allocationIdx,
    const VkImageMemoryBindInfo*                pBindInfo,
    VkDeviceMemory                              mem,
    VkDeviceSize                                memOffset)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkCreateFence(
    VkDevice                                    device,
    const VkFenceCreateInfo*                    pCreateInfo,
    VkFence*                                    pFence)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkResetFences(
    VkDevice                                    device,
    uint32_t                                    fenceCount,
    VkFence*                                    pFences)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkGetFenceStatus(
    VkDevice                                    device,
    VkFence                                     fence)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkWaitForFences(
    VkDevice                                    device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    bool32_t                                    waitAll,
    uint64_t                                    timeout)
{
   return VK_UNSUPPORTED;
}

// Queue semaphore functions

VkResult VKAPI vkCreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    VkSemaphore*                                pSemaphore)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkQueueSignalSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkQueueWaitSemaphore(
    VkQueue                                     queue,
    VkSemaphore                                 semaphore)
{
   return VK_UNSUPPORTED;
}

// Event functions

VkResult VKAPI vkCreateEvent(
    VkDevice                                    device,
    const VkEventCreateInfo*                    pCreateInfo,
    VkEvent*                                    pEvent)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkGetEventStatus(
    VkDevice                                    device,
    VkEvent                                     event)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkSetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkResetEvent(
    VkDevice                                    device,
    VkEvent                                     event)
{
   return VK_UNSUPPORTED;
}

// Query functions

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};

VkResult VKAPI vkCreateQueryPool(
    VkDevice                                    _device,
    const VkQueryPoolCreateInfo*                pCreateInfo,
    VkQueryPool*                                pQueryPool)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_query_pool *pool;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->type = pCreateInfo->queryType;
   result = anv_bo_init_new(&pool->bo, device, pCreateInfo->slots * 16);
   if (result != VK_SUCCESS)
      goto fail;

   *pQueryPool = (VkQueryPool) pool;

   return VK_SUCCESS;

 fail:
   anv_device_free(device, pool);

   return result;
}

VkResult VKAPI vkGetQueryPoolResults(
    VkDevice                                    device,
    VkQueryPool                                 queryPool,
    uint32_t                                    startQuery,
    uint32_t                                    queryCount,
    size_t*                                     pDataSize,
    void*                                       pData,
    VkQueryResultFlags                          flags)
{
   return VK_UNSUPPORTED;
}

// Format capabilities

VkResult VKAPI vkGetFormatInfo(
    VkDevice                                    device,
    VkFormat                                    format,
    VkFormatInfoType                            infoType,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   return VK_UNSUPPORTED;
}

// Buffer functions

VkResult VKAPI vkCreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    VkBuffer*                                   pBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_device_alloc(device, sizeof(*buffer), 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->mem = NULL;
   buffer->offset = 0;

   *pBuffer = (VkBuffer) buffer;

   return VK_SUCCESS;
}

// Buffer view functions

VkResult VKAPI vkCreateBufferView(
    VkDevice                                    _device,
    const VkBufferViewCreateInfo*               pCreateInfo,
    VkBufferView*                               pView)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_buffer_view *view;
   const struct anv_format *format;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO);

   view = anv_device_alloc(device, sizeof(*view), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (view == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   view->buffer = (struct anv_buffer *) pCreateInfo->buffer;
   view->offset = pCreateInfo->offset;
   view->surface_state =
      anv_state_pool_alloc(&device->surface_state_pool, 64, 64);

   format = anv_format_for_vk_format(pCreateInfo->format);
   /* This assumes RGBA float format. */
   uint32_t stride = 4;
   uint32_t num_elements = pCreateInfo->range / stride;
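   /* For SURFTYPE_BUFFER the Width/Height/Depth fields together encode the
    * element count: Width holds bits 6:0, Height bits 20:7 and Depth bits
    * 26:21, which is why num_elements is split across the three fields
    * below.
    */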
   struct GEN8_RENDER_SURFACE_STATE surface_state = {
      .SurfaceType = SURFTYPE_BUFFER,
      .SurfaceArray = false,
      .SurfaceFormat = format->format,
      .SurfaceVerticalAlignment = VALIGN4,
      .SurfaceHorizontalAlignment = HALIGN4,
      .TileMode = LINEAR,
      .VerticalLineStride = 0,
      .VerticalLineStrideOffset = 0,
      .SamplerL2BypassModeDisable = true,
      .RenderCacheReadWriteMode = WriteOnlyCache,
      .MemoryObjectControlState = 0, /* FIXME: MOCS */
      .BaseMipLevel = 0,
      .SurfaceQPitch = 0,
      .Height = (num_elements >> 7) & 0x3fff,
      .Width = num_elements & 0x7f,
      .Depth = (num_elements >> 21) & 0x3f,
      .SurfacePitch = stride - 1,
      .MinimumArrayElement = 0,
      .NumberofMultisamples = MULTISAMPLECOUNT_1,
      .XOffset = 0,
      .YOffset = 0,
      .SurfaceMinLOD = 0,
      .MIPCountLOD = 0,
      .AuxiliarySurfaceMode = AUX_NONE,
      .RedClearColor = 0,
      .GreenClearColor = 0,
      .BlueClearColor = 0,
      .AlphaClearColor = 0,
      .ShaderChannelSelectRed = SCS_RED,
      .ShaderChannelSelectGreen = SCS_GREEN,
      .ShaderChannelSelectBlue = SCS_BLUE,
      .ShaderChannelSelectAlpha = SCS_ALPHA,
      .ResourceMinLOD = 0,
      /* FIXME: We assume that the image must be bound at this time. */
      .SurfaceBaseAddress = { NULL, view->buffer->offset + view->offset },
   };

   GEN8_RENDER_SURFACE_STATE_pack(NULL, view->surface_state.map, &surface_state);

   *pView = (VkBufferView) view;

   return VK_SUCCESS;
}

// Sampler functions

struct anv_sampler {
   uint32_t state[4];
};

VkResult VKAPI vkCreateSampler(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    VkSampler*                                  pSampler)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = anv_device_alloc(device, sizeof(*sampler), 8,
                              VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!sampler)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct GEN8_SAMPLER_STATE sampler_state = {
      .SamplerDisable = 0,
      .TextureBorderColorMode = 0,
      .LODPreClampMode = 0,
      .BaseMipLevel = 0,
      .MipModeFilter = 0,
      .MagModeFilter = 0,
      .MinModeFilter = 0,
      .TextureLODBias = 0,
      .AnisotropicAlgorithm = 0,
      .MinLOD = 0,
      .MaxLOD = 0,
      .ChromaKeyEnable = 0,
      .ChromaKeyIndex = 0,
      .ChromaKeyMode = 0,
      .ShadowFunction = 0,
      .CubeSurfaceControlMode = 0,
      .IndirectStatePointer = 0,
      .LODClampMagnificationMode = 0,
      .MaximumAnisotropy = 0,
      .RAddressMinFilterRoundingEnable = 0,
      .RAddressMagFilterRoundingEnable = 0,
      .VAddressMinFilterRoundingEnable = 0,
      .VAddressMagFilterRoundingEnable = 0,
      .UAddressMinFilterRoundingEnable = 0,
      .UAddressMagFilterRoundingEnable = 0,
      .TrilinearFilterQuality = 0,
      .NonnormalizedCoordinateEnable = 0,
      .TCXAddressControlMode = 0,
      .TCYAddressControlMode = 0,
      .TCZAddressControlMode = 0,
   };

   GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state);

   *pSampler = (VkSampler) sampler;

   return VK_SUCCESS;
}

// Descriptor set functions

VkResult VKAPI vkCreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    VkDescriptorSetLayout*                      pSetLayout)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_descriptor_set_layout *set_layout;
   uint32_t count, k;
   size_t size, total;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   count = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++)
      count += pCreateInfo->pBinding[i].count;

   size = sizeof(*set_layout) +
      count * sizeof(set_layout->bindings[0]);
   set_layout = anv_device_alloc(device, size, 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   k = 0;
   total = 0;
   for (uint32_t i = 0; i < pCreateInfo->count; i++) {
      for (uint32_t j = 0; j < pCreateInfo->pBinding[i].count; j++) {
         set_layout->bindings[k].mask = pCreateInfo->pBinding[i].stageFlags;
         set_layout->bindings[k].type = pCreateInfo->pBinding[i].descriptorType;
         k++;
      }

      total += pCreateInfo->pBinding[i].count *
         __builtin_popcount(pCreateInfo->pBinding[i].stageFlags);
   }

   set_layout->total = total;
   set_layout->count = count;

   *pSetLayout = (VkDescriptorSetLayout) set_layout;

   return VK_SUCCESS;
}

VkResult VKAPI vkBeginDescriptorPoolUpdate(
    VkDevice                                    device,
    VkDescriptorUpdateMode                      updateMode)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkEndDescriptorPoolUpdate(
    VkDevice                                    device,
    VkCmdBuffer                                 cmd)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkCreateDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPoolUsage                       poolUsage,
    uint32_t                                    maxSets,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    VkDescriptorPool*                           pDescriptorPool)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkResetDescriptorPool(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool)
{
   return VK_UNSUPPORTED;
}

VkResult VKAPI vkAllocDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorSetUsage                        setUsage,
    uint32_t                                    count,
    const VkDescriptorSetLayout*                pSetLayouts,
    VkDescriptorSet*                            pDescriptorSets,
    uint32_t*                                   pCount)
{
   struct anv_device *device = (struct anv_device *) _device;
   const struct anv_descriptor_set_layout *layout;
   struct anv_descriptor_set *set;
   size_t size;

   for (uint32_t i = 0; i < count; i++) {
      layout = (struct anv_descriptor_set_layout *) pSetLayouts[i];
      size = sizeof(*set) + layout->total * sizeof(set->descriptors[0]);
      set = anv_device_alloc(device, size, 8,
                             VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
      if (!set) {
         *pCount = i;
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      pDescriptorSets[i] = (VkDescriptorSet) set;
   }

   *pCount = count;

   return VK_SUCCESS;
}

void VKAPI vkClearDescriptorSets(
    VkDevice                                    device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
}

void VKAPI vkUpdateDescriptors(
    VkDevice                                    _device,
    VkDescriptorSet                             descriptorSet,
    uint32_t                                    updateCount,
    const void**                                ppUpdateArray)
{
   struct anv_descriptor_set *set = (struct anv_descriptor_set *) descriptorSet;
   VkUpdateSamplers *update_samplers;
   VkUpdateSamplerTextures *update_sampler_textures;
   VkUpdateImages *update_images;
   VkUpdateBuffers *update_buffers;
   VkUpdateAsCopy *update_as_copy;

   for (uint32_t i = 0; i < updateCount; i++) {
      const struct anv_common *common = ppUpdateArray[i];

      switch (common->sType) {
      case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
         update_samplers = (VkUpdateSamplers *) common;

         for (uint32_t j = 0; j < update_samplers->count; j++) {
            set->descriptors[update_samplers->binding + j] =
               (void *) update_samplers->pSamplers[j];
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
         /* FIXME: Shouldn't this be *_UPDATE_SAMPLER_IMAGES? */
         update_sampler_textures = (VkUpdateSamplerTextures *) common;

         for (uint32_t j = 0; j < update_sampler_textures->count; j++) {
            set->descriptors[update_sampler_textures->binding + j] =
               (void *) update_sampler_textures->pSamplerImageViews[j].pImageView->view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
         update_images = (VkUpdateImages *) common;

         for (uint32_t j = 0; j < update_images->count; j++) {
            set->descriptors[update_images->binding + j] =
               (void *) update_images->pImageViews[j].view;
         }
         break;

      case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
         update_buffers = (VkUpdateBuffers *) common;

         for (uint32_t j = 0; j < update_buffers->count; j++) {
            set->descriptors[update_buffers->binding + j] =
               (void *) update_buffers->pBufferViews[j].view;
         }
         /* FIXME: descriptor arrays? */
         break;

      case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
         update_as_copy = (VkUpdateAsCopy *) common;
         (void) update_as_copy;
         break;

      default:
         break;
      }
   }
}

// State object functions

static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

VkResult VKAPI vkCreateDynamicViewportState(
    VkDevice                                    _device,
    const VkDynamicVpStateCreateInfo*           pCreateInfo,
    VkDynamicVpState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_vp_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   unsigned count = pCreateInfo->viewportAndScissorCount;
   state->sf_clip_vp = anv_state_pool_alloc(&device->dyn_state_pool,
                                            count * 64, 64);
   state->cc_vp = anv_state_pool_alloc(&device->dyn_state_pool,
                                       count * 8, 32);
   state->scissor = anv_state_pool_alloc(&device->dyn_state_pool,
                                         count * 32, 32);

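   /* Each viewport generates three pieces of hardware state: an
    * SF_CLIP_VIEWPORT holding the NDC-to-window transform (m00/m11/m22
    * scale, m30/m31/m32 translate) plus the guardband, a CC_VIEWPORT with
    * the depth range, and a SCISSOR_RECT.
    */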
   for (uint32_t i = 0; i < pCreateInfo->viewportAndScissorCount; i++) {
      const VkViewport *vp = &pCreateInfo->pViewports[i];
      const VkRect *s = &pCreateInfo->pScissors[i];

      struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = {
         .ViewportMatrixElementm00 = vp->width / 2,
         .ViewportMatrixElementm11 = vp->height / 2,
         .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
         .ViewportMatrixElementm30 = vp->originX + vp->width / 2,
         .ViewportMatrixElementm31 = vp->originY + vp->height / 2,
         .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
         .XMinClipGuardband = -1.0f,
         .XMaxClipGuardband = 1.0f,
         .YMinClipGuardband = -1.0f,
         .YMaxClipGuardband = 1.0f,
         .XMinViewPort = vp->originX,
         .XMaxViewPort = vp->originX + vp->width - 1,
         .YMinViewPort = vp->originY,
         .YMaxViewPort = vp->originY + vp->height - 1,
      };

      struct GEN8_CC_VIEWPORT cc_viewport = {
         .MinimumDepth = vp->minDepth,
         .MaximumDepth = vp->maxDepth
      };

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips. In case clip x, y, width and height
       * are all 0, the clamps below produce 0 for xmin, ymin, xmax, ymax,
       * which isn't what we want. Just special case empty clips and
       * produce a canonical empty clip. */
      static const struct GEN8_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN8_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      GEN8_SF_CLIP_VIEWPORT_pack(NULL, state->sf_clip_vp.map + i * 64, &sf_clip_viewport);
      GEN8_CC_VIEWPORT_pack(NULL, state->cc_vp.map + i * 8, &cc_viewport);

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &empty_scissor);
      } else {
         GEN8_SCISSOR_RECT_pack(NULL, state->scissor.map + i * 32, &scissor);
      }
   }

   *pState = (VkDynamicVpState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicRasterState(
    VkDevice                                    _device,
    const VkDynamicRsStateCreateInfo*           pCreateInfo,
    VkDynamicRsState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_rs_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Missing these:
    * float depthBias;
    * float depthBiasClamp;
    * float slopeScaledDepthBias;
    * float pointFadeThreshold;
    * // optional (GL45) - Size of point fade threshold
    */

   struct GEN8_3DSTATE_SF sf = {
      GEN8_3DSTATE_SF_header,
      .LineWidth = pCreateInfo->lineWidth,
      .PointWidth = pCreateInfo->pointSize,
   };

   GEN8_3DSTATE_SF_pack(NULL, state->state_sf, &sf);

   *pState = (VkDynamicRsState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicColorBlendState(
    VkDevice                                    _device,
    const VkDynamicCbStateCreateInfo*           pCreateInfo,
    VkDynamicCbState*                           pState)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_dynamic_cb_state *state;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO);

   state = anv_device_alloc(device, sizeof(*state), 8,
                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (state == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *pState = (VkDynamicCbState) state;

   return VK_SUCCESS;
}

VkResult VKAPI vkCreateDynamicDepthStencilState(
    VkDevice                                    device,
    const VkDynamicDsStateCreateInfo*           pCreateInfo,
    VkDynamicDsState*                           pState)
{
   return VK_UNSUPPORTED;
}

// Command buffer functions

VkResult VKAPI vkCreateCommandBuffer(
    VkDevice                                    _device,
    const VkCmdBufferCreateInfo*                pCreateInfo,
    VkCmdBuffer*                                pCmdBuffer)
{
   struct anv_device *device = (struct anv_device *) _device;
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;

   result = anv_batch_init(&cmd_buffer->batch, device);
   if (result != VK_SUCCESS)
      goto fail;

   cmd_buffer->exec2_objects =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_objects[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_objects == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch;
   }

   cmd_buffer->exec2_bos =
      anv_device_alloc(device, 8192 * sizeof(cmd_buffer->exec2_bos[0]), 8,
                       VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer->exec2_bos == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_exec2_objects;
   }

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);

   cmd_buffer->dirty = 0;
   cmd_buffer->vb_dirty = 0;

   *pCmdBuffer = (VkCmdBuffer) cmd_buffer;

   return VK_SUCCESS;

 fail_exec2_objects:
   anv_device_free(device, cmd_buffer->exec2_objects);
 fail_batch:
   anv_batch_finish(&cmd_buffer->batch, device);
 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}

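/* Beginning a command buffer emits the invariant state every batch needs:
 * pipeline select, STATE_BASE_ADDRESS pointing at the driver's state
 * pools, a fixed push-constant partition, and a block of hardcoded
 * defaults (tessellation and streamout disabled, null depth buffer).
 */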
VkResult VKAPI vkBeginCommandBuffer(
    VkCmdBuffer                                 cmdBuffer,
    const VkCmdBufferBeginInfo*                 pBeginInfo)
{
   struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
   struct anv_device *device = cmd_buffer->device;

   anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT,
                  .PipelineSelection = _3D);
   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_SIP);

   anv_batch_emit(&cmd_buffer->batch, GEN8_STATE_BASE_ADDRESS,
                  .GeneralStateBaseAddress = { NULL, 0 },
                  .GeneralStateBaseAddressModifyEnable = true,
                  .GeneralStateBufferSize = 0xfffff,
                  .GeneralStateBufferSizeModifyEnable = true,

                  .SurfaceStateBaseAddress = { &device->surface_state_block_pool.bo, 0 },
                  .SurfaceStateMemoryObjectControlState = 0, /* FIXME: MOCS */
                  .SurfaceStateBaseAddressModifyEnable = true,

                  .DynamicStateBaseAddress = { &device->dyn_state_block_pool.bo, 0 },
                  .DynamicStateBaseAddressModifyEnable = true,
                  .DynamicStateBufferSize = 0xfffff,
                  .DynamicStateBufferSizeModifyEnable = true,

                  .IndirectObjectBaseAddress = { NULL, 0 },
                  .IndirectObjectBaseAddressModifyEnable = true,
                  .IndirectObjectBufferSize = 0xfffff,
                  .IndirectObjectBufferSizeModifyEnable = true,

                  .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
                  .InstructionBaseAddressModifyEnable = true,
                  .InstructionBufferSize = 0xfffff,
                  .InstructionBuffersizeModifyEnable = true);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF_STATISTICS,
                  .StatisticsEnable = true);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HS, .Enable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_TE, .TEEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DS, .FunctionEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
                  .ConstantBufferOffset = 0,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
                  .ConstantBufferOffset = 4,
                  .ConstantBufferSize = 4);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
                  .ConstantBufferOffset = 8,
                  .ConstantBufferSize = 4);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_CLIP,
                  .ClipEnable = true,
                  .ViewportXYClipTestEnable = true);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_CHROMAKEY,
                  .ChromaKeyKillEnable = false);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SBE_SWIZ);
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS);

   /* Hardcoded state: */
   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER,
                  .SurfaceType = SURFTYPE_2D,
                  .Width = 1,
                  .Height = 1,
                  .SurfaceFormat = D16_UNORM,
                  .SurfaceBaseAddress = { NULL, 0 },
                  .HierarchicalDepthBufferEnable = 0);

   anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_WM_DEPTH_STENCIL,
                  .DepthTestEnable = false,
                  .DepthBufferWriteEnable = false);

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo, struct anv_reloc_list *list)
{
   struct drm_i915_gem_exec_object2 *obj;

   bo->index = cmd_buffer->bo_count;
   obj = &cmd_buffer->exec2_objects[bo->index];
   cmd_buffer->exec2_bos[bo->index] = bo;
   cmd_buffer->bo_count++;

   obj->handle = bo->gem_handle;
   obj->relocation_count = 0;
   obj->relocs_ptr = 0;
   obj->alignment = 0;
   obj->offset = bo->offset;
   obj->flags = 0;
   obj->rsvd1 = 0;
   obj->rsvd2 = 0;

   if (list) {
      obj->relocation_count = list->num_relocs;
      obj->relocs_ptr = (uintptr_t) list->relocs;
   }
}

static void
anv_cmd_buffer_add_validate_bos(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_reloc_list *list)
{
   struct anv_bo *bo, *batch_bo;

   batch_bo = &cmd_buffer->batch.bo;
   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      /* Skip any relocations targeting the batch bo. We need to make sure
       * it's the last in the list so we'll add it manually later.
       */
      if (bo == batch_bo)
         continue;
      if (bo->index < cmd_buffer->bo_count && cmd_buffer->exec2_bos[bo->index] == bo)
         continue;

      anv_cmd_buffer_add_bo(cmd_buffer, bo, NULL);
   }
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bos' current offsets and,
    * if none of the bos have moved, it will skip relocation processing
    * altogether. If I915_EXEC_NO_RELOC is not supported, the kernel ignores
    * the incoming value of offset so we can set it either way. For that to
    * work we need to make sure all relocs use the same presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}

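/* Ending a command buffer assembles the final execbuf. The kernel treats
 * the last object in the list as the batch, so pool BOs and relocation
 * targets are added first and the batch BO last. If every BO is still at
 * its presumed offset, I915_EXEC_NO_RELOC lets the kernel skip relocation
 * processing entirely.
 */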
1930 VkResult VKAPI vkEndCommandBuffer(
1931 VkCmdBuffer cmdBuffer)
1932 {
1933 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
1934 struct anv_device *device = cmd_buffer->device;
1935 struct anv_batch *batch = &cmd_buffer->batch;
1936
1937 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_END);
1938
1939 /* Round batch up to an even number of dwords. */
1940 if ((batch->next - batch->bo.map) & 4)
1941 anv_batch_emit(batch, GEN8_MI_NOOP);
1942
1943 cmd_buffer->bo_count = 0;
1944 cmd_buffer->need_reloc = false;
1945
1946 /* Lock for access to bo->index. */
1947 pthread_mutex_lock(&device->mutex);
1948
1949 /* Add block pool bos first so we can add them with their relocs. */
1950 anv_cmd_buffer_add_bo(cmd_buffer, &device->surface_state_block_pool.bo,
1951 &batch->surf_relocs);
1952
1953 anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->surf_relocs);
1954 anv_cmd_buffer_add_validate_bos(cmd_buffer, &batch->cmd_relocs);
1955 anv_cmd_buffer_add_bo(cmd_buffer, &batch->bo, &batch->cmd_relocs);
1956 anv_cmd_buffer_process_relocs(cmd_buffer, &batch->surf_relocs);
1957 anv_cmd_buffer_process_relocs(cmd_buffer, &batch->cmd_relocs);
1958
1959 cmd_buffer->execbuf.buffers_ptr = (uintptr_t) cmd_buffer->exec2_objects;
1960 cmd_buffer->execbuf.buffer_count = cmd_buffer->bo_count;
1961 cmd_buffer->execbuf.batch_start_offset = 0;
1962 cmd_buffer->execbuf.batch_len = batch->next - batch->bo.map;
1963 cmd_buffer->execbuf.cliprects_ptr = 0;
1964 cmd_buffer->execbuf.num_cliprects = 0;
1965 cmd_buffer->execbuf.DR1 = 0;
1966 cmd_buffer->execbuf.DR4 = 0;
1967
1968 cmd_buffer->execbuf.flags = I915_EXEC_HANDLE_LUT;
1969 if (!cmd_buffer->need_reloc)
1970 cmd_buffer->execbuf.flags |= I915_EXEC_NO_RELOC;
1971 cmd_buffer->execbuf.flags |= I915_EXEC_RENDER;
1972 cmd_buffer->execbuf.rsvd1 = device->context_id;
1973 cmd_buffer->execbuf.rsvd2 = 0;
1974
1975 pthread_mutex_unlock(&device->mutex);
1976
1977 return VK_SUCCESS;
1978 }
1979
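/* The execbuf filled in above is handed to the kernel unchanged at submit
 * time. A minimal sketch of that side, assuming the device fd is at hand
 * (the exact field name is an assumption here):
 *
 *    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
 *                 &cmd_buffer->execbuf) != 0)
 *       ... report the error ...
 *
 * Everything the kernel needs (bo list, reloc lists, batch length) was
 * computed here, so the submit path itself stays trivial.
 */
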
1980 VkResult VKAPI vkResetCommandBuffer(
1981 VkCmdBuffer cmdBuffer)
1982 {
1983 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
1984
1985 anv_batch_reset(&cmd_buffer->batch);
1986
1987 return VK_SUCCESS;
1988 }
1989
/* Command buffer building functions */
1991
1992 void VKAPI vkCmdBindPipeline(
1993 VkCmdBuffer cmdBuffer,
1994 VkPipelineBindPoint pipelineBindPoint,
1995 VkPipeline _pipeline)
1996 {
1997 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
1998
1999 cmd_buffer->pipeline = (struct anv_pipeline *) _pipeline;
2000 cmd_buffer->dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
2001 }
2002
2003 void VKAPI vkCmdBindDynamicStateObject(
2004 VkCmdBuffer cmdBuffer,
2005 VkStateBindPoint stateBindPoint,
2006 VkDynamicStateObject dynamicState)
2007 {
2008 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2009 struct anv_dynamic_vp_state *vp_state;
2010
2011 switch (stateBindPoint) {
2012 case VK_STATE_BIND_POINT_VIEWPORT:
2013 vp_state = (struct anv_dynamic_vp_state *) dynamicState;
2014
2015 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_SCISSOR_STATE_POINTERS,
2016 .ScissorRectPointer = vp_state->scissor.offset);
2017 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2018 .CCViewportPointer = vp_state->cc_vp.offset);
2019 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2020 .SFClipViewportPointer = vp_state->sf_clip_vp.offset);
2021 break;
2022 case VK_STATE_BIND_POINT_RASTER:
2023 cmd_buffer->rs_state = (struct anv_dynamic_rs_state *) dynamicState;
2024 cmd_buffer->dirty |= ANV_CMD_BUFFER_RS_DIRTY;
2025 break;
2026 case VK_STATE_BIND_POINT_COLOR_BLEND:
2027 case VK_STATE_BIND_POINT_DEPTH_STENCIL:
2028 break;
2029 default:
2030 break;
   }
2032 }
2033
2034 void VKAPI vkCmdBindDescriptorSets(
2035 VkCmdBuffer cmdBuffer,
2036 VkPipelineBindPoint pipelineBindPoint,
2037 uint32_t firstSet,
2038 uint32_t setCount,
2039 const VkDescriptorSet* pDescriptorSets,
2040 uint32_t dynamicOffsetCount,
2041 const uint32_t* pDynamicOffsets)
2042 {
2043 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2044
   /* What are the semantics for setting descriptor sets? Assume that
    * setting preserves lower sets and invalidates higher sets. This means
    * we can set the number of active sets to firstSet + setCount.
    */
2049
2050 for (uint32_t i = 0; i < setCount; i++)
2051 cmd_buffer->descriptor_sets[firstSet + i] =
2052 (struct anv_descriptor_set *) pDescriptorSets[i];
2053
2054 cmd_buffer->num_descriptor_sets = firstSet + setCount;
2055 cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
2056 }
2057
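/* Example of the assumed semantics: with sets 0 and 1 bound, binding two
 * sets at firstSet = 1 leaves set 0 alone, replaces set 1, installs set 2,
 * and num_descriptor_sets becomes 3.
 */
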
2058 void VKAPI vkCmdBindIndexBuffer(
2059 VkCmdBuffer cmdBuffer,
2060 VkBuffer _buffer,
2061 VkDeviceSize offset,
2062 VkIndexType indexType)
2063 {
2064 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2065 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2066
2067 static const uint32_t vk_to_gen_index_type[] = {
2068 [VK_INDEX_TYPE_UINT8] = INDEX_BYTE,
2069 [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
2070 [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
2071 };
2072
2073 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER,
2074 .IndexFormat = vk_to_gen_index_type[indexType],
2075 .MemoryObjectControlState = 0,
2076 .BufferStartingAddress = { &buffer->mem->bo, buffer->offset + offset },
2077 .BufferSize = buffer->size - offset);
2078 }
2079
2080 void VKAPI vkCmdBindVertexBuffers(
2081 VkCmdBuffer cmdBuffer,
2082 uint32_t startBinding,
2083 uint32_t bindingCount,
2084 const VkBuffer* pBuffers,
2085 const VkDeviceSize* pOffsets)
2086 {
2087 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2088
   /* We have to defer setting up the vertex buffers until we know the
    * buffer strides, which come from the pipeline. */
2091
2092 for (uint32_t i = 0; i < bindingCount; i++) {
2093 cmd_buffer->vb[startBinding + i].buffer = (struct anv_buffer *) pBuffers[i];
2094 cmd_buffer->vb[startBinding + i].offset = pOffsets[i];
2095 cmd_buffer->vb_dirty |= 1 << (startBinding + i);
2096 }
2097 }
2098
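/* Binding table layout built below, per shader stage (each entry is a
 * 4-byte offset into the surface state block pool):
 *
 *    [0, MAX_RTS)     render-target surfaces (fragment stage only);
 *                     this is the "bias" added to descriptor slots
 *    [bias, count)    surfaces for the stage's descriptor entries
 */
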
2099 static void
2100 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
2101 {
   static const uint32_t opcodes[] = {
      [VK_SHADER_STAGE_VERTEX]          = 38, /* 3DSTATE_BINDING_TABLE_POINTERS_VS */
      [VK_SHADER_STAGE_TESS_CONTROL]    = 39, /* 3DSTATE_BINDING_TABLE_POINTERS_HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40, /* 3DSTATE_BINDING_TABLE_POINTERS_DS */
      [VK_SHADER_STAGE_GEOMETRY]        = 41, /* 3DSTATE_BINDING_TABLE_POINTERS_GS */
      [VK_SHADER_STAGE_FRAGMENT]        = 42, /* 3DSTATE_BINDING_TABLE_POINTERS_PS */
      [VK_SHADER_STAGE_COMPUTE]         = 0,  /* compute is not handled here */
   };
2110
2111 struct anv_pipeline_layout *layout = cmd_buffer->pipeline->layout;
2112 struct anv_framebuffer *framebuffer = cmd_buffer->framebuffer;
2113
   for (uint32_t s = 0; s < VK_NUM_SHADER_STAGE; s++) {
      uint32_t bias = (s == VK_SHADER_STAGE_FRAGMENT) ? MAX_RTS : 0;
2117 uint32_t count, *table;
2118 struct anv_state table_state;
2119
2120 if (layout)
2121 count = layout->stage[s].count + bias;
2122 else if (s == VK_SHADER_STAGE_FRAGMENT)
2123 count = framebuffer->color_attachment_count;
2124 else
2125 count = 0;
2126
2127 if (count == 0)
2128 continue;
2129
2130 table_state = anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
2131 count * 4, 32);
2132 table = table_state.map;
2133
2134 if (s == VK_SHADER_STAGE_FRAGMENT) {
2135 for (uint32_t i = 0; i < framebuffer->color_attachment_count; i++) {
2136 struct anv_color_attachment_view *view = framebuffer->color_attachments[i];
2137 table[i] = view->surface_state.offset;
2138
2139 /* Don't write the reloc back to the surface state. We do that at
2140 * submit time. Surface address is dwords 8-9. */
2141 anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
2142 view->surface_state.offset + 8 * sizeof(int32_t),
2143 &view->image->mem->bo, view->image->offset);
2144 }
2145 }
2146
2147 if (layout) {
2148 for (uint32_t i = 0; i < layout->stage[s].count; i++) {
2149 struct anv_pipeline_layout_entry *e = &layout->stage[s].entries[i];
2150 struct anv_image_view *image_view;
2151 struct anv_buffer_view *buffer_view;
2152 void *d = cmd_buffer->descriptor_sets[e->set]->descriptors[e->index];
2153
2154 switch (e->type) {
2155 case VK_DESCRIPTOR_TYPE_SAMPLER:
2156 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2157 break;
2158 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2159 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2160 image_view = d;
2161 table[bias + i] = image_view->surface_state.offset;
2162 anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
2163 image_view->surface_state.offset + 8 * sizeof(int32_t),
2164 &image_view->image->mem->bo,
2165 image_view->image->offset);
2166 break;
2167 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2168 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2169 /* FIXME: What are these? TBOs? */
2170 break;
2171
2172 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2173 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2174 buffer_view = d;
2175 table[bias + i] = buffer_view->surface_state.offset;
2176 anv_reloc_list_add(&cmd_buffer->batch.surf_relocs,
2177 buffer_view->surface_state.offset + 8 * sizeof(int32_t),
2178 &buffer_view->buffer->mem->bo,
2179 buffer_view->buffer->offset + buffer_view->offset);
2180 break;
2181
2182 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2183 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2184 break;
2185 default:
2186 break;
2187 }
2188 }
2189 }
2190
2191 /* FIXME: Samplers */
2192
      /* The binding table pointer commands all have the same layout; only
       * the 3D command sub-opcode differs.
       */
      anv_batch_emit(&cmd_buffer->batch,
                     GEN8_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode = opcodes[s],
                     .PointertoVSBindingTable = table_state.offset);
2200 }
2201 }
2202
2203 static void
2204 anv_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
2205 {
   struct anv_pipeline *pipeline = cmd_buffer->pipeline;

   if (cmd_buffer->vb_dirty) {
      const uint32_t num_buffers = __builtin_popcount(cmd_buffer->vb_dirty);
      const uint32_t num_dwords = 1 + num_buffers * 4;
      uint32_t *p;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN8_3DSTATE_VERTEX_BUFFERS);
2214 uint32_t vb, i = 0;
2215 for_each_bit(vb, cmd_buffer->vb_dirty) {
2216 struct anv_buffer *buffer = cmd_buffer->vb[vb].buffer;
2217 uint32_t offset = cmd_buffer->vb[vb].offset;
2218
2219 struct GEN8_VERTEX_BUFFER_STATE state = {
2220 .VertexBufferIndex = vb,
2221 .MemoryObjectControlState = 0,
2222 .AddressModifyEnable = true,
2223 .BufferPitch = pipeline->binding_stride[vb],
2224 .BufferStartingAddress = { &buffer->mem->bo, buffer->offset + offset },
2225 .BufferSize = buffer->size - offset
2226 };
2227
2228 GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
2229 i++;
2230 }
2231 }
2232
2233 if (cmd_buffer->dirty & ANV_CMD_BUFFER_PIPELINE_DIRTY)
2234 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
2235
2236 if (cmd_buffer->dirty & ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY)
2237 flush_descriptor_sets(cmd_buffer);
2238
2239 if (cmd_buffer->dirty & (ANV_CMD_BUFFER_PIPELINE_DIRTY | ANV_CMD_BUFFER_RS_DIRTY)) {
2240 /* maybe: anv_batch_merge(batch, GEN8_3DSTATE_SF, a, b) */
2241 uint32_t *dw;
2242
2243 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GEN8_3DSTATE_SF_length);
2244 for (uint32_t i = 0; i < GEN8_3DSTATE_SF_length; i++)
2245 dw[i] = cmd_buffer->rs_state->state_sf[i] | pipeline->state_sf[i];
2246 }
2247
2248 cmd_buffer->vb_dirty = 0;
2249 cmd_buffer->dirty = 0;
2250 }
2251
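/* Note on the 3DSTATE_SF merge above: the pipeline and the dynamic
 * rs_state each pack a complete 3DSTATE_SF with the fields owned by the
 * other side left zero, so OR-ing the two dword arrays produces the
 * combined command (the shared header dwords are identical, so OR-ing
 * them is harmless).
 */
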
2252 void VKAPI vkCmdDraw(
2253 VkCmdBuffer cmdBuffer,
2254 uint32_t firstVertex,
2255 uint32_t vertexCount,
2256 uint32_t firstInstance,
2257 uint32_t instanceCount)
2258 {
2259 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2260
2261 anv_cmd_buffer_flush_state(cmd_buffer);
2262
2263 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2264 .VertexAccessType = SEQUENTIAL,
2265 .VertexCountPerInstance = vertexCount,
2266 .StartVertexLocation = firstVertex,
2267 .InstanceCount = instanceCount,
2268 .StartInstanceLocation = firstInstance,
2269 .BaseVertexLocation = 0);
2270 }
2271
2272 void VKAPI vkCmdDrawIndexed(
2273 VkCmdBuffer cmdBuffer,
2274 uint32_t firstIndex,
2275 uint32_t indexCount,
2276 int32_t vertexOffset,
2277 uint32_t firstInstance,
2278 uint32_t instanceCount)
2279 {
2280 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2281
2282 anv_cmd_buffer_flush_state(cmd_buffer);
2283
2284 anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
2285 .VertexAccessType = RANDOM,
2286 .VertexCountPerInstance = indexCount,
2287 .StartVertexLocation = firstIndex,
2288 .InstanceCount = instanceCount,
2289 .StartInstanceLocation = firstInstance,
2290 .BaseVertexLocation = 0);
2291 }
2292
2293 static void
2294 anv_batch_lrm(struct anv_batch *batch,
2295 uint32_t reg, struct anv_bo *bo, uint32_t offset)
2296 {
2297 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM,
2298 .RegisterAddress = reg,
2299 .MemoryAddress = { bo, offset });
2300 }
2301
2302 static void
2303 anv_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
2304 {
2305 anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM,
2306 .RegisterOffset = reg,
2307 .DataDWord = imm);
2308 }
2309
2310 /* Auto-Draw / Indirect Registers */
2311 #define GEN7_3DPRIM_END_OFFSET 0x2420
2312 #define GEN7_3DPRIM_START_VERTEX 0x2430
2313 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
2314 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
2315 #define GEN7_3DPRIM_START_INSTANCE 0x243C
2316 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
2317
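/* The register loads below assume the indirect buffer holds tightly
 * packed 32-bit parameters in this order (one record per draw):
 *
 *    draw:            vertex_count, instance_count, first_vertex,
 *                     first_instance
 *    indexed draw:    index_count, instance_count, first_index,
 *                     base_vertex, first_instance
 */
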
2318 void VKAPI vkCmdDrawIndirect(
2319 VkCmdBuffer cmdBuffer,
2320 VkBuffer _buffer,
2321 VkDeviceSize offset,
2322 uint32_t count,
2323 uint32_t stride)
2324 {
2325 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2326 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2327 struct anv_bo *bo = &buffer->mem->bo;
2328 uint32_t bo_offset = buffer->offset + offset;
2329
   anv_cmd_buffer_flush_state(cmd_buffer);

   /* Emit one register-loaded draw per indirect command; the parameters
    * for draw i start at bo_offset + i * stride. */
   for (uint32_t i = 0; i < count; i++) {
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
      anv_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

      anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                     .IndirectParameterEnable = true,
                     .VertexAccessType = SEQUENTIAL);

      bo_offset += stride;
   }
2341 }
2342
2343 void VKAPI vkCmdDrawIndexedIndirect(
2344 VkCmdBuffer cmdBuffer,
2345 VkBuffer _buffer,
2346 VkDeviceSize offset,
2347 uint32_t count,
2348 uint32_t stride)
2349 {
2350 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2351 struct anv_buffer *buffer = (struct anv_buffer *) _buffer;
2352 struct anv_bo *bo = &buffer->mem->bo;
2353 uint32_t bo_offset = buffer->offset + offset;
2354
   anv_cmd_buffer_flush_state(cmd_buffer);

   /* Emit one register-loaded draw per indirect command; the parameters
    * for draw i start at bo_offset + i * stride. */
   for (uint32_t i = 0; i < count; i++) {
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
      anv_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

      anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE,
                     .IndirectParameterEnable = true,
                     .VertexAccessType = RANDOM);

      bo_offset += stride;
   }
2366 }
2367
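/* FIXME: Several of the entry points below (dispatch, events, barriers,
 * atomic counters, query-result copies) are unimplemented stubs.
 */
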
2368 void VKAPI vkCmdDispatch(
2369 VkCmdBuffer cmdBuffer,
2370 uint32_t x,
2371 uint32_t y,
2372 uint32_t z)
2373 {
2374 }
2375
2376 void VKAPI vkCmdDispatchIndirect(
2377 VkCmdBuffer cmdBuffer,
2378 VkBuffer buffer,
2379 VkDeviceSize offset)
2380 {
2381 }
2382
2383 void VKAPI vkCmdSetEvent(
2384 VkCmdBuffer cmdBuffer,
2385 VkEvent event,
2386 VkPipeEvent pipeEvent)
2387 {
2388 }
2389
2390 void VKAPI vkCmdResetEvent(
2391 VkCmdBuffer cmdBuffer,
2392 VkEvent event,
2393 VkPipeEvent pipeEvent)
2394 {
2395 }
2396
2397 void VKAPI vkCmdWaitEvents(
2398 VkCmdBuffer cmdBuffer,
2399 VkWaitEvent waitEvent,
2400 uint32_t eventCount,
2401 const VkEvent* pEvents,
2402 uint32_t memBarrierCount,
2403 const void** ppMemBarriers)
2404 {
2405 }
2406
2407 void VKAPI vkCmdPipelineBarrier(
2408 VkCmdBuffer cmdBuffer,
2409 VkWaitEvent waitEvent,
2410 uint32_t pipeEventCount,
2411 const VkPipeEvent* pPipeEvents,
2412 uint32_t memBarrierCount,
2413 const void** ppMemBarriers)
2414 {
2415 }
2416
2417 static void
2418 anv_batch_emit_ps_depth_count(struct anv_batch *batch,
2419 struct anv_bo *bo, uint32_t offset)
2420 {
2421 anv_batch_emit(batch, GEN8_PIPE_CONTROL,
2422 .DestinationAddressType = DAT_PPGTT,
2423 .PostSyncOperation = WritePSDepthCount,
2424 .Address = { bo, offset }); /* FIXME: This is only lower 32 bits */
2425 }
2426
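/* Occlusion query slots are 16 bytes: vkCmdBeginQuery writes the depth
 * count to slot * 16 (below) and vkCmdEndQuery to slot * 16 + 8. The
 * query result is then presumably the difference of the pair when the
 * pool is read back, along the lines of:
 *
 *    uint64_t *counts = pool_map;   // illustrative name for the bo map
 *    uint64_t result = counts[slot * 2 + 1] - counts[slot * 2];
 */
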
2427 void VKAPI vkCmdBeginQuery(
2428 VkCmdBuffer cmdBuffer,
2429 VkQueryPool queryPool,
2430 uint32_t slot,
2431 VkQueryControlFlags flags)
2432 {
2433 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2434 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
2435
2436 switch (pool->type) {
2437 case VK_QUERY_TYPE_OCCLUSION:
2438 anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16);
2439 break;
2440
2441 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
2442 break;
2443
2444 default:
2445 break;
2446 }
2447 }
2448
2449 void VKAPI vkCmdEndQuery(
2450 VkCmdBuffer cmdBuffer,
2451 VkQueryPool queryPool,
2452 uint32_t slot)
2453 {
2454 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2455 struct anv_query_pool *pool = (struct anv_query_pool *) queryPool;
2456
2457 switch (pool->type) {
2458 case VK_QUERY_TYPE_OCCLUSION:
2459 anv_batch_emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, slot * 16 + 8);
2460 break;
2461
2462 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
2463 break;
2464
2465 default:
2466 break;
2467 }
2468 }
2469
2470 void VKAPI vkCmdResetQueryPool(
2471 VkCmdBuffer cmdBuffer,
2472 VkQueryPool queryPool,
2473 uint32_t startQuery,
2474 uint32_t queryCount)
2475 {
2476 }
2477
2478 #define TIMESTAMP 0x44070
2479
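/* Two ways to capture the TIMESTAMP register below: MI_STORE_REGISTER_MEM
 * samples it when the command streamer parses the command (top of pipe),
 * while a PIPE_CONTROL with the WriteTimestamp post-sync operation writes
 * it only after prior rendering completes (bottom of pipe).
 */
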
2480 void VKAPI vkCmdWriteTimestamp(
2481 VkCmdBuffer cmdBuffer,
2482 VkTimestampType timestampType,
2483 VkBuffer destBuffer,
2484 VkDeviceSize destOffset)
2485 {
2486 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2487 struct anv_buffer *buffer = (struct anv_buffer *) destBuffer;
2488 struct anv_bo *bo = &buffer->mem->bo;
2489
2490 switch (timestampType) {
2491 case VK_TIMESTAMP_TYPE_TOP:
2492 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM,
2493 .RegisterAddress = TIMESTAMP,
2494 .MemoryAddress = { bo, buffer->offset + destOffset });
2495 break;
2496
2497 case VK_TIMESTAMP_TYPE_BOTTOM:
2498 anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
2499 .DestinationAddressType = DAT_PPGTT,
2500 .PostSyncOperation = WriteTimestamp,
                     /* FIXME: This is only the lower 32 bits */
                     .Address = { bo, buffer->offset + destOffset });
2503 break;
2504
2505 default:
2506 break;
2507 }
2508 }
2509
2510 void VKAPI vkCmdCopyQueryPoolResults(
2511 VkCmdBuffer cmdBuffer,
2512 VkQueryPool queryPool,
2513 uint32_t startQuery,
2514 uint32_t queryCount,
2515 VkBuffer destBuffer,
2516 VkDeviceSize destOffset,
2517 VkDeviceSize destStride,
2518 VkQueryResultFlags flags)
2519 {
2520 }
2521
2522 void VKAPI vkCmdInitAtomicCounters(
2523 VkCmdBuffer cmdBuffer,
2524 VkPipelineBindPoint pipelineBindPoint,
2525 uint32_t startCounter,
2526 uint32_t counterCount,
2527 const uint32_t* pData)
2528 {
2529 }
2530
2531 void VKAPI vkCmdLoadAtomicCounters(
2532 VkCmdBuffer cmdBuffer,
2533 VkPipelineBindPoint pipelineBindPoint,
2534 uint32_t startCounter,
2535 uint32_t counterCount,
2536 VkBuffer srcBuffer,
2537 VkDeviceSize srcOffset)
2538 {
2539 }
2540
2541 void VKAPI vkCmdSaveAtomicCounters(
2542 VkCmdBuffer cmdBuffer,
2543 VkPipelineBindPoint pipelineBindPoint,
2544 uint32_t startCounter,
2545 uint32_t counterCount,
2546 VkBuffer destBuffer,
2547 VkDeviceSize destOffset)
2548 {
2549 }
2550
2551 VkResult VKAPI vkCreateFramebuffer(
2552 VkDevice _device,
2553 const VkFramebufferCreateInfo* pCreateInfo,
2554 VkFramebuffer* pFramebuffer)
2555 {
2556 struct anv_device *device = (struct anv_device *) _device;
2557 struct anv_framebuffer *framebuffer;
2558
2559 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
2560
2561 framebuffer = anv_device_alloc(device, sizeof(*framebuffer), 8,
2562 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2563 if (framebuffer == NULL)
2564 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2565
2566 framebuffer->color_attachment_count = pCreateInfo->colorAttachmentCount;
2567 for (uint32_t i = 0; i < pCreateInfo->colorAttachmentCount; i++) {
2568 framebuffer->color_attachments[i] =
2569 (struct anv_color_attachment_view *) pCreateInfo->pColorAttachments[i].view;
2570 }
2571
2572 if (pCreateInfo->pDepthStencilAttachment) {
2573 framebuffer->depth_stencil =
2574 (struct anv_depth_stencil_view *) pCreateInfo->pDepthStencilAttachment->view;
2575 }
2576
2577 framebuffer->sample_count = pCreateInfo->sampleCount;
2578 framebuffer->width = pCreateInfo->width;
2579 framebuffer->height = pCreateInfo->height;
2580 framebuffer->layers = pCreateInfo->layers;
2581
2582 *pFramebuffer = (VkFramebuffer) framebuffer;
2583
2584 return VK_SUCCESS;
2585 }
2586
2587 VkResult VKAPI vkCreateRenderPass(
2588 VkDevice _device,
2589 const VkRenderPassCreateInfo* pCreateInfo,
2590 VkRenderPass* pRenderPass)
2591 {
2592 struct anv_device *device = (struct anv_device *) _device;
2593 struct anv_render_pass *pass;
2594
2595 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO);
2596
2597 pass = anv_device_alloc(device, sizeof(*pass), 8,
2598 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
2599 if (pass == NULL)
2600 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2601
2602 pass->render_area = pCreateInfo->renderArea;
2603
2604 *pRenderPass = (VkRenderPass) pass;
2605
2606 return VK_SUCCESS;
2607 }
2608
2609 void VKAPI vkCmdBeginRenderPass(
2610 VkCmdBuffer cmdBuffer,
2611 const VkRenderPassBegin* pRenderPassBegin)
2612 {
2613 struct anv_cmd_buffer *cmd_buffer = (struct anv_cmd_buffer *) cmdBuffer;
2614 struct anv_render_pass *pass = (struct anv_render_pass *) pRenderPassBegin->renderPass;
2615
2616 cmd_buffer->framebuffer = (struct anv_framebuffer *) pRenderPassBegin->framebuffer;
2617 cmd_buffer->dirty |= ANV_CMD_BUFFER_DESCRIPTOR_SET_DIRTY;
2618
2619 anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE,
2620 .ClippedDrawingRectangleYMin = pass->render_area.offset.y,
2621 .ClippedDrawingRectangleXMin = pass->render_area.offset.x,
2622 .ClippedDrawingRectangleYMax =
2623 pass->render_area.offset.y + pass->render_area.extent.height - 1,
2624 .ClippedDrawingRectangleXMax =
2625 pass->render_area.offset.x + pass->render_area.extent.width - 1,
2626 .DrawingRectangleOriginY = 0,
2627 .DrawingRectangleOriginX = 0);
2628 }
2629
2630 void VKAPI vkCmdEndRenderPass(
2631 VkCmdBuffer cmdBuffer,
2632 VkRenderPass renderPass)
2633 {
2634 }